From 08ae078a33245bc01dcf895bd886f30103cc6178 Mon Sep 17 00:00:00 2001
From: Thomas Hellstrom <thellstrom@vmware.com>
Date: Thu, 30 Sep 2010 12:36:45 +0200
Subject: drm/ttm: Fix two race conditions + fix busy codepaths

This fixes a race pointed out by Dave Airlie where we don't take a buffer
object about to be destroyed off the LRU lists properly. It also fixes a rare
case where a buffer object could be destroyed in the middle of an
accelerated eviction.

The patch also adds a utility function that can be used to prematurely
release GPU memory space usage of an object waiting to be destroyed.
For example during eviction or swapout.

The above mentioned commit didn't queue the buffer on the delayed destroy
list under some rare circumstances. It also didn't completely honor the
remove_all parameter.

Fixes:
https://bugzilla.redhat.com/show_bug.cgi?id=615505
http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=591061

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
---
 drivers/gpu/drm/ttm/ttm_bo.c |   84 +++++++++++++++++++++++++++++++++++------
 include/drm/ttm/ttm_bo_api.h |    4 +-
 2 files changed, 74 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 555ebb1..77f22ba 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -442,6 +442,43 @@ out_err:
 }
 
 /**
+ * Call bo::reserved and with the lru lock held.
+ * Will release GPU memory type usage on destruction.
+ * This is the place to put in driver specific hooks.
+ * Will release the bo::reserved lock and the
+ * lru lock on exit.
+ */
+
+static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
+{
+	struct ttm_bo_global *glob = bo->glob;
+
+	if (bo->ttm) {
+
+		/**
+		 * Release the lru_lock, since we don't want to have
+		 * an atomic requirement on ttm_tt[unbind|destroy].
+		 */
+
+		spin_unlock(&glob->lru_lock);
+		ttm_tt_unbind(bo->ttm);
+		ttm_tt_destroy(bo->ttm);
+		bo->ttm = NULL;
+		spin_lock(&glob->lru_lock);
+	}
+
+	if (bo->mem.mm_node) {
+		drm_mm_put_block(bo->mem.mm_node);
+		bo->mem.mm_node = NULL;
+	}
+
+	atomic_set(&bo->reserved, 0);
+	wake_up_all(&bo->event_queue);
+	spin_unlock(&glob->lru_lock);
+}
+
+
+/**
  * If bo idle, remove from delayed- and lru lists, and unref.
  * If not idle, and already on delayed list, do nothing.
  * If not idle, and not on delayed list, put on delayed list,
@@ -456,6 +493,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 	int ret;
 
 	spin_lock(&bo->lock);
+retry:
 	(void) ttm_bo_wait(bo, false, false, !remove_all);
 
 	if (!bo->sync_obj) {
@@ -464,32 +502,52 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 		spin_unlock(&bo->lock);
 
 		spin_lock(&glob->lru_lock);
-		put_count = ttm_bo_del_from_lru(bo);
+		ret = ttm_bo_reserve_locked(bo, false, !remove_all, false, 0);
+
+		/**
+		 * Someone else has the object reserved. Bail and retry.
+		 */
 
-		ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
-		BUG_ON(ret);
-		if (bo->ttm)
-			ttm_tt_unbind(bo->ttm);
+		if (unlikely(ret == -EBUSY)) {
+			spin_unlock(&glob->lru_lock);
+			spin_lock(&bo->lock);
+			goto requeue;
+		}
+
+		/**
+		 * We can re-check for sync object without taking
+		 * the bo::lock since setting the sync object requires
+		 * also bo::reserved. A busy object at this point may
+		 * be caused by another thread starting an accelerated
+		 * eviction.
+		 */
+
+		if (unlikely(bo->sync_obj)) {
+			atomic_set(&bo->reserved, 0);
+			wake_up_all(&bo->event_queue);
+			spin_unlock(&glob->lru_lock);
+			spin_lock(&bo->lock);
+			if (remove_all)
+				goto retry;
+			else
+				goto requeue;
+		}
+
+		put_count = ttm_bo_del_from_lru(bo);
 
 		if (!list_empty(&bo->ddestroy)) {
 			list_del_init(&bo->ddestroy);
 			++put_count;
 		}
-		if (bo->mem.mm_node) {
-			bo->mem.mm_node->private = NULL;
-			drm_mm_put_block(bo->mem.mm_node);
-			bo->mem.mm_node = NULL;
-		}
-		spin_unlock(&glob->lru_lock);
 
-		atomic_set(&bo->reserved, 0);
+		ttm_bo_cleanup_memtype_use(bo);
 
 		while (put_count--)
 			kref_put(&bo->list_kref, ttm_bo_ref_bug);
 
 		return 0;
 	}
-
+requeue:
 	spin_lock(&glob->lru_lock);
 	if (list_empty(&bo->ddestroy)) {
 		void *sync_obj = bo->sync_obj;
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 267a86c..2040e6c 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -246,9 +246,11 @@ struct ttm_buffer_object {
 
 	atomic_t reserved;
 
-
 	/**
 	 * Members protected by the bo::lock
+	 * In addition, setting sync_obj to anything else
+	 * than NULL requires bo::reserved to be held. This allows for
+	 * checking NULL while reserved but not holding bo::lock.
 	 */
 
 	void *sync_obj_arg;
-- 
1.7.3.2