commit 262b6d363fcff16359c93bd58c297f961f6e6273
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   Tue Jan 15 16:17:54 2013 +0000

    drm/i915: Invalidate the relocation presumed_offsets along the slow path
    
    In the slow path, we are forced to copy the relocations prior to
    acquiring the struct mutex in order to handle pagefaults. We forgo
    copying the new offsets back into the relocation entries in order to
    prevent a recursive locking bug should we trigger a pagefault whilst
    holding the mutex for the reservations of the execbuffer. Therefore, we
    need to reset the presumed_offsets just in case the objects are rebound
    back into their old locations after relocating for this execbuffer - if
    that were to happen we would assume the relocations were valid and leave
    the actual pointers in the batch dangling - an instant hang.
    
    Fixes regression from commit bcf50e2775bbc3101932d8e4ab8c7902aa4163b4
    Author: Chris Wilson <chris@chris-wilson.co.uk>
    Date:   Sun Nov 21 22:07:12 2010 +0000
    
        drm/i915: Handle pagefaults in execbuffer user relocations
    
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=55984
    Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
    Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
    Cc: stable@vger.kernel.org
    Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
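
For context on why a sentinel of (u64)-1 is sufficient: the relocation fast
path skips any entry whose presumed_offset still matches the target object's
current offset, and no object can ever be bound at (u64)-1, so an invalidated
entry is always reprocessed. A minimal standalone sketch of that check
(simplified, illustrative types, not the kernel's actual structures):

#include <stdbool.h>
#include <stdint.h>

/* Trimmed, illustrative stand-in for struct drm_i915_gem_relocation_entry. */
struct reloc_entry {
	uint64_t presumed_offset;	/* offset userspace assumed when it emitted the batch */
};

/*
 * The fast path may skip rewriting a relocation whose assumption still
 * holds.  Writing (u64)-1 into presumed_offset guarantees this test is
 * true on the next execbuffer, since no object is ever bound at that
 * address, so the relocation is always rewritten.
 */
static bool reloc_needs_fixup(const struct reloc_entry *reloc,
			      uint64_t current_target_offset)
{
	return reloc->presumed_offset != current_target_offset;
}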

diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index d6a994a..26d08bb 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -539,6 +539,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 	total = 0;
 	for (i = 0; i < count; i++) {
 		struct drm_i915_gem_relocation_entry __user *user_relocs;
+		u64 invalid_offset = (u64)-1;
+		int j;

 		user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;

@@ -549,6 +551,25 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 			goto err;
 		}

+		/* As we do not update the known relocation offsets after
+		 * relocating (due to the complexities in lock handling),
+		 * we need to mark them as invalid now so that we force the
+		 * relocation processing next time. Just in case the target
+		 * object is evicted and then rebound into its old
+		 * presumed_offset before the next execbuffer - if that
+		 * happened we would make the mistake of assuming that the
+		 * relocations were valid.
+		 */
+		for (j = 0; j < exec[i].relocation_count; j++) {
+			if (copy_to_user(&user_relocs[j].presumed_offset,
+					 &invalid_offset,
+					 sizeof(invalid_offset))) {
+				ret = -EFAULT;
+				mutex_lock(&dev->struct_mutex);
+				goto err;
+			}
+		}
+
 		reloc_offset[i] = total;
 		total += exec[i].relocation_count;
 	}
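
The sentinel is written into the relocation array in userspace memory, so a
userspace driver that inspects presumed_offset after such an execbuffer will
see (u64)-1 rather than a real binding. A hedged, standalone sketch of how a
driver might treat that value (the struct is a trimmed copy of the uapi
drm_i915_gem_relocation_entry; the helper name is illustrative, not a libdrm
API):

#include <stdbool.h>
#include <stdint.h>

/* Trimmed copy of the uapi struct drm_i915_gem_relocation_entry. */
struct reloc_entry_lite {
	uint32_t target_handle;		/* handle of the object the reloc points at */
	uint32_t delta;			/* byte offset into the target object */
	uint64_t offset;		/* where in the batch the pointer lives */
	uint64_t presumed_offset;	/* may come back as (u64)-1 after the slow path */
};

/*
 * After an execbuffer that took the slow path, presumed_offset no longer
 * reflects a real binding; only reuse a cached address when the kernel
 * has not invalidated it.
 */
static bool presumed_offset_valid(const struct reloc_entry_lite *r)
{
	return r->presumed_offset != (uint64_t)-1;
}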