drm/i915: Attempt to prefault user pages for pread/pwrite

Fault in the user pages before starting the copy, in the hope that the
atomic fast paths are then more likely to succeed.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
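---

Not part of the patch: a minimal sketch of the prefault-then-atomic-copy
pattern the change relies on, assuming the era-appropriate helpers used in
the hunks below (fault_in_pages_writeable, kmap_atomic with KM_USER0,
__copy_to_user_inatomic). The function name example_read_page() is made up
for illustration only.

#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>

static int example_read_page(struct page *page, char __user *data,
			     int offset, int length)
{
	char *vaddr;
	int unwritten;

	/*
	 * Touch the user pages up front so that, with luck, they are
	 * resident by the time we are inside the atomic section, where
	 * __copy_to_user_inatomic() cannot sleep to fault them in.
	 */
	if (fault_in_pages_writeable(data, length))
		return -EFAULT;

	vaddr = kmap_atomic(page, KM_USER0);
	unwritten = __copy_to_user_inatomic(data, vaddr + offset, length);
	kunmap_atomic(vaddr, KM_USER0);

	/*
	 * The prefault is only a hint: the pages may have been reclaimed
	 * again before the copy, so the caller still needs its non-atomic
	 * slow path as a fallback.
	 */
	return unwritten ? -EFAULT : 0;
}

The same idea applies to pwrite with fault_in_pages_readable() and
__copy_from_user_inatomic(), as in the second pair of hunks.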
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 942e4b3..b44c09a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -265,19 +265,14 @@
 		char __user *data,
 		int length)
 {
-	char __iomem *vaddr;
 	int unwritten;
+	char *vaddr;
 
 	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
-	if (vaddr == NULL)
-		return -ENOMEM;
 	unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
 	kunmap_atomic(vaddr, KM_USER0);
 
-	if (unwritten)
-		return -EFAULT;
-
-	return 0;
+	return unwritten ? -EFAULT : 0;
 }
 
 static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
@@ -602,6 +597,13 @@
 		goto out;
 	}
 
+	ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
+				       args->size);
+	if (ret) {
+		ret = -EFAULT;
+		goto out;
+	}
+
 	if (i915_gem_object_needs_bit17_swizzle(obj)) {
 		ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
 	} else {
@@ -668,18 +670,14 @@
 		 char __user *data,
 		 int length)
 {
-	char __iomem *vaddr;
-	unsigned long unwritten;
+	int unwritten;
+	char *vaddr;
 
 	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
-	if (vaddr == NULL)
-		return -ENOMEM;
 	unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
 	kunmap_atomic(vaddr, KM_USER0);
 
-	if (unwritten)
-		return -EFAULT;
-	return 0;
+	return unwritten ? -EFAULT : 0;
 }
 
 /**
@@ -1078,6 +1076,13 @@
 		goto out;
 	}
 
+	ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
+				      args->size);
+	if (ret) {
+		ret = -EFAULT;
+		goto out;
+	}
+
 	/* We can only do the GTT pwrite on untiled buffers, as otherwise
 	 * it would end up going through the fenced access, and we'll get
 	 * different detiling behavior between reading and writing.