drm/i915: Refactor duplicate object vmap functions

We now have two implementations for vmapping a whole object, one for
dma-buf and one for the ringbuffer. If we couple the mapping into the
obj->pages lifetime, then we can reuse an obj->mapping for both and at
the same time couple it into the shrinker. There is a third vmapping
routine in the cmdparser that maps only a range within the object; for
the time being that is left alone, but it will eventually use these
routines in order to cache the mapping between invocations.
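
The intended usage pattern, as the ringbuffer conversion elsewhere in
this patch applies it (an abridged sketch; error handling trimmed and
field names assumed from intel_ringbuffer.c):

	addr = i915_gem_object_pin_map(obj);
	if (IS_ERR(addr))
		return PTR_ERR(addr);
	ringbuf->virtual_start = addr;

	/* On teardown, drop the pin only. The vmap itself stays cached
	 * in obj->mapping and is torn down along with obj->pages, or
	 * earlier under memory pressure via the shrinker.
	 */
	i915_gem_object_unpin_map(obj);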

v2: Mark the fallible kmalloc() as __GFP_NOWARN (vsyrjala)
v3: Call unpin_vmap from the right dmabuf unmapper

v4: Rename vmap to map as we don't wish to imply the type of mapping
involved, just that it contiguously maps the object into kernel space.
Add kerneldoc and lockdep annotations.
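
The kerneldoc sits alongside the declaration in i915_drv.h and so does
not appear in the i915_gem.c hunks below; it reads approximately:

	/**
	 * i915_gem_object_pin_map - return a contiguous mapping of the entire object
	 * @obj: the object to map into kernel address space
	 *
	 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
	 * pages and then returns a contiguous mapping of the backing storage into
	 * the kernel address space.
	 *
	 * The caller must hold the struct_mutex.
	 *
	 * Returns the pointer through which to access the backing storage.
	 */
	void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj);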

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Cc: Dave Gordon <david.s.gordon@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1460113874-17366-4-git-send-email-chris@chris-wilson.co.uk
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 339d183..55ad50b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2232,6 +2232,11 @@
 	 * lists early. */
 	list_del(&obj->global_list);
 
+	if (obj->mapping) {
+		vunmap(obj->mapping);
+		obj->mapping = NULL;
+	}
+
 	ops->put_pages(obj);
 	obj->pages = NULL;
 
@@ -2400,6 +2405,45 @@
 	return 0;
 }
 
+void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
+{
+	int ret;
+
+	lockdep_assert_held(&obj->base.dev->struct_mutex);
+
+	ret = i915_gem_object_get_pages(obj);
+	if (ret)
+		return ERR_PTR(ret);
+
+	i915_gem_object_pin_pages(obj);
+
+	if (obj->mapping == NULL) {
+		struct sg_page_iter sg_iter;
+		struct page **pages;
+		int n;
+
+		n = obj->base.size >> PAGE_SHIFT;
+		pages = kmalloc(n*sizeof(*pages), GFP_TEMPORARY | __GFP_NOWARN);
+		if (pages == NULL)
+			pages = drm_malloc_ab(n, sizeof(*pages));
+		if (pages != NULL) {
+			n = 0;
+			for_each_sg_page(obj->pages->sgl, &sg_iter,
+					 obj->pages->nents, 0)
+				pages[n++] = sg_page_iter_page(&sg_iter);
+
+			obj->mapping = vmap(pages, n, 0, PAGE_KERNEL);
+			drm_free_large(pages);
+		}
+		if (obj->mapping == NULL) {
+			i915_gem_object_unpin_pages(obj);
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+
+	return obj->mapping;
+}
+
 void i915_vma_move_to_active(struct i915_vma *vma,
 			     struct drm_i915_gem_request *req)
 {
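
For reference, the matching release helper is a static inline next to
the declaration in i915_drv.h, also outside the hunks above; a minimal
sketch:

	static inline void
	i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
	{
		lockdep_assert_held(&obj->base.dev->struct_mutex);
		/* Drop the page pin only: the vmap remains cached in
		 * obj->mapping so repeated pin_map calls are cheap, and
		 * is finally vunmap()ed in the put_pages hunk above.
		 */
		i915_gem_object_unpin_pages(obj);
	}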