drm/i915: range-restricted eviction support

Add a mappable parameter to i915_gem_evict_something() to distinguish
between the two cases: non-restricted and mappable GTT allocations.
There are no functional changes yet because the mappable limit is
currently set to the end of the GTT.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
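
Illustration only, not part of the patch: a small user-space C model of the
extra constraint the mappable flag imposes. The hole_satisfies() helper below
is hypothetical and merely stands in for the real drm_mm search; with
mappable == true the request must also fit entirely below mappable_end,
mirroring the drm_mm_search_free_in_range() call in the diff below with a
range of [0, gtt_mappable_end). With mappable == false the second check drops
away and the search degenerates to the unrestricted drm_mm_search_free() case.

#include <stdbool.h>
#include <stdio.h>

/* Model of whether a free hole can satisfy an allocation request,
 * optionally restricted to the mappable part of the GTT.
 */
static bool hole_satisfies(unsigned long hole_start, unsigned long hole_size,
			   unsigned long min_size, unsigned long alignment,
			   bool mappable, unsigned long mappable_end)
{
	unsigned long start = hole_start;

	/* Round the start of the hole up to the requested alignment. */
	if (alignment)
		start = (start + alignment - 1) & ~(alignment - 1);

	/* The aligned allocation must still fit inside the hole... */
	if (start + min_size > hole_start + hole_size)
		return false;

	/* ...and, for mappable requests, stay below the mappable limit. */
	if (mappable && start + min_size > mappable_end)
		return false;

	return true;
}

int main(void)
{
	unsigned long mappable_end = 64UL << 20;	/* first 64 MiB is mappable */

	/* 8 MiB request in a hole at 40 MiB: fits below the limit, prints 1. */
	printf("%d\n", hole_satisfies(40UL << 20, 32UL << 20, 8UL << 20,
				      4096, true, mappable_end));
	/* Same request in a hole at 60 MiB: would cross the limit, prints 0. */
	printf("%d\n", hole_satisfies(60UL << 20, 32UL << 20, 8UL << 20,
				      4096, true, mappable_end));
	return 0;
}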
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f168e82..dc0a21a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -535,6 +535,8 @@
 		struct drm_mm vram;
 		/** Memory allocator for GTT */
 		struct drm_mm gtt_space;
+		/** End of mappable part of GTT */
+		unsigned long gtt_mappable_end;
 
 		struct io_mapping *gtt_mapping;
 		int gtt_mtrr;
@@ -1067,7 +1069,8 @@
 void i915_gem_shrinker_exit(void);
 
 /* i915_gem_evict.c */
-int i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment);
+int i915_gem_evict_something(struct drm_device *dev, int min_size,
+			     unsigned alignment, bool mappable);
 int i915_gem_evict_everything(struct drm_device *dev);
 int i915_gem_evict_inactive(struct drm_device *dev);
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d0aaf97..254eb0c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -187,6 +187,7 @@
 		    end - start);
 
 	dev_priv->mm.gtt_total = end - start;
+	dev_priv->mm.gtt_mappable_end = end;
 
 	return 0;
 }
@@ -413,7 +414,8 @@
 		struct drm_device *dev = obj->dev;
 
 		ret = i915_gem_evict_something(dev, obj->size,
-					       i915_gem_get_gtt_alignment(obj));
+					       i915_gem_get_gtt_alignment(obj),
+					       false);
 		if (ret)
 			return ret;
 
@@ -2672,7 +2674,7 @@
 		/* If the gtt is empty and we're still having trouble
 		 * fitting our object in, we're out of memory.
 		 */
-		ret = i915_gem_evict_something(dev, obj->size, alignment);
+		ret = i915_gem_evict_something(dev, obj->size, alignment, true);
 		if (ret)
 			return ret;
 
@@ -2687,7 +2689,7 @@
 		if (ret == -ENOMEM) {
 			/* first try to clear up some space from the GTT */
 			ret = i915_gem_evict_something(dev, obj->size,
-						       alignment);
+						       alignment, true);
 			if (ret) {
 				/* now try to shrink everyone else */
 				if (gfpmask) {
@@ -2717,7 +2719,7 @@
 		drm_mm_put_block(obj_priv->gtt_space);
 		obj_priv->gtt_space = NULL;
 
-		ret = i915_gem_evict_something(dev, obj->size, alignment);
+		ret = i915_gem_evict_something(dev, obj->size, alignment, true);
 		if (ret)
 			return ret;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 43a4013..3a4215f 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -41,7 +41,8 @@
 }
 
 int
-i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment)
+i915_gem_evict_something(struct drm_device *dev, int min_size,
+			 unsigned alignment, bool mappable)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct list_head eviction_list, unwind_list;
@@ -51,9 +52,17 @@
 	i915_gem_retire_requests(dev);
 
 	/* Re-check for free space after retiring requests */
-	if (drm_mm_search_free(&dev_priv->mm.gtt_space,
-			       min_size, alignment, 0))
-		return 0;
+	if (mappable) {
+		if (drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
+						min_size, alignment, 0,
+						dev_priv->mm.gtt_mappable_end,
+						0))
+			return 0;
+	} else {
+		if (drm_mm_search_free(&dev_priv->mm.gtt_space,
+				       min_size, alignment, 0))
+			return 0;
+	}
 
 	/*
 	 * The goal is to evict objects and amalgamate space in LRU order.
@@ -79,7 +88,12 @@
 	 */
 
 	INIT_LIST_HEAD(&unwind_list);
-	drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
+	if (mappable)
+		drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, min_size,
+					    alignment, 0,
+					    dev_priv->mm.gtt_mappable_end);
+	else
+		drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
 
 	/* First see if there is a large enough contiguous idle region... */
 	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {