drm/i915: Implement a framework for batch buffer pools

This adds a small module for managing a pool of batch buffers.
The only current use case is for the command parser, as described
in the kerneldoc in the patch. The code is simple, but separating
it out makes it easier to change the underlying algorithms and to
extend to future use cases should they arise.

The interface is simple: init to create an empty pool, fini to
clean it up, and get to obtain a new buffer. Note that all buffers
are expected to be inactive before cleaning up the pool.
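
For reference, the interface amounts to roughly the following
declarations (a sketch inferred from the description above, not
verbatim prototypes; the names simply follow the init/fini/get
convention just described):

    void i915_gem_batch_pool_init(struct drm_device *dev,
                                  struct i915_gem_batch_pool *pool);
    void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool);
    struct drm_i915_gem_object *
    i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
                            size_t size);

A caller checks the result of _get() with IS_ERR() and then treats
the returned object like any other GEM buffer.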

Locking is currently based on the caller holding the struct_mutex.
We already do that in the places where we will use the batch pool
for the command parser.
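
With that scheme each pool function can simply assert the lock
rather than take it; minimally, and assuming the pool keeps a
back-pointer to its drm_device (see the struct sketch under v7):

    /* Callers are expected to already hold struct_mutex */
    WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));

This is the WARN_ON form referenced in the v2 notes below.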

v2:
- s/BUG_ON/WARN_ON/ for locking assertions
- Remove the cap on pool size
- Switch from alloc/free to init/fini

v3:
- Idiomatic looping structure in _fini
- Correct handling of purged objects
- Don't return a buffer that's too much larger than needed
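
To illustrate the last two points, the lookup in _get() might look
roughly like the following (a sketch of its final shape, i.e. after
the v6 and v7 reworks below, with 2x taken as one plausible bound
for "too much larger"):

    struct drm_i915_gem_object *obj = NULL, *tmp, *next;

    list_for_each_entry_safe(tmp, next, &pool->cache_list,
                             batch_pool_list) {
            if (tmp->active)
                    continue;

            /* Clean up buffers the shrinker purged while pooled */
            if (tmp->madv == __I915_MADV_PURGED) {
                    list_del(&tmp->batch_pool_list);
                    drm_gem_object_unreference(&tmp->base);
                    continue;
            }

            /* Big enough, but not wastefully bigger */
            if (tmp->base.size >= size && tmp->base.size <= 2 * size) {
                    obj = tmp;
                    break;
            }
    }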

v4:
- Rebased to latest -nightly

v5:
- Remove _put() function and clean up comments to match

v6:
- Move purged check inside the loop (danvet, from v4 1/7 feedback)

v7:
- Use single list instead of two (Chris W); see the struct sketch below
- s/active_list/cache_list
- Squashed in debug patches (Chris W)
  drm/i915: Add a batch pool debugfs file

  It provides some useful information about the buffers in
  the global command parser batch pool.

  v2: rebase on global pool instead of per-ring pools
  v3: rebase

  drm/i915: Add batch pool details to i915_gem_objects debugfs

  To better account for the potentially large memory consumption
  of the batch pool.
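
With the single-list scheme, the pool itself reduces to roughly the
following (a sketch; whether a cached buffer is busy is derived from
obj->active rather than tracked on a separate list):

    struct i915_gem_batch_pool {
            struct drm_device *dev;
            struct list_head cache_list;
    };

This cache_list is the same list both debugfs consumers in the diff
below walk.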

v8:
- Keep cache in LRU order (danvet, from v6 1/5 feedback)
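
Concretely, LRU order falls out of always (re)inserting at the tail
when a buffer is handed out, so that _get() scans least-recently-used
buffers first; a sketch under the same assumptions as above:

    if (obj == NULL) {
            obj = i915_gem_alloc_object(pool->dev, size);
            if (obj == NULL)
                    return ERR_PTR(-ENOMEM);

            list_add_tail(&obj->batch_pool_list, &pool->cache_list);
    } else {
            /* Keep the cache in LRU order: MRU at the tail */
            list_move_tail(&obj->batch_pool_list, &pool->cache_list);
    }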

Issue: VIZ-4719
Signed-off-by: Brad Volkin <bradley.d.volkin@intel.com>
Reviewed-by: Jon Bloomfield <jon.bloomfield@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 8d2988a..e515aad 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -360,6 +360,33 @@
 	return 0;
 }
 
+#define print_file_stats(m, name, stats) \
+	seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu global, %zu shared, %zu unbound)\n", \
+		   name, \
+		   stats.count, \
+		   stats.total, \
+		   stats.active, \
+		   stats.inactive, \
+		   stats.global, \
+		   stats.shared, \
+		   stats.unbound)
+
+static void print_batch_pool_stats(struct seq_file *m,
+				   struct drm_i915_private *dev_priv)
+{
+	struct drm_i915_gem_object *obj;
+	struct file_stats stats;
+
+	memset(&stats, 0, sizeof(stats));
+
+	list_for_each_entry(obj,
+			    &dev_priv->mm.batch_pool.cache_list,
+			    batch_pool_list)
+		per_file_stats(0, obj, &stats);
+
+	print_file_stats(m, "batch pool", stats);
+}
+
 #define count_vmas(list, member) do { \
 	list_for_each_entry(vma, list, member) { \
 		size += i915_gem_obj_ggtt_size(vma->obj); \
@@ -442,6 +469,9 @@
 		   dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
 
 	seq_putc(m, '\n');
+	print_batch_pool_stats(m, dev_priv);
+
+	seq_putc(m, '\n');
 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
 		struct file_stats stats;
 		struct task_struct *task;
@@ -459,15 +489,7 @@
 		 */
 		rcu_read_lock();
 		task = pid_task(file->pid, PIDTYPE_PID);
-		seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu global, %zu shared, %zu unbound)\n",
-			   task ? task->comm : "<unknown>",
-			   stats.count,
-			   stats.total,
-			   stats.active,
-			   stats.inactive,
-			   stats.global,
-			   stats.shared,
-			   stats.unbound);
+		print_file_stats(m, task ? task->comm : "<unknown>", stats);
 		rcu_read_unlock();
 	}
 
@@ -584,6 +606,36 @@
 	return 0;
 }
 
+static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj;
+	int count = 0;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	seq_puts(m, "cache:\n");
+	list_for_each_entry(obj,
+			    &dev_priv->mm.batch_pool.cache_list,
+			    batch_pool_list) {
+		seq_puts(m, "   ");
+		describe_obj(m, obj);
+		seq_putc(m, '\n');
+		count++;
+	}
+
+	seq_printf(m, "total: %d\n", count);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
 static int i915_gem_request_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = m->private;
@@ -4357,6 +4409,7 @@
 	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
 	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
 	{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
+	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
 	{"i915_frequency_info", i915_frequency_info, 0},
 	{"i915_drpc_info", i915_drpc_info, 0},
 	{"i915_emon_status", i915_emon_status, 0},