drm/i915: introduce for_each_engine_id()
This is equivalent to the existing for_each_engine() macro and will
replace it wherever the third argument *is* actually wanted (in most
places it is not used). The third argument is renamed to emphasise
that it is an engine id (of type enum intel_engine_id). All callers of
the macro that actually need the third argument are converted to the
new version, and the loop variable (generally 'i') is renamed to 'id'.
Other callers, where the third argument is unused, are left untouched
for now; they will be updated in the next patch.
Signed-off-by: Dave Gordon <david.s.gordon@intel.com>
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e0ba3e3..77dce52 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -132,7 +132,7 @@
struct intel_engine_cs *engine;
struct i915_vma *vma;
int pin_count = 0;
- int i;
+ enum intel_engine_id id;
seq_printf(m, "%pK: %s%s%s%s %8zdKiB %02x %02x [ ",
&obj->base,
@@ -143,9 +143,9 @@
obj->base.size / 1024,
obj->base.read_domains,
obj->base.write_domain);
- for_each_engine(engine, dev_priv, i)
+ for_each_engine_id(engine, dev_priv, id)
seq_printf(m, "%x ",
- i915_gem_request_get_seqno(obj->last_read_req[i]));
+ i915_gem_request_get_seqno(obj->last_read_req[id]));
seq_printf(m, "] %x %x%s%s%s",
i915_gem_request_get_seqno(obj->last_write_req),
i915_gem_request_get_seqno(obj->last_fenced_req),
@@ -1334,7 +1334,8 @@
u64 acthd[I915_NUM_ENGINES];
u32 seqno[I915_NUM_ENGINES];
u32 instdone[I915_NUM_INSTDONE_REG];
- int i, j;
+ enum intel_engine_id id;
+ int j;
if (!i915.enable_hangcheck) {
seq_printf(m, "Hangcheck disabled\n");
@@ -1343,9 +1344,9 @@
intel_runtime_pm_get(dev_priv);
- for_each_engine(engine, dev_priv, i) {
- seqno[i] = engine->get_seqno(engine, false);
- acthd[i] = intel_ring_get_active_head(engine);
+ for_each_engine_id(engine, dev_priv, id) {
+ seqno[id] = engine->get_seqno(engine, false);
+ acthd[id] = intel_ring_get_active_head(engine);
}
i915_get_extra_instdone(dev, instdone);
@@ -1359,13 +1360,13 @@
} else
seq_printf(m, "Hangcheck inactive\n");
- for_each_engine(engine, dev_priv, i) {
+ for_each_engine_id(engine, dev_priv, id) {
seq_printf(m, "%s:\n", engine->name);
seq_printf(m, "\tseqno = %x [current %x]\n",
- engine->hangcheck.seqno, seqno[i]);
+ engine->hangcheck.seqno, seqno[id]);
seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
(long long)engine->hangcheck.acthd,
- (long long)acthd[i]);
+ (long long)acthd[id]);
seq_printf(m, "\tscore = %d\n", engine->hangcheck.score);
seq_printf(m, "\taction = %d\n", engine->hangcheck.action);
@@ -1947,7 +1948,8 @@
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
struct intel_context *ctx;
- int ret, i;
+ enum intel_engine_id id;
+ int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
@@ -1965,11 +1967,11 @@
if (i915.enable_execlists) {
seq_putc(m, '\n');
- for_each_engine(engine, dev_priv, i) {
+ for_each_engine_id(engine, dev_priv, id) {
struct drm_i915_gem_object *ctx_obj =
- ctx->engine[i].state;
+ ctx->engine[id].state;
struct intel_ringbuffer *ringbuf =
- ctx->engine[i].ringbuf;
+ ctx->engine[id].ringbuf;
seq_printf(m, "%s: ", engine->name);
if (ctx_obj)
@@ -3134,7 +3136,8 @@
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
int num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
- int i, j, ret;
+ enum intel_engine_id id;
+ int j, ret;
if (!i915_semaphore_is_enabled(dev)) {
seq_puts(m, "Semaphores are disabled\n");
@@ -3153,14 +3156,14 @@
page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0);
seqno = (uint64_t *)kmap_atomic(page);
- for_each_engine(engine, dev_priv, i) {
+ for_each_engine_id(engine, dev_priv, id) {
uint64_t offset;
seq_printf(m, "%s\n", engine->name);
seq_puts(m, " Last signal:");
for (j = 0; j < num_rings; j++) {
- offset = i * I915_NUM_ENGINES + j;
+ offset = id * I915_NUM_ENGINES + j;
seq_printf(m, "0x%08llx (0x%02llx) ",
seqno[offset], offset * 8);
}
@@ -3168,7 +3171,7 @@
seq_puts(m, " Last wait: ");
for (j = 0; j < num_rings; j++) {
- offset = i + (j * I915_NUM_ENGINES);
+ offset = id + (j * I915_NUM_ENGINES);
seq_printf(m, "0x%08llx (0x%02llx) ",
seqno[offset], offset * 8);
}
@@ -3178,7 +3181,7 @@
kunmap_atomic(seqno);
} else {
seq_puts(m, " Last signal:");
- for_each_engine(engine, dev_priv, i)
+ for_each_engine(engine, dev_priv, id)
for (j = 0; j < num_rings; j++)
seq_printf(m, "0x%08x\n",
I915_READ(engine->semaphore.mbox.signal[j]));
@@ -3186,7 +3189,7 @@
}
seq_puts(m, "\nSync seqno:\n");
- for_each_engine(engine, dev_priv, i) {
+ for_each_engine(engine, dev_priv, id) {
for (j = 0; j < num_rings; j++) {
seq_printf(m, " 0x%08x ",
engine->semaphore.sync_seqno[j]);
@@ -3236,6 +3239,7 @@
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_workarounds *workarounds = &dev_priv->workarounds;
+ enum intel_engine_id id;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
@@ -3244,9 +3248,9 @@
intel_runtime_pm_get(dev_priv);
seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
- for_each_engine(engine, dev_priv, i)
+ for_each_engine_id(engine, dev_priv, id)
seq_printf(m, "HW whitelist count for %s: %d\n",
- engine->name, workarounds->hw_whitelist_count[i]);
+ engine->name, workarounds->hw_whitelist_count[id]);
for (i = 0; i < workarounds->count; ++i) {
i915_reg_t addr;
u32 mask, value, read;