/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2017-2018 Intel Corporation
 */

#include "i915_pmu.h"
#include "intel_ringbuffer.h"
#include "i915_drv.h"

/* Frequency for the sampling timer for events which need it. */
#define FREQUENCY 200
#define PERIOD max_t(u64, 10000, NSEC_PER_SEC / FREQUENCY)
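/* At the default 200Hz this resolves to a 5ms (5,000,000ns) sampling period. */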

#define ENGINE_SAMPLE_MASK \
        (BIT(I915_SAMPLE_BUSY) | \
         BIT(I915_SAMPLE_WAIT) | \
         BIT(I915_SAMPLE_SEMA))

#define ENGINE_SAMPLE_BITS (1 << I915_PMU_SAMPLE_BITS)

static cpumask_t i915_pmu_cpumask;

static u8 engine_config_sample(u64 config)
{
        return config & I915_PMU_SAMPLE_MASK;
}

static u8 engine_event_sample(struct perf_event *event)
{
        return engine_config_sample(event->attr.config);
}

static u8 engine_event_class(struct perf_event *event)
{
        return (event->attr.config >> I915_PMU_CLASS_SHIFT) & 0xff;
}

static u8 engine_event_instance(struct perf_event *event)
{
        return (event->attr.config >> I915_PMU_SAMPLE_BITS) & 0xff;
}
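
/*
 * As decoded by the helpers above, an engine event config packs three
 * fields defined in the uapi header (include/uapi/drm/i915_drm.h): the
 * sample type in the low I915_PMU_SAMPLE_BITS bits, the engine instance
 * above that, and the engine class from I915_PMU_CLASS_SHIFT upwards.
 */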

static bool is_engine_config(u64 config)
{
        return config < __I915_PMU_OTHER(0);
}

static unsigned int config_enabled_bit(u64 config)
{
        if (is_engine_config(config))
                return engine_config_sample(config);
        else
                return ENGINE_SAMPLE_BITS + (config - __I915_PMU_OTHER(0));
}

static u64 config_enabled_mask(u64 config)
{
        return BIT_ULL(config_enabled_bit(config));
}

static bool is_engine_event(struct perf_event *event)
{
        return is_engine_config(event->attr.config);
}

static unsigned int event_enabled_bit(struct perf_event *event)
{
        return config_enabled_bit(event->attr.config);
}

static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active)
{
        u64 enable;

        /*
         * Only some counters need the sampling timer.
         *
         * We start with a bitmask of all currently enabled events.
         */
        enable = i915->pmu.enable;

        /*
         * Mask out all the ones which do not need the timer, or in
         * other words keep all the ones that could need the timer.
         */
        enable &= config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY) |
                  config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY) |
                  ENGINE_SAMPLE_MASK;

        /*
         * When the GPU is idle per-engine counters do not need to be
         * running so clear those bits out.
         */
        if (!gpu_active)
                enable &= ~ENGINE_SAMPLE_MASK;
        /*
         * Also, when software busyness tracking is available we do not
         * need the timer for the I915_SAMPLE_BUSY counter.
         *
         * Use RCS as proxy for all engines.
         */
        else if (intel_engine_supports_stats(i915->engine[RCS]))
                enable &= ~BIT(I915_SAMPLE_BUSY);

        /*
         * If some bits remain it means we need the sampling timer running.
         */
        return enable;
}

void i915_pmu_gt_parked(struct drm_i915_private *i915)
{
        if (!i915->pmu.base.event_init)
                return;

        spin_lock_irq(&i915->pmu.lock);
        /*
         * Signal sampling timer to stop if only engine events are enabled and
         * GPU went idle.
         */
        i915->pmu.timer_enabled = pmu_needs_timer(i915, false);
        spin_unlock_irq(&i915->pmu.lock);
}

static void __i915_pmu_maybe_start_timer(struct drm_i915_private *i915)
{
        if (!i915->pmu.timer_enabled && pmu_needs_timer(i915, true)) {
                i915->pmu.timer_enabled = true;
                hrtimer_start_range_ns(&i915->pmu.timer,
                                       ns_to_ktime(PERIOD), 0,
                                       HRTIMER_MODE_REL_PINNED);
        }
}

void i915_pmu_gt_unparked(struct drm_i915_private *i915)
{
        if (!i915->pmu.base.event_init)
                return;

        spin_lock_irq(&i915->pmu.lock);
        /*
         * Re-enable sampling timer when GPU goes active.
         */
        __i915_pmu_maybe_start_timer(i915);
        spin_unlock_irq(&i915->pmu.lock);
}

static bool grab_forcewake(struct drm_i915_private *i915, bool fw)
{
        if (!fw)
                intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);

        return true;
}

static void
update_sample(struct i915_pmu_sample *sample, u32 unit, u32 val)
{
        sample->cur += mul_u32_u32(val, unit);
}

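/*
 * Software sampling of the engines: each timer tick an engine is deemed
 * busy if the last submitted seqno has not yet completed, and only in
 * that case are the WAIT/SEMA bits read from RING_CTL (under forcewake,
 * and only if those events are enabled).
 */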
static void engines_sample(struct drm_i915_private *dev_priv)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        bool fw = false;

        if ((dev_priv->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
                return;

        if (!dev_priv->gt.awake)
                return;

        if (!intel_runtime_pm_get_if_in_use(dev_priv))
                return;

        for_each_engine(engine, dev_priv, id) {
                u32 current_seqno = intel_engine_get_seqno(engine);
                u32 last_seqno = intel_engine_last_submit(engine);
                u32 val;

                val = !i915_seqno_passed(current_seqno, last_seqno);

                update_sample(&engine->pmu.sample[I915_SAMPLE_BUSY],
                              PERIOD, val);

                if (val && (engine->pmu.enable &
                    (BIT(I915_SAMPLE_WAIT) | BIT(I915_SAMPLE_SEMA)))) {
                        fw = grab_forcewake(dev_priv, fw);

                        val = I915_READ_FW(RING_CTL(engine->mmio_base));
                } else {
                        val = 0;
                }

                update_sample(&engine->pmu.sample[I915_SAMPLE_WAIT],
                              PERIOD, !!(val & RING_WAIT));

                update_sample(&engine->pmu.sample[I915_SAMPLE_SEMA],
                              PERIOD, !!(val & RING_WAIT_SEMAPHORE));
        }

        if (fw)
                intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

        intel_runtime_pm_put(dev_priv);
}

static void frequency_sample(struct drm_i915_private *dev_priv)
{
        if (dev_priv->pmu.enable &
            config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
                u32 val;

                val = dev_priv->gt_pm.rps.cur_freq;
                if (dev_priv->gt.awake &&
                    intel_runtime_pm_get_if_in_use(dev_priv)) {
                        val = intel_get_cagf(dev_priv,
                                             I915_READ_NOTRACE(GEN6_RPSTAT1));
                        intel_runtime_pm_put(dev_priv);
                }

                update_sample(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT],
                              1, intel_gpu_freq(dev_priv, val));
        }

        if (dev_priv->pmu.enable &
            config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
                update_sample(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_REQ], 1,
                              intel_gpu_freq(dev_priv,
                                             dev_priv->gt_pm.rps.cur_freq));
        }
}

static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
{
        struct drm_i915_private *i915 =
                container_of(hrtimer, struct drm_i915_private, pmu.timer);

        if (!READ_ONCE(i915->pmu.timer_enabled))
                return HRTIMER_NORESTART;

        engines_sample(i915);
        frequency_sample(i915);

        hrtimer_forward_now(hrtimer, ns_to_ktime(PERIOD));
        return HRTIMER_RESTART;
}
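
/*
 * Example usage (not part of this file): once the PMU is registered the
 * counters are read through the core perf interface, e.g. something like:
 *
 *   perf stat -a -e i915/rc6-residency/ -e i915/actual-frequency/ sleep 1
 *
 * Exact event names depend on the attributes built by
 * create_event_attributes() below.
 */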

static u64 count_interrupts(struct drm_i915_private *i915)
{
        /* open-coded kstat_irqs() */
        struct irq_desc *desc = irq_to_desc(i915->drm.pdev->irq);
        u64 sum = 0;
        int cpu;

        if (!desc || !desc->kstat_irqs)
                return 0;

        for_each_possible_cpu(cpu)
                sum += *per_cpu_ptr(desc->kstat_irqs, cpu);

        return sum;
}

static void engine_event_destroy(struct perf_event *event)
{
        struct drm_i915_private *i915 =
                container_of(event->pmu, typeof(*i915), pmu.base);
        struct intel_engine_cs *engine;

        engine = intel_engine_lookup_user(i915,
                                          engine_event_class(event),
                                          engine_event_instance(event));
        if (WARN_ON_ONCE(!engine))
                return;

        if (engine_event_sample(event) == I915_SAMPLE_BUSY &&
            intel_engine_supports_stats(engine))
                intel_disable_engine_stats(engine);
}

static void i915_pmu_event_destroy(struct perf_event *event)
{
        WARN_ON(event->parent);

        if (is_engine_event(event))
                engine_event_destroy(event);
}

static int
engine_event_status(struct intel_engine_cs *engine,
                    enum drm_i915_pmu_engine_sample sample)
{
        switch (sample) {
        case I915_SAMPLE_BUSY:
        case I915_SAMPLE_WAIT:
                break;
        case I915_SAMPLE_SEMA:
                if (INTEL_GEN(engine->i915) < 6)
                        return -ENODEV;
                break;
        default:
                return -ENOENT;
        }

        return 0;
}

static int
config_status(struct drm_i915_private *i915, u64 config)
{
        switch (config) {
        case I915_PMU_ACTUAL_FREQUENCY:
                if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
                        /* Requires a mutex for sampling! */
                        return -ENODEV;
                /* Fall-through. */
        case I915_PMU_REQUESTED_FREQUENCY:
                if (INTEL_GEN(i915) < 6)
                        return -ENODEV;
                break;
        case I915_PMU_INTERRUPTS:
                break;
        case I915_PMU_RC6_RESIDENCY:
                if (!HAS_RC6(i915))
                        return -ENODEV;
                break;
        default:
                return -ENOENT;
        }

        return 0;
}

static int engine_event_init(struct perf_event *event)
{
        struct drm_i915_private *i915 =
                container_of(event->pmu, typeof(*i915), pmu.base);
        struct intel_engine_cs *engine;
        u8 sample;
        int ret;

        engine = intel_engine_lookup_user(i915, engine_event_class(event),
                                          engine_event_instance(event));
        if (!engine)
                return -ENODEV;

        sample = engine_event_sample(event);
        ret = engine_event_status(engine, sample);
        if (ret)
                return ret;

        if (sample == I915_SAMPLE_BUSY && intel_engine_supports_stats(engine))
                ret = intel_enable_engine_stats(engine);

        return ret;
}

static int i915_pmu_event_init(struct perf_event *event)
{
        struct drm_i915_private *i915 =
                container_of(event->pmu, typeof(*i915), pmu.base);
        int ret;

        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        /* unsupported modes and filters */
        if (event->attr.sample_period) /* no sampling */
                return -EINVAL;

        if (has_branch_stack(event))
                return -EOPNOTSUPP;

        if (event->cpu < 0)
                return -EINVAL;

        /* only allow running on one cpu at a time */
        if (!cpumask_test_cpu(event->cpu, &i915_pmu_cpumask))
                return -EINVAL;

        if (is_engine_event(event))
                ret = engine_event_init(event);
        else
                ret = config_status(i915, event->attr.config);
        if (ret)
                return ret;

        if (!event->parent)
                event->destroy = i915_pmu_event_destroy;

        return 0;
}

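/*
 * RC6 residency is reported as the sum of all RC6 flavours supported by
 * the platform (RC6, RC6p, RC6pp), in nanoseconds.
 */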
static u64 __get_rc6(struct drm_i915_private *i915)
{
        u64 val;

        val = intel_rc6_residency_ns(i915,
                                     IS_VALLEYVIEW(i915) ?
                                     VLV_GT_RENDER_RC6 :
                                     GEN6_GT_GFX_RC6);

        if (HAS_RC6p(i915))
                val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6p);

        if (HAS_RC6pp(i915))
                val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6pp);

        return val;
}

static u64 get_rc6(struct drm_i915_private *i915)
{
#if IS_ENABLED(CONFIG_PM)
        unsigned long flags;
        u64 val;

        if (intel_runtime_pm_get_if_in_use(i915)) {
                val = __get_rc6(i915);
                intel_runtime_pm_put(i915);

                /*
                 * If we are coming back from being runtime suspended we must
                 * be careful not to report a larger value than returned
                 * previously.
                 */

                spin_lock_irqsave(&i915->pmu.lock, flags);

                if (val >= i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
                        i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
                        i915->pmu.sample[__I915_SAMPLE_RC6].cur = val;
                } else {
                        val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
                }

                spin_unlock_irqrestore(&i915->pmu.lock, flags);
        } else {
                struct pci_dev *pdev = i915->drm.pdev;
                struct device *kdev = &pdev->dev;

                /*
                 * We are runtime suspended.
                 *
                 * Report the delta from when the device was suspended to now,
                 * on top of the last known real value, as the approximated RC6
                 * counter value.
                 */
                spin_lock_irqsave(&i915->pmu.lock, flags);
                spin_lock(&kdev->power.lock);

                if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
                        i915->pmu.suspended_jiffies_last =
                                                kdev->power.suspended_jiffies;

                val = kdev->power.suspended_jiffies -
                      i915->pmu.suspended_jiffies_last;
                val += jiffies - kdev->power.accounting_timestamp;

                spin_unlock(&kdev->power.lock);

                val = jiffies_to_nsecs(val);
                val += i915->pmu.sample[__I915_SAMPLE_RC6].cur;
                i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;

                spin_unlock_irqrestore(&i915->pmu.lock, flags);
        }

        return val;
#else
        return __get_rc6(i915);
#endif
}

static u64 __i915_pmu_event_read(struct perf_event *event)
{
        struct drm_i915_private *i915 =
                container_of(event->pmu, typeof(*i915), pmu.base);
        u64 val = 0;

        if (is_engine_event(event)) {
                u8 sample = engine_event_sample(event);
                struct intel_engine_cs *engine;

                engine = intel_engine_lookup_user(i915,
                                                  engine_event_class(event),
                                                  engine_event_instance(event));

                if (WARN_ON_ONCE(!engine)) {
                        /* Do nothing */
                } else if (sample == I915_SAMPLE_BUSY &&
                           intel_engine_supports_stats(engine)) {
                        val = ktime_to_ns(intel_engine_get_busy_time(engine));
                } else {
                        val = engine->pmu.sample[sample].cur;
                }
        } else {
                switch (event->attr.config) {
                case I915_PMU_ACTUAL_FREQUENCY:
                        val =
                           div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_ACT].cur,
                                   FREQUENCY);
                        break;
                case I915_PMU_REQUESTED_FREQUENCY:
                        val =
                           div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_REQ].cur,
                                   FREQUENCY);
                        break;
                case I915_PMU_INTERRUPTS:
                        val = count_interrupts(i915);
                        break;
                case I915_PMU_RC6_RESIDENCY:
                        val = get_rc6(i915);
                        break;
                }
        }

        return val;
}

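/*
 * Fold the delta since the previous read into the perf event count. The
 * cmpxchg loop retries if a concurrent reader updated prev_count first,
 * so each delta is accounted exactly once.
 */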
static void i915_pmu_event_read(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        u64 prev, new;

again:
        prev = local64_read(&hwc->prev_count);
        new = __i915_pmu_event_read(event);

        if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev)
                goto again;

        local64_add(new - prev, &event->count);
}

static void i915_pmu_enable(struct perf_event *event)
{
        struct drm_i915_private *i915 =
                container_of(event->pmu, typeof(*i915), pmu.base);
        unsigned int bit = event_enabled_bit(event);
        unsigned long flags;

        spin_lock_irqsave(&i915->pmu.lock, flags);

        /*
         * Update the bitmask of enabled events and increment
         * the event reference counter.
         */
        GEM_BUG_ON(bit >= I915_PMU_MASK_BITS);
        GEM_BUG_ON(i915->pmu.enable_count[bit] == ~0);
        i915->pmu.enable |= BIT_ULL(bit);
        i915->pmu.enable_count[bit]++;

        /*
         * Start the sampling timer if needed and not already enabled.
         */
        __i915_pmu_maybe_start_timer(i915);

        /*
         * For per-engine events the bitmask and reference counting
         * is stored per engine.
         */
        if (is_engine_event(event)) {
                u8 sample = engine_event_sample(event);
                struct intel_engine_cs *engine;

                engine = intel_engine_lookup_user(i915,
                                                  engine_event_class(event),
                                                  engine_event_instance(event));
                GEM_BUG_ON(!engine);
                engine->pmu.enable |= BIT(sample);

                GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
                GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);
                engine->pmu.enable_count[sample]++;
        }

        spin_unlock_irqrestore(&i915->pmu.lock, flags);

        /*
         * Store the current counter value so we can report the correct delta
         * for all listeners, even when the event was already enabled and has
         * an existing non-zero value.
         */
        local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));
}

static void i915_pmu_disable(struct perf_event *event)
{
        struct drm_i915_private *i915 =
                container_of(event->pmu, typeof(*i915), pmu.base);
        unsigned int bit = event_enabled_bit(event);
        unsigned long flags;

        spin_lock_irqsave(&i915->pmu.lock, flags);

        if (is_engine_event(event)) {
                u8 sample = engine_event_sample(event);
                struct intel_engine_cs *engine;

                engine = intel_engine_lookup_user(i915,
                                                  engine_event_class(event),
                                                  engine_event_instance(event));
                GEM_BUG_ON(!engine);
                GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
                GEM_BUG_ON(engine->pmu.enable_count[sample] == 0);
                /*
                 * Decrement the reference count and clear the enabled
                 * bitmask when the last listener on an event goes away.
                 */
                if (--engine->pmu.enable_count[sample] == 0)
                        engine->pmu.enable &= ~BIT(sample);
        }

        GEM_BUG_ON(bit >= I915_PMU_MASK_BITS);
        GEM_BUG_ON(i915->pmu.enable_count[bit] == 0);
        /*
         * Decrement the reference count and clear the enabled
         * bitmask when the last listener on an event goes away.
         */
        if (--i915->pmu.enable_count[bit] == 0) {
                i915->pmu.enable &= ~BIT_ULL(bit);
                i915->pmu.timer_enabled &= pmu_needs_timer(i915, true);
        }

        spin_unlock_irqrestore(&i915->pmu.lock, flags);
}

static void i915_pmu_event_start(struct perf_event *event, int flags)
{
        i915_pmu_enable(event);
        event->hw.state = 0;
}

static void i915_pmu_event_stop(struct perf_event *event, int flags)
{
        if (flags & PERF_EF_UPDATE)
                i915_pmu_event_read(event);
        i915_pmu_disable(event);
        event->hw.state = PERF_HES_STOPPED;
}

static int i915_pmu_event_add(struct perf_event *event, int flags)
{
        if (flags & PERF_EF_START)
                i915_pmu_event_start(event, flags);

        return 0;
}

static void i915_pmu_event_del(struct perf_event *event, int flags)
{
        i915_pmu_event_stop(event, PERF_EF_UPDATE);
}

static int i915_pmu_event_event_idx(struct perf_event *event)
{
        return 0;
}

struct i915_str_attribute {
        struct device_attribute attr;
        const char *str;
};

static ssize_t i915_pmu_format_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        struct i915_str_attribute *eattr;

        eattr = container_of(attr, struct i915_str_attribute, attr);
        return sprintf(buf, "%s\n", eattr->str);
}

#define I915_PMU_FORMAT_ATTR(_name, _config) \
        (&((struct i915_str_attribute[]) { \
                { .attr = __ATTR(_name, 0444, i915_pmu_format_show, NULL), \
                  .str = _config, } \
        })[0].attr.attr)

static struct attribute *i915_pmu_format_attrs[] = {
        I915_PMU_FORMAT_ATTR(i915_eventid, "config:0-20"),
        NULL,
};

static const struct attribute_group i915_pmu_format_attr_group = {
        .name = "format",
        .attrs = i915_pmu_format_attrs,
};

struct i915_ext_attribute {
        struct device_attribute attr;
        unsigned long val;
};

static ssize_t i915_pmu_event_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        struct i915_ext_attribute *eattr;

        eattr = container_of(attr, struct i915_ext_attribute, attr);
        return sprintf(buf, "config=0x%lx\n", eattr->val);
}

static struct attribute_group i915_pmu_events_attr_group = {
        .name = "events",
        /* Patch in attrs at runtime. */
};

static ssize_t
i915_pmu_get_attr_cpumask(struct device *dev,
                          struct device_attribute *attr,
                          char *buf)
{
        return cpumap_print_to_pagebuf(true, buf, &i915_pmu_cpumask);
}

static DEVICE_ATTR(cpumask, 0444, i915_pmu_get_attr_cpumask, NULL);

static struct attribute *i915_cpumask_attrs[] = {
        &dev_attr_cpumask.attr,
        NULL,
};

static const struct attribute_group i915_pmu_cpumask_attr_group = {
        .attrs = i915_cpumask_attrs,
};

static const struct attribute_group *i915_pmu_attr_groups[] = {
        &i915_pmu_format_attr_group,
        &i915_pmu_events_attr_group,
        &i915_pmu_cpumask_attr_group,
        NULL
};

#define __event(__config, __name, __unit) \
{ \
        .config = (__config), \
        .name = (__name), \
        .unit = (__unit), \
}

#define __engine_event(__sample, __name) \
{ \
        .sample = (__sample), \
        .name = (__name), \
}

static struct i915_ext_attribute *
add_i915_attr(struct i915_ext_attribute *attr, const char *name, u64 config)
{
        sysfs_attr_init(&attr->attr.attr);
        attr->attr.attr.name = name;
        attr->attr.attr.mode = 0444;
        attr->attr.show = i915_pmu_event_show;
        attr->val = config;

        return ++attr;
}

static struct perf_pmu_events_attr *
add_pmu_attr(struct perf_pmu_events_attr *attr, const char *name,
             const char *str)
{
        sysfs_attr_init(&attr->attr.attr);
        attr->attr.attr.name = name;
        attr->attr.attr.mode = 0444;
        attr->attr.show = perf_event_sysfs_show;
        attr->event_str = str;

        return ++attr;
}

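/*
 * Build the "events" sysfs attributes. The result is visible to userspace
 * through the usual perf layout, roughly (a sketch, exact names vary with
 * the platform and enabled engines):
 *
 *   /sys/bus/event_source/devices/i915/events/rc6-residency
 *   /sys/bus/event_source/devices/i915/events/rc6-residency.unit
 *   /sys/bus/event_source/devices/i915/events/<engine>-busy
 */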
static struct attribute **
create_event_attributes(struct drm_i915_private *i915)
{
        static const struct {
                u64 config;
                const char *name;
                const char *unit;
        } events[] = {
                __event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "MHz"),
                __event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "MHz"),
                __event(I915_PMU_INTERRUPTS, "interrupts", NULL),
                __event(I915_PMU_RC6_RESIDENCY, "rc6-residency", "ns"),
        };
        static const struct {
                enum drm_i915_pmu_engine_sample sample;
                char *name;
        } engine_events[] = {
                __engine_event(I915_SAMPLE_BUSY, "busy"),
                __engine_event(I915_SAMPLE_SEMA, "sema"),
                __engine_event(I915_SAMPLE_WAIT, "wait"),
        };
        unsigned int count = 0;
        struct perf_pmu_events_attr *pmu_attr = NULL, *pmu_iter;
        struct i915_ext_attribute *i915_attr = NULL, *i915_iter;
        struct attribute **attr = NULL, **attr_iter;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        unsigned int i;

        /* Count how many counters we will be exposing. */
        for (i = 0; i < ARRAY_SIZE(events); i++) {
                if (!config_status(i915, events[i].config))
                        count++;
        }

        for_each_engine(engine, i915, id) {
                for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
                        if (!engine_event_status(engine,
                                                 engine_events[i].sample))
                                count++;
                }
        }

        /* Allocate attribute objects and table. */
        i915_attr = kcalloc(count, sizeof(*i915_attr), GFP_KERNEL);
        if (!i915_attr)
                goto err_alloc;

        pmu_attr = kcalloc(count, sizeof(*pmu_attr), GFP_KERNEL);
        if (!pmu_attr)
                goto err_alloc;

        /* Max one pointer of each attribute type plus a termination entry. */
        attr = kcalloc(count * 2 + 1, sizeof(*attr), GFP_KERNEL);
        if (!attr)
                goto err_alloc;

        i915_iter = i915_attr;
        pmu_iter = pmu_attr;
        attr_iter = attr;

        /* Initialize supported non-engine counters. */
        for (i = 0; i < ARRAY_SIZE(events); i++) {
                char *str;

                if (config_status(i915, events[i].config))
                        continue;

                str = kstrdup(events[i].name, GFP_KERNEL);
                if (!str)
                        goto err;

                *attr_iter++ = &i915_iter->attr.attr;
                i915_iter = add_i915_attr(i915_iter, str, events[i].config);

                if (events[i].unit) {
                        str = kasprintf(GFP_KERNEL, "%s.unit", events[i].name);
                        if (!str)
                                goto err;

                        *attr_iter++ = &pmu_iter->attr.attr;
                        pmu_iter = add_pmu_attr(pmu_iter, str, events[i].unit);
                }
        }

        /* Initialize supported engine counters. */
        for_each_engine(engine, i915, id) {
                for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
                        char *str;

                        if (engine_event_status(engine,
                                                engine_events[i].sample))
                                continue;

                        str = kasprintf(GFP_KERNEL, "%s-%s",
                                        engine->name, engine_events[i].name);
                        if (!str)
                                goto err;

                        *attr_iter++ = &i915_iter->attr.attr;
                        i915_iter =
                                add_i915_attr(i915_iter, str,
                                              __I915_PMU_ENGINE(engine->uabi_class,
                                                                engine->instance,
                                                                engine_events[i].sample));

                        str = kasprintf(GFP_KERNEL, "%s-%s.unit",
                                        engine->name, engine_events[i].name);
                        if (!str)
                                goto err;

                        *attr_iter++ = &pmu_iter->attr.attr;
                        pmu_iter = add_pmu_attr(pmu_iter, str, "ns");
                }
        }

        i915->pmu.i915_attr = i915_attr;
        i915->pmu.pmu_attr = pmu_attr;

        return attr;

err:;
        for (attr_iter = attr; *attr_iter; attr_iter++)
                kfree((*attr_iter)->name);

err_alloc:
        kfree(attr);
        kfree(i915_attr);
        kfree(pmu_attr);

        return NULL;
}

static void free_event_attributes(struct drm_i915_private *i915)
{
        struct attribute **attr_iter = i915_pmu_events_attr_group.attrs;

        for (; *attr_iter; attr_iter++)
                kfree((*attr_iter)->name);

        kfree(i915_pmu_events_attr_group.attrs);
        kfree(i915->pmu.i915_attr);
        kfree(i915->pmu.pmu_attr);

        i915_pmu_events_attr_group.attrs = NULL;
        i915->pmu.i915_attr = NULL;
        i915->pmu.pmu_attr = NULL;
}

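/*
 * CPU hotplug notifiers keep i915_pmu_cpumask pointing at a single online
 * CPU which acts as the designated reader; if that CPU goes away the perf
 * context is migrated to another one and the mask updated.
 */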
static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
{
        struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);

        GEM_BUG_ON(!pmu->base.event_init);

        /* Select the first online CPU as a designated reader. */
        if (!cpumask_weight(&i915_pmu_cpumask))
                cpumask_set_cpu(cpu, &i915_pmu_cpumask);

        return 0;
}

static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
        struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);
        unsigned int target;

        GEM_BUG_ON(!pmu->base.event_init);

        if (cpumask_test_and_clear_cpu(cpu, &i915_pmu_cpumask)) {
                target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
                /* Migrate events if there is a valid target */
                if (target < nr_cpu_ids) {
                        cpumask_set_cpu(target, &i915_pmu_cpumask);
                        perf_pmu_migrate_context(&pmu->base, cpu, target);
                }
        }

        return 0;
}

static enum cpuhp_state cpuhp_slot = CPUHP_INVALID;

static int i915_pmu_register_cpuhp_state(struct drm_i915_private *i915)
{
        enum cpuhp_state slot;
        int ret;

        ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
                                      "perf/x86/intel/i915:online",
                                      i915_pmu_cpu_online,
                                      i915_pmu_cpu_offline);
        if (ret < 0)
                return ret;

        slot = ret;
        ret = cpuhp_state_add_instance(slot, &i915->pmu.node);
        if (ret) {
                cpuhp_remove_multi_state(slot);
                return ret;
        }

        cpuhp_slot = slot;
        return 0;
}

static void i915_pmu_unregister_cpuhp_state(struct drm_i915_private *i915)
{
        WARN_ON(cpuhp_slot == CPUHP_INVALID);
        WARN_ON(cpuhp_state_remove_instance(cpuhp_slot, &i915->pmu.node));
        cpuhp_remove_multi_state(cpuhp_slot);
}

void i915_pmu_register(struct drm_i915_private *i915)
{
        int ret;

        if (INTEL_GEN(i915) <= 2) {
                DRM_INFO("PMU not supported for this GPU.");
                return;
        }

        i915_pmu_events_attr_group.attrs = create_event_attributes(i915);
        if (!i915_pmu_events_attr_group.attrs) {
                ret = -ENOMEM;
                goto err;
        }

        i915->pmu.base.attr_groups = i915_pmu_attr_groups;
        i915->pmu.base.task_ctx_nr = perf_invalid_context;
        i915->pmu.base.event_init = i915_pmu_event_init;
        i915->pmu.base.add = i915_pmu_event_add;
        i915->pmu.base.del = i915_pmu_event_del;
        i915->pmu.base.start = i915_pmu_event_start;
        i915->pmu.base.stop = i915_pmu_event_stop;
        i915->pmu.base.read = i915_pmu_event_read;
        i915->pmu.base.event_idx = i915_pmu_event_event_idx;

        spin_lock_init(&i915->pmu.lock);
        hrtimer_init(&i915->pmu.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        i915->pmu.timer.function = i915_sample;

        ret = perf_pmu_register(&i915->pmu.base, "i915", -1);
        if (ret)
                goto err;

        ret = i915_pmu_register_cpuhp_state(i915);
        if (ret)
                goto err_unreg;

        return;

err_unreg:
        perf_pmu_unregister(&i915->pmu.base);
err:
        i915->pmu.base.event_init = NULL;
        free_event_attributes(i915);
        DRM_NOTE("Failed to register PMU! (err=%d)\n", ret);
}

void i915_pmu_unregister(struct drm_i915_private *i915)
{
        if (!i915->pmu.base.event_init)
                return;

        WARN_ON(i915->pmu.enable);

        hrtimer_cancel(&i915->pmu.timer);

        i915_pmu_unregister_cpuhp_state(i915);

        perf_pmu_unregister(&i915->pmu.base);
        i915->pmu.base.event_init = NULL;
        free_event_attributes(i915);
}