/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/perf_event.h>
#include <linux/pm_runtime.h>

#include "i915_drv.h"
#include "i915_pmu.h"
#include "intel_ringbuffer.h"

/* Frequency for the sampling timer for events which need it. */
#define FREQUENCY 200
#define PERIOD max_t(u64, 10000, NSEC_PER_SEC / FREQUENCY)

#define ENGINE_SAMPLE_MASK \
	(BIT(I915_SAMPLE_BUSY) | \
	 BIT(I915_SAMPLE_WAIT) | \
	 BIT(I915_SAMPLE_SEMA))

#define ENGINE_SAMPLE_BITS (1 << I915_PMU_SAMPLE_BITS)

static cpumask_t i915_pmu_cpumask = CPU_MASK_NONE;

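/*
 * Engine events encode the engine class, engine instance and sample type
 * (busy/wait/sema) in event->attr.config, following the I915_PMU_* layout
 * from the uapi header. The helpers below unpack those fields.
 */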
static u8 engine_config_sample(u64 config)
{
	return config & I915_PMU_SAMPLE_MASK;
}

static u8 engine_event_sample(struct perf_event *event)
{
	return engine_config_sample(event->attr.config);
}

static u8 engine_event_class(struct perf_event *event)
{
	return (event->attr.config >> I915_PMU_CLASS_SHIFT) & 0xff;
}

static u8 engine_event_instance(struct perf_event *event)
{
	return (event->attr.config >> I915_PMU_SAMPLE_BITS) & 0xff;
}

static bool is_engine_config(u64 config)
{
	return config < __I915_PMU_OTHER(0);
}

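/*
 * The PMU-wide enable bitmask reserves the low ENGINE_SAMPLE_BITS bits for
 * the per-engine sample types; the "other" events (frequency counters) are
 * mapped onto the bits above them.
 */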
static unsigned int config_enabled_bit(u64 config)
{
	if (is_engine_config(config))
		return engine_config_sample(config);
	else
		return ENGINE_SAMPLE_BITS + (config - __I915_PMU_OTHER(0));
}

static u64 config_enabled_mask(u64 config)
{
	return BIT_ULL(config_enabled_bit(config));
}

static bool is_engine_event(struct perf_event *event)
{
	return is_engine_config(event->attr.config);
}

static unsigned int event_enabled_bit(struct perf_event *event)
{
	return config_enabled_bit(event->attr.config);
}

static bool grab_forcewake(struct drm_i915_private *i915, bool fw)
{
	if (!fw)
		intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);

	return true;
}

static void
update_sample(struct i915_pmu_sample *sample, u32 unit, u32 val)
{
	/*
	 * Since we are doing stochastic sampling for these counters,
	 * average the delta with the previous value for better accuracy.
	 */
	sample->cur += div_u64(mul_u32_u32(sample->prev + val, unit), 2);
	sample->prev = val;
}

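/*
 * Sample the busy, wait and semaphore signals for each engine. Busyness is
 * approximated from whether the last submitted seqno has completed; the wait
 * and semaphore states are read from RING_CTL, taking forcewake only when an
 * engine is busy and one of those samples is actually enabled.
 */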
static void engines_sample(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	bool fw = false;

	if ((dev_priv->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
		return;

	if (!dev_priv->gt.awake)
		return;

	if (!intel_runtime_pm_get_if_in_use(dev_priv))
		return;

	for_each_engine(engine, dev_priv, id) {
		u32 current_seqno = intel_engine_get_seqno(engine);
		u32 last_seqno = intel_engine_last_submit(engine);
		u32 val;

		val = !i915_seqno_passed(current_seqno, last_seqno);

		update_sample(&engine->pmu.sample[I915_SAMPLE_BUSY],
			      PERIOD, val);

		if (val && (engine->pmu.enable &
		    (BIT(I915_SAMPLE_WAIT) | BIT(I915_SAMPLE_SEMA)))) {
			fw = grab_forcewake(dev_priv, fw);

			val = I915_READ_FW(RING_CTL(engine->mmio_base));
		} else {
			val = 0;
		}

		update_sample(&engine->pmu.sample[I915_SAMPLE_WAIT],
			      PERIOD, !!(val & RING_WAIT));

		update_sample(&engine->pmu.sample[I915_SAMPLE_SEMA],
			      PERIOD, !!(val & RING_WAIT_SEMAPHORE));
	}

	if (fw)
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	intel_runtime_pm_put(dev_priv);
}

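/*
 * Sample the actual and requested GPU frequencies. The actual frequency
 * (CAGF) is read from GEN6_RPSTAT1 only while the GT is awake and already
 * runtime resumed; otherwise the last requested frequency is used as the
 * best available estimate.
 */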
static void frequency_sample(struct drm_i915_private *dev_priv)
{
	if (dev_priv->pmu.enable &
	    config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
		u32 val;

		val = dev_priv->gt_pm.rps.cur_freq;
		if (dev_priv->gt.awake &&
		    intel_runtime_pm_get_if_in_use(dev_priv)) {
			val = intel_get_cagf(dev_priv,
					     I915_READ_NOTRACE(GEN6_RPSTAT1));
			intel_runtime_pm_put(dev_priv);
		}

		update_sample(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT],
			      1, intel_gpu_freq(dev_priv, val));
	}

	if (dev_priv->pmu.enable &
	    config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
		update_sample(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_REQ], 1,
			      intel_gpu_freq(dev_priv,
					     dev_priv->gt_pm.rps.cur_freq));
	}
}

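/* Timer callback: take one sample per PERIOD while any event is enabled. */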
static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
{
	struct drm_i915_private *i915 =
		container_of(hrtimer, struct drm_i915_private, pmu.timer);

	if (i915->pmu.enable == 0)
		return HRTIMER_NORESTART;

	engines_sample(i915);
	frequency_sample(i915);

	hrtimer_forward_now(hrtimer, ns_to_ktime(PERIOD));
	return HRTIMER_RESTART;
}

static void i915_pmu_event_destroy(struct perf_event *event)
{
	WARN_ON(event->parent);
}

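/*
 * Validate an engine event: the requested engine must exist and the sample
 * type must be supported (semaphore sampling needs gen6+).
 */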
static int engine_event_init(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);

	if (!intel_engine_lookup_user(i915, engine_event_class(event),
				      engine_event_instance(event)))
		return -ENODEV;

	switch (engine_event_sample(event)) {
	case I915_SAMPLE_BUSY:
	case I915_SAMPLE_WAIT:
		break;
	case I915_SAMPLE_SEMA:
		if (INTEL_GEN(i915) < 6)
			return -ENODEV;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}

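/*
 * Top-level event init: reject sampling and filtering modes this PMU cannot
 * provide, redirect the event to the designated reader CPU and validate the
 * requested counter for this platform.
 */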
static int i915_pmu_event_init(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	int cpu, ret;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* unsupported modes and filters */
	if (event->attr.sample_period) /* no sampling */
		return -EINVAL;

	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (event->cpu < 0)
		return -EINVAL;

	cpu = cpumask_any_and(&i915_pmu_cpumask,
			      topology_sibling_cpumask(event->cpu));
	if (cpu >= nr_cpu_ids)
		return -ENODEV;

	if (is_engine_event(event)) {
		ret = engine_event_init(event);
	} else {
		ret = 0;
		switch (event->attr.config) {
		case I915_PMU_ACTUAL_FREQUENCY:
			if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
				/* Requires a mutex for sampling! */
				ret = -ENODEV;
			/* Fall-through: actual frequency also needs gen6+. */
		case I915_PMU_REQUESTED_FREQUENCY:
			if (INTEL_GEN(i915) < 6)
				ret = -ENODEV;
			break;
		default:
			ret = -ENOENT;
			break;
		}
	}
	if (ret)
		return ret;

	event->cpu = cpu;
	if (!event->parent)
		event->destroy = i915_pmu_event_destroy;

	return 0;
}

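/*
 * Read the current accumulated value of a counter: the sampled busy/wait/sema
 * time for engine events, or the accumulated frequency samples scaled back by
 * the sampling frequency for the frequency events.
 */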
static u64 __i915_pmu_event_read(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	u64 val = 0;

	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));

		if (WARN_ON_ONCE(!engine)) {
			/* Do nothing */
		} else {
			val = engine->pmu.sample[sample].cur;
		}
	} else {
		switch (event->attr.config) {
		case I915_PMU_ACTUAL_FREQUENCY:
			val =
			   div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_ACT].cur,
				   FREQUENCY);
			break;
		case I915_PMU_REQUESTED_FREQUENCY:
			val =
			   div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_REQ].cur,
				   FREQUENCY);
			break;
		}
	}

	return val;
}

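/*
 * Publish the delta since the last read to perf, using a cmpxchg loop so
 * concurrent readers each account their own portion exactly once.
 */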
static void i915_pmu_event_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev, new;

again:
	prev = local64_read(&hwc->prev_count);
	new = __i915_pmu_event_read(event);

	if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev)
		goto again;

	local64_add(new - prev, &event->count);
}

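/*
 * Enable path: reference count each enabled counter (globally and, for
 * engine events, per engine) and start the sampling timer when the first
 * event is enabled.
 */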
static void i915_pmu_enable(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	unsigned int bit = event_enabled_bit(event);
	unsigned long flags;

	spin_lock_irqsave(&i915->pmu.lock, flags);

	/*
	 * Start the sampling timer when enabling the first event.
	 */
	if (i915->pmu.enable == 0)
		hrtimer_start_range_ns(&i915->pmu.timer,
				       ns_to_ktime(PERIOD), 0,
				       HRTIMER_MODE_REL_PINNED);

	/*
	 * Update the bitmask of enabled events and increment
	 * the event reference counter.
	 */
	GEM_BUG_ON(bit >= I915_PMU_MASK_BITS);
	GEM_BUG_ON(i915->pmu.enable_count[bit] == ~0);
	i915->pmu.enable |= BIT_ULL(bit);
	i915->pmu.enable_count[bit]++;

	/*
	 * For per-engine events the bitmask and reference counting
	 * are stored per engine.
	 */
	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));
		GEM_BUG_ON(!engine);
		engine->pmu.enable |= BIT(sample);

		GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
		GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);
		engine->pmu.enable_count[sample]++;
	}

	/*
	 * Store the current counter value so we can report the correct delta
	 * for all listeners, even when the event was already enabled and has
	 * an existing non-zero value.
	 */
	local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));

	spin_unlock_irqrestore(&i915->pmu.lock, flags);
}

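/*
 * Disable path: drop the per-engine and global references taken in
 * i915_pmu_enable(); the sampling timer stops itself once pmu.enable
 * reaches zero.
 */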
static void i915_pmu_disable(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	unsigned int bit = event_enabled_bit(event);
	unsigned long flags;

	spin_lock_irqsave(&i915->pmu.lock, flags);

	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));
		GEM_BUG_ON(!engine);
		GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
		GEM_BUG_ON(engine->pmu.enable_count[sample] == 0);
		/*
		 * Decrement the reference count and clear the enabled
		 * bitmask when the last listener on an event goes away.
		 */
		if (--engine->pmu.enable_count[sample] == 0)
			engine->pmu.enable &= ~BIT(sample);
	}

	GEM_BUG_ON(bit >= I915_PMU_MASK_BITS);
	GEM_BUG_ON(i915->pmu.enable_count[bit] == 0);
	/*
	 * Decrement the reference count and clear the enabled
	 * bitmask when the last listener on an event goes away.
	 */
	if (--i915->pmu.enable_count[bit] == 0)
		i915->pmu.enable &= ~BIT_ULL(bit);

	spin_unlock_irqrestore(&i915->pmu.lock, flags);
}

static void i915_pmu_event_start(struct perf_event *event, int flags)
{
	i915_pmu_enable(event);
	event->hw.state = 0;
}

static void i915_pmu_event_stop(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_UPDATE)
		i915_pmu_event_read(event);
	i915_pmu_disable(event);
	event->hw.state = PERF_HES_STOPPED;
}

static int i915_pmu_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		i915_pmu_event_start(event, flags);

	return 0;
}

static void i915_pmu_event_del(struct perf_event *event, int flags)
{
	i915_pmu_event_stop(event, PERF_EF_UPDATE);
}

static int i915_pmu_event_event_idx(struct perf_event *event)
{
	return 0;
}

static ssize_t i915_pmu_format_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct dev_ext_attribute *eattr;

	eattr = container_of(attr, struct dev_ext_attribute, attr);
	return sprintf(buf, "%s\n", (char *)eattr->var);
}

#define I915_PMU_FORMAT_ATTR(_name, _config) \
	(&((struct dev_ext_attribute[]) { \
		{ .attr = __ATTR(_name, 0444, i915_pmu_format_show, NULL), \
		  .var = (void *)_config, } \
	})[0].attr.attr)

static struct attribute *i915_pmu_format_attrs[] = {
	I915_PMU_FORMAT_ATTR(i915_eventid, "config:0-20"),
	NULL,
};

static const struct attribute_group i915_pmu_format_attr_group = {
	.name = "format",
	.attrs = i915_pmu_format_attrs,
};

static ssize_t i915_pmu_event_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct dev_ext_attribute *eattr;

	eattr = container_of(attr, struct dev_ext_attribute, attr);
	return sprintf(buf, "config=0x%lx\n", (unsigned long)eattr->var);
}

#define I915_EVENT_ATTR(_name, _config) \
	(&((struct dev_ext_attribute[]) { \
		{ .attr = __ATTR(_name, 0444, i915_pmu_event_show, NULL), \
		  .var = (void *)_config, } \
	})[0].attr.attr)

#define I915_EVENT_STR(_name, _str) \
	(&((struct perf_pmu_events_attr[]) { \
		{ .attr = __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \
		  .id = 0, \
		  .event_str = _str, } \
	})[0].attr.attr)

#define I915_EVENT(_name, _config, _unit) \
	I915_EVENT_ATTR(_name, _config), \
	I915_EVENT_STR(_name.unit, _unit)

#define I915_ENGINE_EVENT(_name, _class, _instance, _sample) \
	I915_EVENT_ATTR(_name, __I915_PMU_ENGINE(_class, _instance, _sample)), \
	I915_EVENT_STR(_name.unit, "ns")

#define I915_ENGINE_EVENTS(_name, _class, _instance) \
	I915_ENGINE_EVENT(_name##_instance-busy, _class, _instance, I915_SAMPLE_BUSY), \
	I915_ENGINE_EVENT(_name##_instance-sema, _class, _instance, I915_SAMPLE_SEMA), \
	I915_ENGINE_EVENT(_name##_instance-wait, _class, _instance, I915_SAMPLE_WAIT)

static struct attribute *i915_pmu_events_attrs[] = {
	I915_ENGINE_EVENTS(rcs, I915_ENGINE_CLASS_RENDER, 0),
	I915_ENGINE_EVENTS(bcs, I915_ENGINE_CLASS_COPY, 0),
	I915_ENGINE_EVENTS(vcs, I915_ENGINE_CLASS_VIDEO, 0),
	I915_ENGINE_EVENTS(vcs, I915_ENGINE_CLASS_VIDEO, 1),
	I915_ENGINE_EVENTS(vecs, I915_ENGINE_CLASS_VIDEO_ENHANCE, 0),

	I915_EVENT(actual-frequency, I915_PMU_ACTUAL_FREQUENCY, "MHz"),
	I915_EVENT(requested-frequency, I915_PMU_REQUESTED_FREQUENCY, "MHz"),

	NULL,
};

static const struct attribute_group i915_pmu_events_attr_group = {
	.name = "events",
	.attrs = i915_pmu_events_attrs,
};

static ssize_t
i915_pmu_get_attr_cpumask(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &i915_pmu_cpumask);
}

static DEVICE_ATTR(cpumask, 0444, i915_pmu_get_attr_cpumask, NULL);

static struct attribute *i915_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group i915_pmu_cpumask_attr_group = {
	.attrs = i915_cpumask_attrs,
};

static const struct attribute_group *i915_pmu_attr_groups[] = {
	&i915_pmu_format_attr_group,
	&i915_pmu_events_attr_group,
	&i915_pmu_cpumask_attr_group,
	NULL
};

#ifdef CONFIG_HOTPLUG_CPU
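/*
 * CPU hotplug: keep exactly one designated reader CPU in i915_pmu_cpumask
 * and migrate the perf context to a sibling CPU if that CPU goes offline.
 */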
static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);
	unsigned int target;

	GEM_BUG_ON(!pmu->base.event_init);

	target = cpumask_any_and(&i915_pmu_cpumask, &i915_pmu_cpumask);
	/* Select the first online CPU as a designated reader. */
	if (target >= nr_cpu_ids)
		cpumask_set_cpu(cpu, &i915_pmu_cpumask);

	return 0;
}

static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);
	unsigned int target;

	GEM_BUG_ON(!pmu->base.event_init);

	if (cpumask_test_and_clear_cpu(cpu, &i915_pmu_cpumask)) {
		target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
		/* Migrate events if there is a valid target */
		if (target < nr_cpu_ids) {
			cpumask_set_cpu(target, &i915_pmu_cpumask);
			perf_pmu_migrate_context(&pmu->base, cpu, target);
		}
	}

	return 0;
}

static enum cpuhp_state cpuhp_slot = CPUHP_INVALID;
#endif

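/* Register a dynamic cpuhp state so the callbacks above follow CPU hotplug. */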
static int i915_pmu_register_cpuhp_state(struct drm_i915_private *i915)
{
#ifdef CONFIG_HOTPLUG_CPU
	enum cpuhp_state slot;
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      "perf/x86/intel/i915:online",
				      i915_pmu_cpu_online,
				      i915_pmu_cpu_offline);
	if (ret < 0)
		return ret;

	slot = ret;
	ret = cpuhp_state_add_instance(slot, &i915->pmu.node);
	if (ret) {
		cpuhp_remove_multi_state(slot);
		return ret;
	}

	cpuhp_slot = slot;
#endif
	return 0;
}

static void i915_pmu_unregister_cpuhp_state(struct drm_i915_private *i915)
{
#ifdef CONFIG_HOTPLUG_CPU
	WARN_ON(cpuhp_slot == CPUHP_INVALID);
	WARN_ON(cpuhp_state_remove_instance(cpuhp_slot, &i915->pmu.node));
	cpuhp_remove_multi_state(cpuhp_slot);
#endif
}

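/*
 * Register the i915 PMU with perf: hook up the pmu callbacks, the sampling
 * timer and the CPU hotplug state. Gen2 and earlier are not supported.
 */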
void i915_pmu_register(struct drm_i915_private *i915)
{
	int ret;

	if (INTEL_GEN(i915) <= 2) {
		DRM_INFO("PMU not supported for this GPU.\n");
		return;
	}

	i915->pmu.base.attr_groups = i915_pmu_attr_groups;
	i915->pmu.base.task_ctx_nr = perf_invalid_context;
	i915->pmu.base.event_init = i915_pmu_event_init;
	i915->pmu.base.add = i915_pmu_event_add;
	i915->pmu.base.del = i915_pmu_event_del;
	i915->pmu.base.start = i915_pmu_event_start;
	i915->pmu.base.stop = i915_pmu_event_stop;
	i915->pmu.base.read = i915_pmu_event_read;
	i915->pmu.base.event_idx = i915_pmu_event_event_idx;

	spin_lock_init(&i915->pmu.lock);
	hrtimer_init(&i915->pmu.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	i915->pmu.timer.function = i915_sample;

	ret = perf_pmu_register(&i915->pmu.base, "i915", -1);
	if (ret)
		goto err;

	ret = i915_pmu_register_cpuhp_state(i915);
	if (ret)
		goto err_unreg;

	return;

err_unreg:
	perf_pmu_unregister(&i915->pmu.base);
err:
	i915->pmu.base.event_init = NULL;
	DRM_NOTE("Failed to register PMU! (err=%d)\n", ret);
}

void i915_pmu_unregister(struct drm_i915_private *i915)
{
	if (!i915->pmu.base.event_init)
		return;

	WARN_ON(i915->pmu.enable);

	hrtimer_cancel(&i915->pmu.timer);

	i915_pmu_unregister_cpuhp_state(i915);

	perf_pmu_unregister(&i915->pmu.base);
	i915->pmu.base.event_init = NULL;
}