/*
 * Copyright 2011,2016 Freescale Semiconductor, Inc.
 * Copyright 2011 Linaro Ltd.
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */

#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/perf_event.h>
#include <linux/slab.h>

#include "common.h"

#define MMDC_MAPSR		0x404
#define BP_MMDC_MAPSR_PSD	0
#define BP_MMDC_MAPSR_PSS	4

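/*
 * MDMISC[4:3] holds the DDR type; it is read in imx_mmdc_probe() as
 * (val & BM_MMDC_MDMISC_DDR_TYPE) >> BP_MMDC_MDMISC_DDR_TYPE and exposed
 * through imx_mmdc_get_ddr_type().
 */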
#define MMDC_MDMISC		0x18
#define BM_MMDC_MDMISC_DDR_TYPE	0x18
#define BP_MMDC_MDMISC_DDR_TYPE	0x3

#define TOTAL_CYCLES		0x0
#define BUSY_CYCLES		0x1
#define READ_ACCESSES		0x2
#define WRITE_ACCESSES		0x3
#define READ_BYTES		0x4
#define WRITE_BYTES		0x5

/* MADPCR0 profiling control bits: enable, reset, freeze, cycle overflow */
#define DBG_DIS			0x0
#define DBG_EN			0x1
#define DBG_RST			0x2
#define PRF_FRZ			0x4
#define CYC_OVF			0x8

#define MMDC_MADPCR0	0x410
#define MMDC_MADPSR0	0x418
#define MMDC_MADPSR1	0x41C
#define MMDC_MADPSR2	0x420
#define MMDC_MADPSR3	0x424
#define MMDC_MADPSR4	0x428
#define MMDC_MADPSR5	0x42C

#define MMDC_NUM_COUNTERS	6

#define to_mmdc_pmu(p) container_of(p, struct mmdc_pmu, pmu)

static int ddr_type;

#ifdef CONFIG_PERF_EVENTS

static DEFINE_IDA(mmdc_ida);

/* Dynamic cpu hotplug state shared by all MMDC PMU instances */
static int cpuhp_mmdc_state;

PMU_EVENT_ATTR_STRING(total-cycles, mmdc_pmu_total_cycles, "event=0x00")
PMU_EVENT_ATTR_STRING(busy-cycles, mmdc_pmu_busy_cycles, "event=0x01")
PMU_EVENT_ATTR_STRING(read-accesses, mmdc_pmu_read_accesses, "event=0x02")
PMU_EVENT_ATTR_STRING(write-accesses, mmdc_pmu_write_accesses, "event=0x03")
PMU_EVENT_ATTR_STRING(read-bytes, mmdc_pmu_read_bytes, "event=0x04")
PMU_EVENT_ATTR_STRING(read-bytes.unit, mmdc_pmu_read_bytes_unit, "MB");
PMU_EVENT_ATTR_STRING(read-bytes.scale, mmdc_pmu_read_bytes_scale, "0.000001");
PMU_EVENT_ATTR_STRING(write-bytes, mmdc_pmu_write_bytes, "event=0x05")
PMU_EVENT_ATTR_STRING(write-bytes.unit, mmdc_pmu_write_bytes_unit, "MB");
PMU_EVENT_ATTR_STRING(write-bytes.scale, mmdc_pmu_write_bytes_scale, "0.000001");

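/*
 * The event attributes above surface under
 * /sys/bus/event_source/devices/mmdc/ (or mmdcN for additional
 * controllers). An illustrative counting session from userspace -- a
 * sketch, the exact event syntax depends on the perf tool version:
 *
 *	perf stat -a -e mmdc/total-cycles/,mmdc/read-bytes/ sleep 1
 *
 * The .unit/.scale attribute pairs let the perf tool report read-bytes
 * and write-bytes in megabytes.
 */
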
struct mmdc_pmu {
	struct pmu pmu;
	void __iomem *mmdc_base;
	cpumask_t cpu;
	struct hrtimer hrtimer;
	unsigned int active_events;
	struct device *dev;
	struct perf_event *mmdc_events[MMDC_NUM_COUNTERS];
	struct hlist_node node;
};

/*
 * The polling period is set to one second. Overflow of total-cycles (the
 * fastest-increasing counter) takes on the order of ten seconds: the
 * counters are 32 bits wide, so e.g. at a 528 MHz MMDC clock (typical for
 * i.MX6Q) a wrap takes 2^32 / 528 MHz ~= 8.1 s. Polling every second
 * therefore leaves a wide margin.
 */
static unsigned int mmdc_pmu_poll_period_us = 1000000;

module_param_named(pmu_pmu_poll_period_us, mmdc_pmu_poll_period_us, uint,
		S_IRUGO | S_IWUSR);

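/*
 * The period can be tuned at run time via the module parameter, e.g.
 * (the sysfs path assumes this file is built in with KBUILD_MODNAME
 * "mmdc"):
 *
 *	echo 500000 > /sys/module/mmdc/parameters/pmu_pmu_poll_period_us
 */
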
static ktime_t mmdc_pmu_timer_period(void)
{
	return ns_to_ktime((u64)mmdc_pmu_poll_period_us * 1000);
}

static ssize_t mmdc_pmu_cpumask_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct mmdc_pmu *pmu_mmdc = dev_get_drvdata(dev);

	return cpumap_print_to_pagebuf(true, buf, &pmu_mmdc->cpu);
}

static struct device_attribute mmdc_pmu_cpumask_attr =
	__ATTR(cpumask, S_IRUGO, mmdc_pmu_cpumask_show, NULL);

static struct attribute *mmdc_pmu_cpumask_attrs[] = {
	&mmdc_pmu_cpumask_attr.attr,
	NULL,
};

static struct attribute_group mmdc_pmu_cpumask_attr_group = {
	.attrs = mmdc_pmu_cpumask_attrs,
};

static struct attribute *mmdc_pmu_events_attrs[] = {
	&mmdc_pmu_total_cycles.attr.attr,
	&mmdc_pmu_busy_cycles.attr.attr,
	&mmdc_pmu_read_accesses.attr.attr,
	&mmdc_pmu_write_accesses.attr.attr,
	&mmdc_pmu_read_bytes.attr.attr,
	&mmdc_pmu_read_bytes_unit.attr.attr,
	&mmdc_pmu_read_bytes_scale.attr.attr,
	&mmdc_pmu_write_bytes.attr.attr,
	&mmdc_pmu_write_bytes_unit.attr.attr,
	&mmdc_pmu_write_bytes_scale.attr.attr,
	NULL,
};

static struct attribute_group mmdc_pmu_events_attr_group = {
	.name = "events",
	.attrs = mmdc_pmu_events_attrs,
};

PMU_FORMAT_ATTR(event, "config:0-63");
static struct attribute *mmdc_pmu_format_attrs[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group mmdc_pmu_format_attr_group = {
	.name = "format",
	.attrs = mmdc_pmu_format_attrs,
};

static const struct attribute_group *attr_groups[] = {
	&mmdc_pmu_events_attr_group,
	&mmdc_pmu_format_attr_group,
	&mmdc_pmu_cpumask_attr_group,
	NULL,
};

static u32 mmdc_pmu_read_counter(struct mmdc_pmu *pmu_mmdc, int cfg)
{
	void __iomem *mmdc_base, *reg;

	mmdc_base = pmu_mmdc->mmdc_base;

	switch (cfg) {
	case TOTAL_CYCLES:
		reg = mmdc_base + MMDC_MADPSR0;
		break;
	case BUSY_CYCLES:
		reg = mmdc_base + MMDC_MADPSR1;
		break;
	case READ_ACCESSES:
		reg = mmdc_base + MMDC_MADPSR2;
		break;
	case WRITE_ACCESSES:
		reg = mmdc_base + MMDC_MADPSR3;
		break;
	case READ_BYTES:
		reg = mmdc_base + MMDC_MADPSR4;
		break;
	case WRITE_BYTES:
		reg = mmdc_base + MMDC_MADPSR5;
		break;
	default:
		return WARN_ONCE(1,
			"invalid configuration %d for mmdc counter", cfg);
	}
	return readl(reg);
}

static int mmdc_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct mmdc_pmu *pmu_mmdc = hlist_entry_safe(node, struct mmdc_pmu, node);
	int target;

	if (!cpumask_test_and_clear_cpu(cpu, &pmu_mmdc->cpu))
		return 0;

	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;

	perf_pmu_migrate_context(&pmu_mmdc->pmu, cpu, target);
	cpumask_set_cpu(target, &pmu_mmdc->cpu);

	return 0;
}

static bool mmdc_pmu_group_event_is_valid(struct perf_event *event,
					  struct pmu *pmu,
					  unsigned long *used_counters)
{
	int cfg = event->attr.config;

	if (is_software_event(event))
		return true;

	if (event->pmu != pmu)
		return false;

	return !test_and_set_bit(cfg, used_counters);
}

/*
 * Each event has a single fixed-purpose counter, so we can only have a
 * single active event for each at any point in time. Here we just check
 * for duplicates, and rely on mmdc_pmu_event_init to verify that the HW
 * event numbers are valid.
 */
static bool mmdc_pmu_group_is_valid(struct perf_event *event)
{
	struct pmu *pmu = event->pmu;
	struct perf_event *leader = event->group_leader;
	struct perf_event *sibling;
	unsigned long counter_mask = 0;

	set_bit(leader->attr.config, &counter_mask);

	if (event != leader) {
		if (!mmdc_pmu_group_event_is_valid(event, pmu, &counter_mask))
			return false;
	}

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!mmdc_pmu_group_event_is_valid(sibling, pmu, &counter_mask))
			return false;
	}

	return true;
}

static int mmdc_pmu_event_init(struct perf_event *event)
{
	struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
	int cfg = event->attr.config;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EOPNOTSUPP;

	if (event->cpu < 0) {
		dev_warn(pmu_mmdc->dev, "Can't provide per-task data!\n");
		return -EOPNOTSUPP;
	}

	if (event->attr.exclude_user ||
			event->attr.exclude_kernel ||
			event->attr.exclude_hv ||
			event->attr.exclude_idle ||
			event->attr.exclude_host ||
			event->attr.exclude_guest ||
			event->attr.sample_period)
		return -EINVAL;

	if (cfg < 0 || cfg >= MMDC_NUM_COUNTERS)
		return -EINVAL;

	if (!mmdc_pmu_group_is_valid(event))
		return -EINVAL;

	event->cpu = cpumask_first(&pmu_mmdc->cpu);
	return 0;
}

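/*
 * The hardware counters are free-running and 32 bits wide. The cmpxchg
 * loop below guards prev_count against concurrent updates (the hrtimer
 * callback and a perf read can race), and masking the delta to 32 bits
 * accounts for a single counter wrap between two reads.
 */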
static void mmdc_pmu_event_update(struct perf_event *event)
{
	struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

	do {
		prev_raw_count = local64_read(&hwc->prev_count);
		new_raw_count = mmdc_pmu_read_counter(pmu_mmdc,
						      event->attr.config);
	} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
		new_raw_count) != prev_raw_count);

	delta = (new_raw_count - prev_raw_count) & 0xFFFFFFFF;

	local64_add(delta, &event->count);
}

static void mmdc_pmu_event_start(struct perf_event *event, int flags)
{
	struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	void __iomem *mmdc_base, *reg;

	mmdc_base = pmu_mmdc->mmdc_base;
	reg = mmdc_base + MMDC_MADPCR0;

	/*
	 * hrtimer is required because mmdc does not provide an interrupt so
	 * polling is necessary
	 */
	hrtimer_start(&pmu_mmdc->hrtimer, mmdc_pmu_timer_period(),
		      HRTIMER_MODE_REL_PINNED);

	local64_set(&hwc->prev_count, 0);

	/* Reset the profiling counters, then enable profiling */
	writel(DBG_RST, reg);
	writel(DBG_EN, reg);
}

static int mmdc_pmu_event_add(struct perf_event *event, int flags)
{
	struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	int cfg = event->attr.config;

	if (flags & PERF_EF_START)
		mmdc_pmu_event_start(event, flags);

	if (pmu_mmdc->mmdc_events[cfg] != NULL)
		return -EAGAIN;

	pmu_mmdc->mmdc_events[cfg] = event;
	pmu_mmdc->active_events++;

	local64_set(&hwc->prev_count, mmdc_pmu_read_counter(pmu_mmdc, cfg));

	return 0;
}

static void mmdc_pmu_event_stop(struct perf_event *event, int flags)
{
	struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
	void __iomem *mmdc_base, *reg;

	mmdc_base = pmu_mmdc->mmdc_base;
	reg = mmdc_base + MMDC_MADPCR0;

	/* Freeze the profiling counters and fold in the final counts */
	writel(PRF_FRZ, reg);
	mmdc_pmu_event_update(event);
}

static void mmdc_pmu_event_del(struct perf_event *event, int flags)
{
	struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
	int cfg = event->attr.config;

	pmu_mmdc->mmdc_events[cfg] = NULL;
	pmu_mmdc->active_events--;

	/* The hrtimer is only needed while at least one event is counting */
	if (pmu_mmdc->active_events == 0)
		hrtimer_cancel(&pmu_mmdc->hrtimer);

	mmdc_pmu_event_stop(event, PERF_EF_UPDATE);
}

static void mmdc_pmu_overflow_handler(struct mmdc_pmu *pmu_mmdc)
{
	int i;

	for (i = 0; i < MMDC_NUM_COUNTERS; i++) {
		struct perf_event *event = pmu_mmdc->mmdc_events[i];

		if (event)
			mmdc_pmu_event_update(event);
	}
}

static enum hrtimer_restart mmdc_pmu_timer_handler(struct hrtimer *hrtimer)
{
	struct mmdc_pmu *pmu_mmdc = container_of(hrtimer, struct mmdc_pmu,
			hrtimer);

	mmdc_pmu_overflow_handler(pmu_mmdc);
	hrtimer_forward_now(hrtimer, mmdc_pmu_timer_period());

	return HRTIMER_RESTART;
}

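/*
 * One mmdc_pmu instance exists per MMDC controller; mmdc_ida hands each
 * instance a unique id so every PMU gets a distinct name ("mmdc",
 * "mmdc1", ...).
 */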
static int mmdc_pmu_init(struct mmdc_pmu *pmu_mmdc,
		void __iomem *mmdc_base, struct device *dev)
{
	int mmdc_num;

	*pmu_mmdc = (struct mmdc_pmu) {
		.pmu = (struct pmu) {
			.task_ctx_nr	= perf_invalid_context,
			.attr_groups	= attr_groups,
			.event_init	= mmdc_pmu_event_init,
			.add		= mmdc_pmu_event_add,
			.del		= mmdc_pmu_event_del,
			.start		= mmdc_pmu_event_start,
			.stop		= mmdc_pmu_event_stop,
			.read		= mmdc_pmu_event_update,
		},
		.mmdc_base = mmdc_base,
		.dev = dev,
		.active_events = 0,
	};

	mmdc_num = ida_simple_get(&mmdc_ida, 0, 0, GFP_KERNEL);

	return mmdc_num;
}

static int imx_mmdc_remove(struct platform_device *pdev)
{
	struct mmdc_pmu *pmu_mmdc = platform_get_drvdata(pdev);

	cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
	perf_pmu_unregister(&pmu_mmdc->pmu);
	kfree(pmu_mmdc);
	return 0;
}

static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_base)
{
	struct mmdc_pmu *pmu_mmdc;
	char *name;
	int mmdc_num;
	int ret;

	pmu_mmdc = kzalloc(sizeof(*pmu_mmdc), GFP_KERNEL);
	if (!pmu_mmdc) {
		pr_err("failed to allocate PMU device!\n");
		return -ENOMEM;
	}

	/* The first instance registers the hotplug state */
	if (!cpuhp_mmdc_state) {
		ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
					      "perf/arm/mmdc:online", NULL,
					      mmdc_pmu_offline_cpu);
		if (ret < 0) {
			pr_err("cpuhp_setup_state_multi failed\n");
			goto pmu_free;
		}
		cpuhp_mmdc_state = ret;
	}

	mmdc_num = mmdc_pmu_init(pmu_mmdc, mmdc_base, &pdev->dev);
	if (mmdc_num == 0)
		name = "mmdc";
	else
		name = devm_kasprintf(&pdev->dev,
				GFP_KERNEL, "mmdc%d", mmdc_num);

	hrtimer_init(&pmu_mmdc->hrtimer, CLOCK_MONOTONIC,
			HRTIMER_MODE_REL);
	pmu_mmdc->hrtimer.function = mmdc_pmu_timer_handler;

	cpumask_set_cpu(raw_smp_processor_id(), &pmu_mmdc->cpu);

	/* Register the pmu instance for cpu hotplug */
	cpuhp_state_add_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);

	ret = perf_pmu_register(&(pmu_mmdc->pmu), name, -1);
	if (ret)
		goto pmu_register_err;

	platform_set_drvdata(pdev, pmu_mmdc);
	return 0;

pmu_register_err:
	pr_warn("MMDC Perf PMU failed (%d), disabled\n", ret);
	cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
	hrtimer_cancel(&pmu_mmdc->hrtimer);
pmu_free:
	kfree(pmu_mmdc);
	return ret;
}

#else
#define imx_mmdc_remove NULL
#define imx_mmdc_perf_init(pdev, mmdc_base) 0
#endif

static int imx_mmdc_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	void __iomem *mmdc_base, *reg;
	u32 val;
	int timeout = 0x400;

	mmdc_base = of_iomap(np, 0);
	WARN_ON(!mmdc_base);

	reg = mmdc_base + MMDC_MDMISC;
	/* Get ddr type */
	val = readl_relaxed(reg);
	ddr_type = (val & BM_MMDC_MDMISC_DDR_TYPE) >>
		 BP_MMDC_MDMISC_DDR_TYPE;

	reg = mmdc_base + MMDC_MAPSR;

	/* Enable automatic power saving */
	val = readl_relaxed(reg);
	val &= ~(1 << BP_MMDC_MAPSR_PSD);
	writel_relaxed(val, reg);

	/* Ensure it's successfully enabled */
	while (!(readl_relaxed(reg) & 1 << BP_MMDC_MAPSR_PSS) && --timeout)
		cpu_relax();

	if (unlikely(!timeout)) {
		pr_warn("%s: failed to enable automatic power saving\n",
			__func__);
		return -EBUSY;
	}

	return imx_mmdc_perf_init(pdev, mmdc_base);
}

int imx_mmdc_get_ddr_type(void)
{
	return ddr_type;
}

static const struct of_device_id imx_mmdc_dt_ids[] = {
	{ .compatible = "fsl,imx6q-mmdc", },
	{ /* sentinel */ }
};

static struct platform_driver imx_mmdc_driver = {
	.driver = {
		.name = "imx-mmdc",
		.of_match_table = imx_mmdc_dt_ids,
	},
	.probe = imx_mmdc_probe,
	.remove = imx_mmdc_remove,
};

static int __init imx_mmdc_init(void)
{
	return platform_driver_register(&imx_mmdc_driver);
}
postcore_initcall(imx_mmdc_init);