/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/iommu.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <mach/iommu_hw-v1.h>
#include <mach/iommu.h>
#include <mach/iommu_perfmon.h>
#define PMCR_P_MASK		(0x1)
#define PMCR_P_SHIFT		(1)
#define PMCR_P			(PMCR_P_MASK << PMCR_P_SHIFT)
#define PMCFGR_NCG_MASK		(0xFF)
#define PMCFGR_NCG_SHIFT	(24)
#define PMCFGR_NCG		(PMCFGR_NCG_MASK << PMCFGR_NCG_SHIFT)
#define PMCFGR_N_MASK		(0xFF)
#define PMCFGR_N_SHIFT		(0)
#define PMCFGR_N		(PMCFGR_N_MASK << PMCFGR_N_SHIFT)
#define CR_E			0x1
#define CGCR_CEN		0x800
#define CGCR_CEN_SHFT		(1 << 11)
#define PMCGCR_CGNC_MASK	(0x0F)
#define PMCGCR_CGNC_SHIFT	(24)
#define PMCGCR_CGNC		(PMCGCR_CGNC_MASK << PMCGCR_CGNC_SHIFT)
#define PMCGCR_(group)		(PMCGCR_N + group*4)

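/*
 * The per-counter enable, interrupt-enable, and overflow-status registers
 * are banked in 32-bit chunks: instance n covers counters 32n..32n+31 and
 * sits n*4 bytes past the first instance (hence the /32 and %32 arithmetic
 * in the counter helpers below).
 */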
#define PMOVSCLR_(n)		(PMOVSCLR_N + n*4)
#define PMCNTENSET_(n)		(PMCNTENSET_N + n*4)
#define PMCNTENCLR_(n)		(PMCNTENCLR_N + n*4)
#define PMINTENSET_(n)		(PMINTENSET_N + n*4)
#define PMINTENCLR_(n)		(PMINTENCLR_N + n*4)

#define PMEVCNTR_(n)		(PMEVCNTR_N + n*4)
#define PMEVTYPER_(n)		(PMEVTYPER_N + n*4)

static LIST_HEAD(iommu_list);
static struct dentry *msm_iommu_root_debugfs_dir;
static const char *NO_EVENT_CLASS_NAME = "none";
static int NO_EVENT_CLASS = -1;
static const unsigned int MAX_EVENT_CLASS_NAME_LEN = 36;

struct event_class {
	unsigned int event_number;
	const char *desc;
};

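/*
 * The event numbers below are the raw encodings written to a counter's
 * PMEVTYPER register when the corresponding class is selected.
 */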
static struct event_class pmu_event_classes[] = {
	{ 0x00, "cycle_count" },
	{ 0x01, "cycle_count64" },
	{ 0x08, "tlb_refill" },
	{ 0x09, "tlb_refill_read" },
	{ 0x0A, "tlb_refill_write" },
	{ 0x10, "access" },
	{ 0x11, "access_read" },
	{ 0x12, "access_write" },
	{ 0x80, "full_misses" },
	{ 0x81, "partial_miss_1lbfb_hit" },
	{ 0x82, "partial_miss_2lbfb_hit" },
	{ 0x83, "full_hit" },
	{ 0x90, "pred_req_full_miss" },
	{ 0x91, "pred_req_partial_miss_1lbfb_hit" },
	{ 0x92, "pred_req_partial_miss_2lbfb_hit" },
	{ 0xb0, "tot_num_miss_axi_htw_read_req" },
	{ 0xb1, "tot_num_pred_axi_htw_read_req" },
};

static unsigned int iommu_pm_is_hw_access_OK(const struct iommu_pmon *pmon)
{
	return pmon->enabled && (pmon->iommu_attach_count > 0);
}

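/*
 * Build a newline-separated "[number] name" list of the event classes
 * this IOMMU supports. Allocates *buf (which the caller must kfree) and
 * returns the number of bytes written, or 0 on allocation failure.
 */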
static unsigned int iommu_pm_create_sup_cls_str(char **buf,
						struct iommu_pmon *pmon)
{
	unsigned long buf_size = ARRAY_SIZE(pmu_event_classes) *
				 MAX_EVENT_CLASS_NAME_LEN;
	unsigned int pos = 0;
	unsigned int nevent_cls = pmon->nevent_cls_supported;

	*buf = kzalloc(buf_size, GFP_KERNEL);
	if (*buf) {
		unsigned int j;
		int i;
		struct event_class *ptr;
		size_t array_len = ARRAY_SIZE(pmu_event_classes);
		ptr = pmu_event_classes;

		for (j = 0; j < nevent_cls; ++j) {
			for (i = 0; i < array_len; ++i) {
				if (ptr[i].event_number !=
						pmon->event_cls_supported[j])
					continue;

				if (pos < buf_size) {
					pos += snprintf(&(*buf)[pos],
							buf_size - pos,
							"[%u] %s\n",
							ptr[i].event_number,
							ptr[i].desc);
				}
				break;
			}
		}
	}
	return pos;
}

static const char *iommu_pm_find_event_class_name(int event_class)
{
	size_t array_len;
	struct event_class *ptr;
	int i;
	const char *event_class_name = NO_EVENT_CLASS_NAME;
	if (event_class < 0)
		goto out;

	array_len = ARRAY_SIZE(pmu_event_classes);
	ptr = pmu_event_classes;

	for (i = 0; i < array_len; ++i) {
		if (ptr[i].event_number == event_class) {
			event_class_name = ptr[i].desc;
			break;
		}
	}

out:
	return event_class_name;
}

static int iommu_pm_find_event_class(const char *event_class_name)
{
	size_t array_len;
	struct event_class *ptr;
	int i;
	int event_class = NO_EVENT_CLASS;

	if (strcmp(event_class_name, NO_EVENT_CLASS_NAME) == 0)
		goto out;

	array_len = ARRAY_SIZE(pmu_event_classes);
	ptr = pmu_event_classes;

	for (i = 0; i < array_len; ++i) {
		if (strcmp(ptr[i].desc, event_class_name) == 0) {
			event_class = ptr[i].event_number;
			goto out;
		}
	}

out:
	return event_class;
}

static inline void iommu_pm_add_to_iommu_list(struct iommu_pmon *iommu_pmon)
{
	list_add(&iommu_pmon->iommu_list, &iommu_list);
}

static inline void iommu_pm_del_from_iommu_list(struct iommu_pmon *iommu_pmon)
{
	list_del(&iommu_pmon->iommu_list);
}

static struct iommu_pmon *iommu_pm_get_pm_by_dev(struct device *dev)
{
	struct iommu_pmon *pmon;
	struct iommu_info *info;
	struct list_head *ent;
	list_for_each(ent, &iommu_list) {
		pmon = list_entry(ent, struct iommu_pmon, iommu_list);
		info = &pmon->iommu;
		if (dev == info->iommu_dev)
			return pmon;
	}
	return NULL;
}

static void iommu_pm_grp_enable(struct iommu_info *iommu, unsigned int grp_no)
{
	unsigned int pmcgcr;
	pmcgcr = readl_relaxed(iommu->base + PMCGCR_(grp_no));
	pmcgcr |= CGCR_CEN;
	writel_relaxed(pmcgcr, iommu->base + PMCGCR_(grp_no));
}

static void iommu_pm_grp_disable(struct iommu_info *iommu, unsigned int grp_no)
{
	unsigned int pmcgcr;
	pmcgcr = readl_relaxed(iommu->base + PMCGCR_(grp_no));
	pmcgcr &= ~CGCR_CEN;
	writel_relaxed(pmcgcr, iommu->base + PMCGCR_(grp_no));
}

static void iommu_pm_enable(struct iommu_info *iommu)
{
	unsigned int pmcr;
	pmcr = readl_relaxed(iommu->base + PMCR);
	pmcr |= CR_E;
	writel_relaxed(pmcr, iommu->base + PMCR);
}

static void iommu_pm_disable(struct iommu_info *iommu)
{
	unsigned int pmcr;
	pmcr = readl_relaxed(iommu->base + PMCR);
	pmcr &= ~CR_E;
	writel_relaxed(pmcr, iommu->base + PMCR);
}

static void iommu_pm_reset_counters(const struct iommu_info *iommu)
{
	unsigned int pmcr;
	pmcr = readl_relaxed(iommu->base + PMCR);
	pmcr |= PMCR_P;
	writel_relaxed(pmcr, iommu->base + PMCR);
}

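/*
 * Walk all counters, batching reads of the banked PMOVSCLR registers.
 * Each pending overflow bit bumps that counter's software overflow
 * count, and the write-back clears the bits that were just examined.
 */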
static void iommu_pm_check_for_overflow(struct iommu_pmon *pmon)
{
	struct iommu_pmon_counter *counter;
	struct iommu_info *iommu = &pmon->iommu;
	unsigned int reg_no = 0;
	unsigned int bit_no;
	unsigned int reg_value;
	unsigned int i;
	unsigned int j;
	unsigned int curr_reg = 0;

	reg_value = readl_relaxed(iommu->base + PMOVSCLR_(curr_reg));

	for (i = 0; i < pmon->num_groups; ++i) {
		struct iommu_pmon_cnt_group *cnt_grp = &pmon->cnt_grp[i];
		for (j = 0; j < cnt_grp->num_counters; ++j) {
			counter = &cnt_grp->counters[j];
			reg_no = counter->absolute_counter_no / 32;
			bit_no = counter->absolute_counter_no % 32;
			if (reg_no != curr_reg) {
				/* Clear the overflow bits of the register we
				 * just finished scanning, then read the next
				 * one.
				 */
				writel_relaxed(reg_value, iommu->base +
					       PMOVSCLR_(curr_reg));
				curr_reg = reg_no;
				reg_value = readl_relaxed(iommu->base +
							  PMOVSCLR_(curr_reg));
			}

			if (counter->enabled) {
				if (reg_value & (1 << bit_no))
					counter->overflow_count++;
			}
		}
	}

	/* Clear overflow bits of the last register scanned */
	writel_relaxed(reg_value, iommu->base + PMOVSCLR_(curr_reg));
}

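/*
 * Threaded half of the counter-overflow interrupt (registered below via
 * request_threaded_irq() with a NULL hard handler), so it runs in
 * process context and may sleep on the mutex.
 */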
irqreturn_t iommu_pm_evt_ovfl_int_handler(int irq, void *dev_id)
{
	struct iommu_pmon *pmon = dev_id;
	struct iommu_info *iommu = &pmon->iommu;

	mutex_lock(&pmon->lock);

	if (!iommu_pm_is_hw_access_OK(pmon)) {
		mutex_unlock(&pmon->lock);
		goto out;
	}

	iommu->ops->iommu_lock_acquire();
	iommu_pm_check_for_overflow(pmon);
	iommu->ops->iommu_lock_release();

	mutex_unlock(&pmon->lock);

out:
	return IRQ_HANDLED;
}

static void iommu_pm_counter_enable(struct iommu_info *iommu,
				    struct iommu_pmon_counter *counter)
{
	unsigned int reg_no = counter->absolute_counter_no / 32;
	unsigned int bit_no = counter->absolute_counter_no % 32;
	unsigned int reg_value;

	/* Clear overflow of counter */
	reg_value = 1 << bit_no;
	writel_relaxed(reg_value, iommu->base + PMOVSCLR_(reg_no));

	/* Enable counter */
	writel_relaxed(reg_value, iommu->base + PMCNTENSET_(reg_no));
	counter->enabled = 1;
}

static void iommu_pm_counter_disable(struct iommu_info *iommu,
				     struct iommu_pmon_counter *counter)
{
	unsigned int reg_no = counter->absolute_counter_no / 32;
	unsigned int bit_no = counter->absolute_counter_no % 32;
	unsigned int reg_value;

	counter->enabled = 0;

	/* Disable counter */
	reg_value = 1 << bit_no;
	writel_relaxed(reg_value, iommu->base + PMCNTENCLR_(reg_no));

	/* Clear overflow of counter */
	writel_relaxed(reg_value, iommu->base + PMOVSCLR_(reg_no));
}

/*
 * Must only be called after iommu_start_access()
 */
static void iommu_pm_ovfl_int_enable(struct iommu_info *iommu,
				     const struct iommu_pmon_counter *counter)
{
	unsigned int reg_no = counter->absolute_counter_no / 32;
	unsigned int bit_no = counter->absolute_counter_no % 32;
	unsigned int reg_value;

	/* Enable overflow interrupt for counter */
	reg_value = (1 << bit_no);
	writel_relaxed(reg_value, iommu->base + PMINTENSET_(reg_no));
}

/*
 * Must only be called after iommu_start_access()
 */
static void iommu_pm_ovfl_int_disable(struct iommu_info *iommu,
				      const struct iommu_pmon_counter *counter)
{
	unsigned int reg_no = counter->absolute_counter_no / 32;
	unsigned int bit_no = counter->absolute_counter_no % 32;
	unsigned int reg_value;

	/* Disable overflow interrupt for counter */
	reg_value = 1 << bit_no;
	writel_relaxed(reg_value, iommu->base + PMINTENCLR_(reg_no));
}

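/*
 * Program a counter for its current event class: NO_EVENT_CLASS parks the
 * counter (disabled, event type cleared), anything else writes the class
 * to the counter's PMEVTYPER register and enables the counter along with
 * its overflow interrupt. The cached value and overflow count are reset
 * either way.
 */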
static void iommu_pm_set_event_type(struct iommu_pmon *pmon,
				    struct iommu_pmon_counter *counter)
{
	int event_class;
	unsigned int count_no;
	struct iommu_info *iommu = &pmon->iommu;

	event_class = counter->current_event_class;
	count_no = counter->absolute_counter_no;

	if (event_class == NO_EVENT_CLASS) {
		if (iommu_pm_is_hw_access_OK(pmon)) {
			iommu->ops->iommu_lock_acquire();
			iommu_pm_counter_disable(iommu, counter);
			iommu_pm_ovfl_int_disable(iommu, counter);
			writel_relaxed(0, iommu->base + PMEVTYPER_(count_no));
			iommu->ops->iommu_lock_release();
		}
		counter->overflow_count = 0;
		counter->value = 0;
	} else {
		counter->overflow_count = 0;
		counter->value = 0;
		if (iommu_pm_is_hw_access_OK(pmon)) {
			iommu->ops->iommu_lock_acquire();
			writel_relaxed(event_class,
				       iommu->base + PMEVTYPER_(count_no));
			iommu_pm_ovfl_int_enable(iommu, counter);
			iommu_pm_counter_enable(iommu, counter);
			iommu->ops->iommu_lock_release();
		}
	}
}

static void iommu_pm_reset_counts(struct iommu_pmon *pmon)
{
	unsigned int i;
	unsigned int j;
	for (i = 0; i < pmon->num_groups; ++i) {
		struct iommu_pmon_cnt_group *cnt_grp = &pmon->cnt_grp[i];
		for (j = 0; j < cnt_grp->num_counters; ++j) {
			cnt_grp->counters[j].value = 0;
			cnt_grp->counters[j].overflow_count = 0;
		}
	}
}

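/* Read the current 32-bit hardware value of a counter. */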
static unsigned int iommu_pm_read_counter(struct iommu_pmon_counter *counter)
{
	struct iommu_pmon *pmon = counter->cnt_group->pmon;
	struct iommu_info *info = &pmon->iommu;
	unsigned int cnt_no = counter->absolute_counter_no;
	unsigned int pmevcntr;

	pmevcntr = readl_relaxed(info->base + PMEVCNTR_(cnt_no));

	return pmevcntr;
}

static void iommu_pm_set_all_counters(struct iommu_pmon *pmon)
{
	unsigned int i;
	unsigned int j;
	for (i = 0; i < pmon->num_groups; ++i) {
		struct iommu_pmon_cnt_group *cnt_grp = &pmon->cnt_grp[i];
		for (j = 0; j < cnt_grp->num_counters; ++j)
			iommu_pm_set_event_type(pmon, &cnt_grp->counters[j]);
	}
}

static void iommu_pm_read_all_counters(struct iommu_pmon *pmon)
{
	unsigned int i;
	unsigned int j;
	for (i = 0; i < pmon->num_groups; ++i) {
		struct iommu_pmon_cnt_group *cnt_grp = &pmon->cnt_grp[i];
		for (j = 0; j < cnt_grp->num_counters; ++j) {
			struct iommu_pmon_counter *counter;
			counter = &cnt_grp->counters[j];
			counter->value = iommu_pm_read_counter(counter);
		}
	}
}

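/*
 * Power on the IOMMU, reprogram every counter from its cached event
 * class, then enable each counter group and finally the global enable.
 * Called with pmon->lock held.
 */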
static void iommu_pm_on(struct iommu_pmon *pmon)
{
	unsigned int i;
	struct iommu_info *iommu = &pmon->iommu;
	struct msm_iommu_drvdata *iommu_drvdata =
					dev_get_drvdata(iommu->iommu_dev);

	iommu->ops->iommu_power_on(iommu_drvdata);

	iommu_pm_reset_counts(pmon);

	pmon->enabled = 1;

	iommu_pm_set_all_counters(pmon);

	iommu->ops->iommu_lock_acquire();

	/* enable all counter groups */
	for (i = 0; i < pmon->num_groups; ++i)
		iommu_pm_grp_enable(iommu, i);

	/* enable global counters */
	iommu_pm_enable(iommu);
	iommu->ops->iommu_lock_release();

	pr_info("%s: TLB performance monitoring turned ON\n",
		pmon->iommu.iommu_name);
}

static void iommu_pm_off(struct iommu_pmon *pmon)
{
	unsigned int i;
	struct iommu_info *iommu = &pmon->iommu;
	struct msm_iommu_drvdata *iommu_drvdata =
					dev_get_drvdata(iommu->iommu_dev);

	pmon->enabled = 0;

	iommu->ops->iommu_lock_acquire();

	/* disable global counters */
	iommu_pm_disable(iommu);

	/* Check if we overflowed just before turning off pmon */
	iommu_pm_check_for_overflow(pmon);

	/* disable all counter groups */
	for (i = 0; i < pmon->num_groups; ++i)
		iommu_pm_grp_disable(iommu, i);

	/* Update cached copy of counters before turning off power */
	iommu_pm_read_all_counters(pmon);

	iommu->ops->iommu_lock_release();
	iommu->ops->iommu_power_off(iommu_drvdata);

	pr_info("%s: TLB performance monitoring turned OFF\n",
		pmon->iommu.iommu_name);
}

static int iommu_pm_debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

static ssize_t iommu_pm_count_value_read(struct file *fp,
					 char __user *user_buff,
					 size_t count, loff_t *pos)
{
	size_t rd_cnt;
	unsigned long long full_count;

	struct iommu_pmon_counter *counter = fp->private_data;
	struct iommu_pmon *pmon = counter->cnt_group->pmon;
	struct iommu_info *iommu = &pmon->iommu;
	char buf[50];
	size_t len;

	mutex_lock(&pmon->lock);

	if (iommu_pm_is_hw_access_OK(pmon)) {
		iommu->ops->iommu_lock_acquire();
		counter->value = iommu_pm_read_counter(counter);
		iommu->ops->iommu_lock_release();
	}
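	/*
	 * The hardware counters are 32 bits wide; splice the software
	 * overflow count in as the upper 32 bits of the reported value.
	 */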
	full_count = (unsigned long long) counter->value +
		     ((unsigned long long) counter->overflow_count *
		      0x100000000ULL);

	len = snprintf(buf, 50, "%llu\n", full_count);
	rd_cnt = simple_read_from_buffer(user_buff, count, pos, buf, len);
	mutex_unlock(&pmon->lock);

	return rd_cnt;
}

static const struct file_operations cnt_value_file_ops = {
	.open = iommu_pm_debug_open,
	.read = iommu_pm_count_value_read,
};

static ssize_t iommu_pm_event_class_read(struct file *fp,
					 char __user *user_buff,
					 size_t count, loff_t *pos)
{
	size_t rd_cnt;
	struct iommu_pmon_counter *counter = fp->private_data;
	struct iommu_pmon *pmon = counter->cnt_group->pmon;
	char buf[50];
	const char *event_class_name;
	size_t len;

	mutex_lock(&pmon->lock);
	event_class_name = iommu_pm_find_event_class_name(
						counter->current_event_class);
	len = snprintf(buf, 50, "%s\n", event_class_name);

	rd_cnt = simple_read_from_buffer(user_buff, count, pos, buf, len);
	mutex_unlock(&pmon->lock);
	return rd_cnt;
}

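/*
 * Accepts either a decimal event-class number or an event-class name;
 * anything unrecognized maps to "none" (NO_EVENT_CLASS).
 */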
static ssize_t iommu_pm_event_class_write(struct file *fp,
					  const char __user *user_buff,
					  size_t count, loff_t *pos)
{
	size_t wr_cnt;
	char buf[50];
	size_t buf_size = sizeof(buf);
	struct iommu_pmon_counter *counter = fp->private_data;
	struct iommu_pmon *pmon = counter->cnt_group->pmon;
	int current_event_class;

	if ((count + *pos) >= buf_size)
		return -EINVAL;

	mutex_lock(&pmon->lock);
	current_event_class = counter->current_event_class;
	wr_cnt = simple_write_to_buffer(buf, buf_size, pos, user_buff, count);
	if (wr_cnt >= 1) {
		int rv;
		long value;
		buf[wr_cnt-1] = '\0';
		rv = kstrtol(buf, 10, &value);
		if (!rv) {
			counter->current_event_class =
				iommu_pm_find_event_class(
					iommu_pm_find_event_class_name(value));
		} else {
			counter->current_event_class =
				iommu_pm_find_event_class(buf);
		}
	}

	if (current_event_class != counter->current_event_class)
		iommu_pm_set_event_type(pmon, counter);

	mutex_unlock(&pmon->lock);
	return wr_cnt;
}

static const struct file_operations event_class_file_ops = {
	.open = iommu_pm_debug_open,
	.read = iommu_pm_event_class_read,
	.write = iommu_pm_event_class_write,
};

static ssize_t iommu_reset_counters_write(struct file *fp,
					  const char __user *user_buff,
					  size_t count, loff_t *pos)
{
	size_t wr_cnt;
	char buf[10];
	size_t buf_size = sizeof(buf);
	struct iommu_pmon *pmon = fp->private_data;
	struct iommu_info *iommu = &pmon->iommu;

	if ((count + *pos) >= buf_size)
		return -EINVAL;

	mutex_lock(&pmon->lock);
	wr_cnt = simple_write_to_buffer(buf, buf_size, pos, user_buff, count);
	if (wr_cnt >= 1) {
		unsigned long cmd = 0;
		int rv;
		buf[wr_cnt-1] = '\0';
		rv = kstrtoul(buf, 10, &cmd);
		if (!rv && (cmd == 1)) {
			if (iommu_pm_is_hw_access_OK(pmon)) {
				iommu->ops->iommu_lock_acquire();
				iommu_pm_reset_counters(&pmon->iommu);
				iommu->ops->iommu_lock_release();
			}
			iommu_pm_reset_counts(pmon);
			pr_info("TLB performance counters reset\n");
		} else {
			pr_err("Unknown performance monitor command: %lu\n",
				cmd);
		}
	}
	mutex_unlock(&pmon->lock);
	return wr_cnt;
}

static const struct file_operations reset_file_ops = {
	.open = iommu_pm_debug_open,
	.write = iommu_reset_counters_write,
};

static ssize_t iommu_pm_enable_counters_read(struct file *fp,
					     char __user *user_buff,
					     size_t count, loff_t *pos)
{
	size_t rd_cnt;
	char buf[5];
	size_t len;
	struct iommu_pmon *pmon = fp->private_data;

	mutex_lock(&pmon->lock);
	len = snprintf(buf, 5, "%u\n", pmon->enabled);
	rd_cnt = simple_read_from_buffer(user_buff, count, pos, buf, len);
	mutex_unlock(&pmon->lock);
	return rd_cnt;
}

static ssize_t iommu_pm_enable_counters_write(struct file *fp,
					      const char __user *user_buff,
					      size_t count, loff_t *pos)
{
	size_t wr_cnt;
	char buf[10];
	size_t buf_size = sizeof(buf);
	struct iommu_pmon *pmon = fp->private_data;

	if ((count + *pos) >= buf_size)
		return -EINVAL;

	mutex_lock(&pmon->lock);
	wr_cnt = simple_write_to_buffer(buf, buf_size, pos, user_buff, count);
	if (wr_cnt >= 1) {
		unsigned long cmd;
		int rv;
		buf[wr_cnt-1] = '\0';
		rv = kstrtoul(buf, 10, &cmd);
		if (!rv && (cmd < 2)) {
			if (pmon->enabled == 1 && cmd == 0) {
				if (pmon->iommu_attach_count > 0)
					iommu_pm_off(pmon);
			} else if (pmon->enabled == 0 && cmd == 1) {
				/* We can only turn on perf. monitoring if
				 * iommu is attached. Delay turning on perf.
				 * monitoring until we are attached.
				 */
				if (pmon->iommu_attach_count > 0)
					iommu_pm_on(pmon);
				else
					pmon->enabled = 1;
			}
		} else {
			pr_err("Unknown performance monitor command: %lu\n",
				cmd);
		}
	}
	mutex_unlock(&pmon->lock);
	return wr_cnt;
}

static const struct file_operations event_enable_file_ops = {
	.open = iommu_pm_debug_open,
	.read = iommu_pm_enable_counters_read,
	.write = iommu_pm_enable_counters_write,
};

static ssize_t iommu_pm_avail_event_cls_read(struct file *fp,
					     char __user *user_buff,
					     size_t count, loff_t *pos)
{
	size_t rd_cnt = 0;
	struct iommu_pmon *pmon = fp->private_data;
	char *buf;
	size_t len;

	mutex_lock(&pmon->lock);

	len = iommu_pm_create_sup_cls_str(&buf, pmon);
	if (buf) {
		rd_cnt = simple_read_from_buffer(user_buff, count, pos,
						 buf, len);
		kfree(buf);
	}
	mutex_unlock(&pmon->lock);
	return rd_cnt;
}

static const struct file_operations available_event_cls_file_ops = {
	.open = iommu_pm_debug_open,
	.read = iommu_pm_avail_event_cls_read,
};

static int iommu_pm_create_grp_debugfs_counters_hierarchy(
					struct iommu_pmon_cnt_group *cnt_grp,
					unsigned int *abs_counter_no)
{
	int ret = 0;
	int j;
	char name[20];

	for (j = 0; j < cnt_grp->num_counters; ++j) {
		struct dentry *grp_dir = cnt_grp->group_dir;
		struct dentry *counter_dir;
		cnt_grp->counters[j].cnt_group = cnt_grp;
		cnt_grp->counters[j].counter_no = j;
		cnt_grp->counters[j].absolute_counter_no = *abs_counter_no;
		(*abs_counter_no)++;
		cnt_grp->counters[j].value = 0;
		cnt_grp->counters[j].overflow_count = 0;
		cnt_grp->counters[j].current_event_class = NO_EVENT_CLASS;

		snprintf(name, 20, "counter%u", j);

		counter_dir = debugfs_create_dir(name, grp_dir);

		if (IS_ERR_OR_NULL(counter_dir)) {
			pr_err("unable to create counter debugfs dir %s\n",
			       name);
			ret = -ENOMEM;
			goto out;
		}

		cnt_grp->counters[j].counter_dir = counter_dir;

		if (!debugfs_create_file("value", 0644, counter_dir,
					 &cnt_grp->counters[j],
					 &cnt_value_file_ops)) {
			ret = -EIO;
			goto out;
		}

		if (!debugfs_create_file("current_event_class", 0644,
					 counter_dir, &cnt_grp->counters[j],
					 &event_class_file_ops)) {
			ret = -EIO;
			goto out;
		}
	}
out:
	return ret;
}

static int iommu_pm_create_group_debugfs_hierarchy(struct iommu_info *iommu,
					struct iommu_pmon *pmon_entry)
{
	int i;
	int ret = 0;
	char name[20];
	unsigned int abs_counter_no = 0;

	for (i = 0; i < pmon_entry->num_groups; ++i) {
		pmon_entry->cnt_grp[i].pmon = pmon_entry;
		pmon_entry->cnt_grp[i].grp_no = i;
		pmon_entry->cnt_grp[i].num_counters = pmon_entry->num_counters;
		pmon_entry->cnt_grp[i].counters =
			kzalloc(sizeof(*pmon_entry->cnt_grp[i].counters)
			* pmon_entry->cnt_grp[i].num_counters, GFP_KERNEL);

		if (!pmon_entry->cnt_grp[i].counters) {
			pr_err("Unable to allocate memory for counters\n");
			ret = -ENOMEM;
			goto out;
		}
		snprintf(name, 20, "group%u", i);
		pmon_entry->cnt_grp[i].group_dir = debugfs_create_dir(name,
							pmon_entry->iommu_dir);
		if (IS_ERR_OR_NULL(pmon_entry->cnt_grp[i].group_dir)) {
			pr_err("unable to create group debugfs dir %s\n",
			       name);
			ret = -ENOMEM;
			goto out;
		}

		ret = iommu_pm_create_grp_debugfs_counters_hierarchy(
						&pmon_entry->cnt_grp[i],
						&abs_counter_no);
		if (ret)
			goto out;
	}
out:
	return ret;
}

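/*
 * Register one IOMMU with the performance-monitoring framework. Creates
 * the debugfs hierarchy
 *
 *   iommu/<name>/{reset_counters,enable_counters,available_event_classes}
 *   iommu/<name>/group<i>/counter<j>/{value,current_event_class}
 *
 * and hooks up the counter-overflow interrupt if one was provided.
 */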
int msm_iommu_pm_iommu_register(struct iommu_pmon *pmon_entry)
{
	int ret = 0;
	struct iommu_info *iommu = &pmon_entry->iommu;
	int i;

	if (!iommu->ops || !iommu->iommu_name || !iommu->base
					|| !iommu->iommu_dev) {
		ret = -EINVAL;
		goto out;
	}

	if (!msm_iommu_root_debugfs_dir) {
		msm_iommu_root_debugfs_dir = debugfs_create_dir("iommu", NULL);
		if (IS_ERR_OR_NULL(msm_iommu_root_debugfs_dir)) {
			pr_err("Failed creating iommu debugfs dir \"iommu\"\n");
			ret = -EIO;
			goto out;
		}
	}

	pmon_entry->cnt_grp = kzalloc(sizeof(*pmon_entry->cnt_grp)
				      * pmon_entry->num_groups, GFP_KERNEL);
	if (!pmon_entry->cnt_grp) {
		pr_err("Unable to allocate memory for counter groups\n");
		ret = -ENOMEM;
		goto file_err;
	}
	pmon_entry->iommu_dir = debugfs_create_dir(iommu->iommu_name,
						   msm_iommu_root_debugfs_dir);
	if (IS_ERR_OR_NULL(pmon_entry->iommu_dir)) {
		pr_err("unable to create iommu debugfs dir %s\n",
		       iommu->iommu_name);
		ret = -ENOMEM;
		goto free_mem;
	}

	if (!debugfs_create_file("reset_counters", 0644,
			pmon_entry->iommu_dir, pmon_entry, &reset_file_ops)) {
		ret = -EIO;
		goto free_mem;
	}

	if (!debugfs_create_file("enable_counters", 0644,
		pmon_entry->iommu_dir, pmon_entry, &event_enable_file_ops)) {
		ret = -EIO;
		goto free_mem;
	}

	if (!debugfs_create_file("available_event_classes", 0644,
			pmon_entry->iommu_dir, pmon_entry,
			&available_event_cls_file_ops)) {
		ret = -EIO;
		goto free_mem;
	}

	ret = iommu_pm_create_group_debugfs_hierarchy(iommu, pmon_entry);
	if (ret)
		goto free_mem;

	if (iommu->evt_irq > 0) {
		ret = request_threaded_irq(iommu->evt_irq, NULL,
					   iommu_pm_evt_ovfl_int_handler,
					   IRQF_ONESHOT | IRQF_SHARED,
					   "msm_iommu_nonsecure_irq",
					   pmon_entry);
		if (ret) {
			pr_err("Request IRQ %d failed with ret=%d\n",
			       iommu->evt_irq, ret);
			goto free_mem;
		}
	} else {
		pr_info("%s: Overflow interrupt not available\n", __func__);
	}

	dev_dbg(iommu->iommu_dev, "%s iommu registered\n", iommu->iommu_name);

	goto out;
free_mem:
	if (pmon_entry->cnt_grp) {
		for (i = 0; i < pmon_entry->num_groups; ++i) {
			kfree(pmon_entry->cnt_grp[i].counters);
			pmon_entry->cnt_grp[i].counters = NULL;
		}
	}
	kfree(pmon_entry->cnt_grp);
	pmon_entry->cnt_grp = NULL;
file_err:
	debugfs_remove_recursive(msm_iommu_root_debugfs_dir);
out:
	return ret;
}
EXPORT_SYMBOL(msm_iommu_pm_iommu_register);

void msm_iommu_pm_iommu_unregister(struct device *dev)
{
	int i;
	struct iommu_pmon *pmon_entry = iommu_pm_get_pm_by_dev(dev);

	if (!pmon_entry)
		return;

	/* Free with the same dev_id the IRQ was requested with */
	if (pmon_entry->iommu.evt_irq > 0)
		free_irq(pmon_entry->iommu.evt_irq, pmon_entry);

	if (pmon_entry->cnt_grp) {
		for (i = 0; i < pmon_entry->num_groups; ++i)
			kfree(pmon_entry->cnt_grp[i].counters);
	}

	kfree(pmon_entry->cnt_grp);

	debugfs_remove_recursive(msm_iommu_root_debugfs_dir);
}
EXPORT_SYMBOL(msm_iommu_pm_iommu_unregister);

struct iommu_pmon *msm_iommu_pm_alloc(struct device *dev)
{
	struct iommu_pmon *pmon_entry;
	struct iommu_info *info;
	pmon_entry = devm_kzalloc(dev, sizeof(*pmon_entry), GFP_KERNEL);
	if (!pmon_entry)
		return NULL;
	info = &pmon_entry->iommu;
	info->iommu_dev = dev;
	mutex_init(&pmon_entry->lock);
	iommu_pm_add_to_iommu_list(pmon_entry);
	return pmon_entry;
}
EXPORT_SYMBOL(msm_iommu_pm_alloc);

void msm_iommu_pm_free(struct device *dev)
{
	struct iommu_pmon *pmon = iommu_pm_get_pm_by_dev(dev);
	if (pmon)
		iommu_pm_del_from_iommu_list(pmon);
}
EXPORT_SYMBOL(msm_iommu_pm_free);

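/*
 * Attach/detach notifications from the IOMMU driver. The monitor can only
 * touch the hardware while a domain is attached, so an enable requested
 * earlier is applied on the first attach and monitoring is shut down on
 * the last detach.
 */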
void msm_iommu_attached(struct device *dev)
{
	struct iommu_pmon *pmon = iommu_pm_get_pm_by_dev(dev);
	if (pmon) {
		mutex_lock(&pmon->lock);
		++pmon->iommu_attach_count;
		if (pmon->iommu_attach_count == 1) {
			/* If perf. mon was enabled before we attached we do
			 * the actual enabling after we attach.
			 */
			if (pmon->enabled)
				iommu_pm_on(pmon);
		}
		mutex_unlock(&pmon->lock);
	}
}
EXPORT_SYMBOL(msm_iommu_attached);

998
999void msm_iommu_detached(struct device *dev)
1000{
1001 struct iommu_pmon *pmon = iommu_pm_get_pm_by_dev(dev);
1002 if (pmon) {
1003 mutex_lock(&pmon->lock);
1004 if (pmon->iommu_attach_count == 1) {
1005 /* If perf. mon is still enabled we have to disable
1006 * before we do the detach.
1007 */
1008 if (pmon->enabled)
1009 iommu_pm_off(pmon);
1010 }
1011 BUG_ON(pmon->iommu_attach_count == 0);
1012 --pmon->iommu_attach_count;
1013 mutex_unlock(&pmon->lock);
1014 }
1015}
1016EXPORT_SYMBOL(msm_iommu_detached);
1017