/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/iommu.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <mach/iommu.h>
#include <mach/iommu_perfmon.h>

static LIST_HEAD(iommu_list);
static struct dentry *msm_iommu_root_debugfs_dir;
static const char *NO_EVENT_CLASS_NAME = "none";
static int NO_EVENT_CLASS = -1;
static const unsigned int MAX_EVENT_CLASS_NAME_LEN = 36;

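/*
 * Event classes implemented by the MSM IOMMU PMU. The numbers follow the
 * hardware event encoding; the strings are the names exposed through the
 * debugfs files below.
 */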
struct event_class {
	unsigned int event_number;
	const char *desc;
};

static struct event_class pmu_event_classes[] = {
	{ 0x00, "cycle_count" },
	{ 0x01, "cycle_count64" },
	{ 0x08, "tlb_refill" },
	{ 0x09, "tlb_refill_read" },
	{ 0x0A, "tlb_refill_write" },
	{ 0x10, "access" },
	{ 0x11, "access_read" },
	{ 0x12, "access_write" },
	{ 0x80, "full_misses" },
	{ 0x81, "partial_miss_1lbfb_hit" },
	{ 0x82, "partial_miss_2lbfb_hit" },
	{ 0x83, "full_hit" },
	{ 0x90, "pred_req_full_miss" },
	{ 0x91, "pred_req_partial_miss_1lbfb_hit" },
	{ 0x92, "pred_req_partial_miss_2lbfb_hit" },
	{ 0xb0, "tot_num_miss_axi_htw_read_req" },
	{ 0xb1, "tot_num_pred_axi_htw_read_req" },
};

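/*
 * Build a newline-separated "[number] name" list of the event classes
 * supported by this PMU instance into a kzalloc'd buffer. The caller is
 * responsible for freeing *buf. Returns the number of bytes written.
 */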
static unsigned int iommu_pm_create_sup_cls_str(char **buf,
						struct iommu_pmon *pmon)
{
	unsigned long buf_size = ARRAY_SIZE(pmu_event_classes) *
				 MAX_EVENT_CLASS_NAME_LEN;
	unsigned int pos = 0;
	unsigned int nevent_cls = pmon->nevent_cls_supported;

	*buf = kzalloc(buf_size, GFP_KERNEL);
	if (*buf) {
		unsigned int j;
		int i;
		struct event_class *ptr;
		size_t array_len = ARRAY_SIZE(pmu_event_classes);
		ptr = pmu_event_classes;

		for (j = 0; j < nevent_cls; ++j) {
			for (i = 0; i < array_len; ++i) {
				if (ptr[i].event_number !=
						pmon->event_cls_supported[j])
					continue;

				if (pos < buf_size) {
					pos += snprintf(&(*buf)[pos],
							buf_size - pos,
							"[%u] %s\n",
							ptr[i].event_number,
							ptr[i].desc);
				}
				break;
			}
		}
	}
	return pos;
}

static const char *iommu_pm_find_event_class_name(int event_class)
{
	size_t array_len;
	struct event_class *ptr;
	int i;
	const char *event_class_name = NO_EVENT_CLASS_NAME;
	if (event_class < 0)
		goto out;

	array_len = ARRAY_SIZE(pmu_event_classes);
	ptr = pmu_event_classes;

	for (i = 0; i < array_len; ++i) {
		if (ptr[i].event_number == event_class) {
			event_class_name = ptr[i].desc;
			break;
		}
	}

out:
	return event_class_name;
}

static int iommu_pm_find_event_class(const char *event_class_name)
{
	size_t array_len;
	struct event_class *ptr;
	int i;
	int event_class = NO_EVENT_CLASS;

	if (strcmp(event_class_name, NO_EVENT_CLASS_NAME) == 0)
		goto out;

	array_len = ARRAY_SIZE(pmu_event_classes);
	ptr = pmu_event_classes;

	for (i = 0; i < array_len; ++i) {
		if (strcmp(ptr[i].desc, event_class_name) == 0) {
			event_class = ptr[i].event_number;
			goto out;
		}
	}

out:
	return event_class;
}

static inline void iommu_pm_add_to_iommu_list(struct iommu_pmon *iommu_pmon)
{
	list_add(&iommu_pmon->iommu_list, &iommu_list);
}

static inline void iommu_pm_del_from_iommu_list(struct iommu_pmon *iommu_pmon)
{
	list_del(&iommu_pmon->iommu_list);
}

static struct iommu_pmon *iommu_pm_get_pm_by_dev(struct device *dev)
{
	struct iommu_pmon *pmon;
	struct iommu_info *info;
	struct list_head *ent;
	list_for_each(ent, &iommu_list) {
		pmon = list_entry(ent, struct iommu_pmon, iommu_list);
		info = &pmon->iommu;
		if (dev == info->iommu_dev)
			return pmon;
	}
	return NULL;
}

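/*
 * Program a counter for its currently selected event class. The hardware
 * is only touched when is_hw_access_OK() reports it is safe, and always
 * under the IOMMU lock. For NO_EVENT_CLASS the counter and its overflow
 * interrupt are disabled before the class register is cleared; otherwise
 * the class is written before the interrupt and counter are enabled, so
 * the counter never runs with a stale event class.
 */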
static void iommu_pm_set_event_type(struct iommu_pmon *pmon,
				    struct iommu_pmon_counter *counter)
{
	int event_class;
	unsigned int count_no;
	struct iommu_info *iommu = &pmon->iommu;

	event_class = counter->current_event_class;
	count_no = counter->absolute_counter_no;

	if (event_class == NO_EVENT_CLASS) {
		if (iommu->hw_ops->is_hw_access_OK(pmon)) {
			iommu->ops->iommu_lock_acquire();
			iommu->hw_ops->counter_disable(iommu, counter);
			iommu->hw_ops->ovfl_int_disable(iommu, counter);
			iommu->hw_ops->set_event_class(pmon, count_no, 0);
			iommu->ops->iommu_lock_release();
		}
		counter->overflow_count = 0;
		counter->value = 0;
	} else {
		counter->overflow_count = 0;
		counter->value = 0;
		if (iommu->hw_ops->is_hw_access_OK(pmon)) {
			iommu->ops->iommu_lock_acquire();
			iommu->hw_ops->set_event_class(pmon, count_no,
						       event_class);
			iommu->hw_ops->ovfl_int_enable(iommu, counter);
			iommu->hw_ops->counter_enable(iommu, counter);
			iommu->ops->iommu_lock_release();
		}
	}
}

static void iommu_pm_reset_counts(struct iommu_pmon *pmon)
{
	unsigned int i;
	unsigned int j;
	for (i = 0; i < pmon->num_groups; ++i) {
		struct iommu_pmon_cnt_group *cnt_grp = &pmon->cnt_grp[i];
		for (j = 0; j < cnt_grp->num_counters; ++j) {
			cnt_grp->counters[j].value = 0;
			cnt_grp->counters[j].overflow_count = 0;
		}
	}
}

static void iommu_pm_set_all_counters(struct iommu_pmon *pmon)
{
	unsigned int i;
	unsigned int j;
	for (i = 0; i < pmon->num_groups; ++i) {
		struct iommu_pmon_cnt_group *cnt_grp = &pmon->cnt_grp[i];
		for (j = 0; j < cnt_grp->num_counters; ++j)
			iommu_pm_set_event_type(pmon, &cnt_grp->counters[j]);
	}
}

static void iommu_pm_read_all_counters(struct iommu_pmon *pmon)
{
	unsigned int i;
	unsigned int j;
	struct iommu_info *iommu = &pmon->iommu;
	for (i = 0; i < pmon->num_groups; ++i) {
		struct iommu_pmon_cnt_group *cnt_grp = &pmon->cnt_grp[i];
		for (j = 0; j < cnt_grp->num_counters; ++j) {
			struct iommu_pmon_counter *counter;
			counter = &cnt_grp->counters[j];
			counter->value = iommu->hw_ops->read_counter(counter);
		}
	}
}

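/*
 * Power on the IOMMU, clear the cached counts, reprogram every counter
 * for its selected event class, then enable the counter groups and the
 * global performance-monitor enable bit under the IOMMU lock.
 */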
static void iommu_pm_on(struct iommu_pmon *pmon)
{
	unsigned int i;
	struct iommu_info *iommu = &pmon->iommu;
	struct msm_iommu_drvdata *iommu_drvdata =
					dev_get_drvdata(iommu->iommu_dev);

	iommu->ops->iommu_power_on(iommu_drvdata);

	iommu_pm_reset_counts(pmon);

	pmon->enabled = 1;

	iommu_pm_set_all_counters(pmon);

	iommu->ops->iommu_lock_acquire();

	/* enable all counter groups */
	for (i = 0; i < pmon->num_groups; ++i)
		iommu->hw_ops->grp_enable(iommu, i);

	/* enable global counters */
	iommu->hw_ops->enable_pm(iommu);
	iommu->ops->iommu_lock_release();

	pr_info("%s: TLB performance monitoring turned ON\n",
		pmon->iommu.iommu_name);
}

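/*
 * Mirror image of iommu_pm_on(): disable the global enable bit first,
 * pick up any overflow that raced with the shutdown, disable the groups,
 * and cache the final counter values before cutting power to the IOMMU.
 */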
static void iommu_pm_off(struct iommu_pmon *pmon)
{
	unsigned int i;
	struct iommu_info *iommu = &pmon->iommu;
	struct msm_iommu_drvdata *iommu_drvdata =
					dev_get_drvdata(iommu->iommu_dev);

	pmon->enabled = 0;

	iommu->ops->iommu_lock_acquire();

	/* disable global counters */
	iommu->hw_ops->disable_pm(iommu);

	/* Check if we overflowed just before turning off pmon */
	iommu->hw_ops->check_for_overflow(pmon);

	/* disable all counter groups */
	for (i = 0; i < pmon->num_groups; ++i)
		iommu->hw_ops->grp_disable(iommu, i);

	/* Update cached copy of counters before turning off power */
	iommu_pm_read_all_counters(pmon);

	iommu->ops->iommu_lock_release();
	iommu->ops->iommu_power_off(iommu_drvdata);

	pr_info("%s: TLB performance monitoring turned OFF\n",
		pmon->iommu.iommu_name);
}

static int iommu_pm_debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

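/*
 * Report a counter's full 64-bit value: the cached (or, when the hardware
 * is accessible, freshly read) 32-bit hardware count plus 2^32 for every
 * overflow interrupt seen so far.
 */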
static ssize_t iommu_pm_count_value_read(struct file *fp,
					 char __user *user_buff,
					 size_t count, loff_t *pos)
{
	size_t rd_cnt;
	unsigned long long full_count;

	struct iommu_pmon_counter *counter = fp->private_data;
	struct iommu_pmon *pmon = counter->cnt_group->pmon;
	struct iommu_info *iommu = &pmon->iommu;
	char buf[50];
	size_t len;

	mutex_lock(&pmon->lock);

	if (iommu->hw_ops->is_hw_access_OK(pmon)) {
		iommu->ops->iommu_lock_acquire();
		counter->value = iommu->hw_ops->read_counter(counter);
		iommu->ops->iommu_lock_release();
	}
	full_count = (unsigned long long) counter->value +
		     ((unsigned long long) counter->overflow_count *
		      0x100000000ULL);

	len = snprintf(buf, 50, "%llu\n", full_count);
	rd_cnt = simple_read_from_buffer(user_buff, count, pos, buf, len);
	mutex_unlock(&pmon->lock);

	return rd_cnt;
}

static const struct file_operations cnt_value_file_ops = {
	.open = iommu_pm_debug_open,
	.read = iommu_pm_count_value_read,
};

static ssize_t iommu_pm_event_class_read(struct file *fp,
					 char __user *user_buff,
					 size_t count, loff_t *pos)
{
	size_t rd_cnt;
	struct iommu_pmon_counter *counter = fp->private_data;
	struct iommu_pmon *pmon = counter->cnt_group->pmon;
	char buf[50];
	const char *event_class_name;
	size_t len;

	mutex_lock(&pmon->lock);
	event_class_name = iommu_pm_find_event_class_name(
						counter->current_event_class);
	len = snprintf(buf, 50, "%s\n", event_class_name);

	rd_cnt = simple_read_from_buffer(user_buff, count, pos, buf, len);
	mutex_unlock(&pmon->lock);
	return rd_cnt;
}

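/*
 * Accept either a decimal event number or an event-class name. Numeric
 * input is mapped back through the name table, so an unsupported number
 * degrades to "none" (NO_EVENT_CLASS) instead of programming an unknown
 * class. The counter is only reprogrammed when the class actually changes.
 */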
static ssize_t iommu_pm_event_class_write(struct file *fp,
					  const char __user *user_buff,
					  size_t count, loff_t *pos)
{
	size_t wr_cnt;
	char buf[50];
	size_t buf_size = sizeof(buf);
	struct iommu_pmon_counter *counter = fp->private_data;
	struct iommu_pmon *pmon = counter->cnt_group->pmon;
	int current_event_class;

	if ((count + *pos) >= buf_size)
		return -EINVAL;

	mutex_lock(&pmon->lock);
	current_event_class = counter->current_event_class;
	wr_cnt = simple_write_to_buffer(buf, buf_size, pos, user_buff, count);
	if (wr_cnt >= 1) {
		int rv;
		long value;
		buf[wr_cnt - 1] = '\0';
		rv = kstrtol(buf, 10, &value);
		if (!rv) {
			counter->current_event_class =
				iommu_pm_find_event_class(
					iommu_pm_find_event_class_name(value));
		} else {
			counter->current_event_class =
				iommu_pm_find_event_class(buf);
		}
	}

	if (current_event_class != counter->current_event_class)
		iommu_pm_set_event_type(pmon, counter);

	mutex_unlock(&pmon->lock);
	return wr_cnt;
}

static const struct file_operations event_class_file_ops = {
	.open = iommu_pm_debug_open,
	.read = iommu_pm_event_class_read,
	.write = iommu_pm_event_class_write,
};

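/*
 * Writing "1" resets the hardware counters (when the IOMMU is accessible)
 * along with the cached counts; any other value is rejected with an error
 * log.
 */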
static ssize_t iommu_reset_counters_write(struct file *fp,
					  const char __user *user_buff,
					  size_t count, loff_t *pos)
{
	size_t wr_cnt;
	char buf[10];
	size_t buf_size = sizeof(buf);
	struct iommu_pmon *pmon = fp->private_data;
	struct iommu_info *iommu = &pmon->iommu;

	if ((count + *pos) >= buf_size)
		return -EINVAL;

	mutex_lock(&pmon->lock);
	wr_cnt = simple_write_to_buffer(buf, buf_size, pos, user_buff, count);
	if (wr_cnt >= 1) {
		unsigned long cmd = 0;
		int rv;
		buf[wr_cnt - 1] = '\0';
		rv = kstrtoul(buf, 10, &cmd);
		if (!rv && (cmd == 1)) {
			if (iommu->hw_ops->is_hw_access_OK(pmon)) {
				iommu->ops->iommu_lock_acquire();
				iommu->hw_ops->reset_counters(&pmon->iommu);
				iommu->ops->iommu_lock_release();
			}
			iommu_pm_reset_counts(pmon);
			pr_info("TLB performance counters reset\n");
		} else {
			pr_err("Unknown performance monitor command: %lu\n",
			       cmd);
		}
	}
	mutex_unlock(&pmon->lock);
	return wr_cnt;
}

static const struct file_operations reset_file_ops = {
	.open = iommu_pm_debug_open,
	.write = iommu_reset_counters_write,
};

static ssize_t iommu_pm_enable_counters_read(struct file *fp,
					     char __user *user_buff,
					     size_t count, loff_t *pos)
{
	size_t rd_cnt;
	char buf[5];
	size_t len;
	struct iommu_pmon *pmon = fp->private_data;

	mutex_lock(&pmon->lock);
	len = snprintf(buf, 5, "%u\n", pmon->enabled);
	rd_cnt = simple_read_from_buffer(user_buff, count, pos, buf, len);
	mutex_unlock(&pmon->lock);
	return rd_cnt;
}

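/*
 * Writing "1" turns monitoring on and "0" turns it off. If the IOMMU is
 * not attached yet, only the enabled flag is recorded and the hardware is
 * actually switched later, from msm_iommu_attached().
 */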
static ssize_t iommu_pm_enable_counters_write(struct file *fp,
					      const char __user *user_buff,
					      size_t count, loff_t *pos)
{
	size_t wr_cnt;
	char buf[10];
	size_t buf_size = sizeof(buf);
	struct iommu_pmon *pmon = fp->private_data;

	if ((count + *pos) >= buf_size)
		return -EINVAL;

	mutex_lock(&pmon->lock);
	wr_cnt = simple_write_to_buffer(buf, buf_size, pos, user_buff, count);
	if (wr_cnt >= 1) {
		unsigned long cmd;
		int rv;
		buf[wr_cnt - 1] = '\0';
		rv = kstrtoul(buf, 10, &cmd);
		if (!rv && (cmd < 2)) {
			if (pmon->enabled == 1 && cmd == 0) {
				if (pmon->iommu_attach_count > 0)
					iommu_pm_off(pmon);
				else
					/* never turned on in hardware */
					pmon->enabled = 0;
			} else if (pmon->enabled == 0 && cmd == 1) {
				/* We can only turn on perf. monitoring if
				 * the iommu is attached. Delay turning on
				 * perf. monitoring until we are attached.
				 */
				if (pmon->iommu_attach_count > 0)
					iommu_pm_on(pmon);
				else
					pmon->enabled = 1;
			}
		} else {
			pr_err("Unknown performance monitor command: %lu\n",
			       cmd);
		}
	}
	mutex_unlock(&pmon->lock);
	return wr_cnt;
}

static const struct file_operations event_enable_file_ops = {
	.open = iommu_pm_debug_open,
	.read = iommu_pm_enable_counters_read,
	.write = iommu_pm_enable_counters_write,
};

static ssize_t iommu_pm_avail_event_cls_read(struct file *fp,
					     char __user *user_buff,
					     size_t count, loff_t *pos)
{
	size_t rd_cnt = 0;
	struct iommu_pmon *pmon = fp->private_data;
	char *buf;
	size_t len;

	mutex_lock(&pmon->lock);

	len = iommu_pm_create_sup_cls_str(&buf, pmon);
	if (buf) {
		rd_cnt = simple_read_from_buffer(user_buff, count, pos,
						 buf, len);
		kfree(buf);
	}
	mutex_unlock(&pmon->lock);
	return rd_cnt;
}

static const struct file_operations available_event_cls_file_ops = {
	.open = iommu_pm_debug_open,
	.read = iommu_pm_avail_event_cls_read,
};

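/*
 * The debugfs layout created below, per registered IOMMU:
 *
 *   iommu/<iommu_name>/reset_counters
 *   iommu/<iommu_name>/enable_counters
 *   iommu/<iommu_name>/available_event_classes
 *   iommu/<iommu_name>/group<i>/counter<j>/value
 *   iommu/<iommu_name>/group<i>/counter<j>/current_event_class
 *
 * A minimal usage sketch from a shell, assuming debugfs is mounted at
 * /sys/kernel/debug:
 *
 *   cd /sys/kernel/debug/iommu/<iommu_name>
 *   echo 1 > enable_counters
 *   echo tlb_refill > group0/counter0/current_event_class
 *   cat group0/counter0/value
 */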
static int iommu_pm_create_grp_debugfs_counters_hierarchy(
					struct iommu_pmon_cnt_group *cnt_grp,
					unsigned int *abs_counter_no)
{
	int ret = 0;
	int j;
	char name[20];

	for (j = 0; j < cnt_grp->num_counters; ++j) {
		struct dentry *grp_dir = cnt_grp->group_dir;
		struct dentry *counter_dir;
		cnt_grp->counters[j].cnt_group = cnt_grp;
		cnt_grp->counters[j].counter_no = j;
		cnt_grp->counters[j].absolute_counter_no = *abs_counter_no;
		(*abs_counter_no)++;
		cnt_grp->counters[j].value = 0;
		cnt_grp->counters[j].overflow_count = 0;
		cnt_grp->counters[j].current_event_class = NO_EVENT_CLASS;

		snprintf(name, 20, "counter%u", j);

		counter_dir = debugfs_create_dir(name, grp_dir);

		if (IS_ERR_OR_NULL(counter_dir)) {
			pr_err("unable to create counter debugfs dir %s\n",
			       name);
			ret = -ENOMEM;
			goto out;
		}

		cnt_grp->counters[j].counter_dir = counter_dir;

		if (!debugfs_create_file("value", 0644, counter_dir,
					 &cnt_grp->counters[j],
					 &cnt_value_file_ops)) {
			ret = -EIO;
			goto out;
		}

		if (!debugfs_create_file("current_event_class", 0644,
					 counter_dir, &cnt_grp->counters[j],
					 &event_class_file_ops)) {
			ret = -EIO;
			goto out;
		}
	}
out:
	return ret;
}

static int iommu_pm_create_group_debugfs_hierarchy(struct iommu_info *iommu,
					struct iommu_pmon *pmon_entry)
{
	int i;
	int ret = 0;
	char name[20];
	unsigned int abs_counter_no = 0;

	for (i = 0; i < pmon_entry->num_groups; ++i) {
		pmon_entry->cnt_grp[i].pmon = pmon_entry;
		pmon_entry->cnt_grp[i].grp_no = i;
		pmon_entry->cnt_grp[i].num_counters = pmon_entry->num_counters;
		pmon_entry->cnt_grp[i].counters =
			kzalloc(sizeof(*pmon_entry->cnt_grp[i].counters)
			* pmon_entry->cnt_grp[i].num_counters, GFP_KERNEL);

		if (!pmon_entry->cnt_grp[i].counters) {
			pr_err("Unable to allocate memory for counters\n");
			ret = -ENOMEM;
			goto out;
		}
		snprintf(name, 20, "group%u", i);
		pmon_entry->cnt_grp[i].group_dir = debugfs_create_dir(name,
						      pmon_entry->iommu_dir);
		if (IS_ERR_OR_NULL(pmon_entry->cnt_grp[i].group_dir)) {
			pr_err("unable to create group debugfs dir %s\n", name);
			ret = -ENOMEM;
			goto out;
		}

		ret = iommu_pm_create_grp_debugfs_counters_hierarchy(
						&pmon_entry->cnt_grp[i],
						&abs_counter_no);
		if (ret)
			goto out;
	}
out:
	return ret;
}

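/*
 * Register an IOMMU with the performance monitor: validate the iommu_info
 * the driver filled in, create the debugfs hierarchy, allocate the counter
 * groups, and request the overflow interrupt as a threaded IRQ when one is
 * available.
 */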
int msm_iommu_pm_iommu_register(struct iommu_pmon *pmon_entry)
{
	int ret = 0;
	struct iommu_info *iommu = &pmon_entry->iommu;
	int i;

	if (!iommu->ops || !iommu->iommu_name || !iommu->base
					|| !iommu->iommu_dev) {
		ret = -EINVAL;
		goto out;
	}

	if (!msm_iommu_root_debugfs_dir) {
		msm_iommu_root_debugfs_dir = debugfs_create_dir("iommu", NULL);
		if (IS_ERR_OR_NULL(msm_iommu_root_debugfs_dir)) {
			pr_err("Failed creating iommu debugfs dir \"iommu\"\n");
			ret = -EIO;
			goto out;
		}
	}

	pmon_entry->cnt_grp = kzalloc(sizeof(*pmon_entry->cnt_grp)
				      * pmon_entry->num_groups, GFP_KERNEL);
	if (!pmon_entry->cnt_grp) {
		pr_err("Unable to allocate memory for counter groups\n");
		ret = -ENOMEM;
		goto file_err;
	}
	pmon_entry->iommu_dir = debugfs_create_dir(iommu->iommu_name,
						   msm_iommu_root_debugfs_dir);
	if (IS_ERR_OR_NULL(pmon_entry->iommu_dir)) {
		pr_err("unable to create iommu debugfs dir %s\n",
		       iommu->iommu_name);
		ret = -ENOMEM;
		goto free_mem;
	}

	if (!debugfs_create_file("reset_counters", 0644,
			pmon_entry->iommu_dir, pmon_entry, &reset_file_ops)) {
		ret = -EIO;
		goto free_mem;
	}

	if (!debugfs_create_file("enable_counters", 0644,
		pmon_entry->iommu_dir, pmon_entry, &event_enable_file_ops)) {
		ret = -EIO;
		goto free_mem;
	}

	if (!debugfs_create_file("available_event_classes", 0644,
			pmon_entry->iommu_dir, pmon_entry,
			&available_event_cls_file_ops)) {
		ret = -EIO;
		goto free_mem;
	}

	ret = iommu_pm_create_group_debugfs_hierarchy(iommu, pmon_entry);
	if (ret)
		goto free_mem;

	if (iommu->evt_irq > 0) {
		ret = request_threaded_irq(iommu->evt_irq, NULL,
				iommu->hw_ops->evt_ovfl_int_handler,
				IRQF_ONESHOT | IRQF_SHARED,
				"msm_iommu_nonsecure_irq", pmon_entry);
		if (ret) {
			pr_err("Request IRQ %d failed with ret=%d\n",
			       iommu->evt_irq, ret);
			goto free_mem;
		}
	} else {
		pr_info("%s: Overflow interrupt not available\n", __func__);
	}

	dev_dbg(iommu->iommu_dev, "%s iommu registered\n", iommu->iommu_name);

	goto out;
free_mem:
	if (pmon_entry->cnt_grp) {
		for (i = 0; i < pmon_entry->num_groups; ++i) {
			kfree(pmon_entry->cnt_grp[i].counters);
			pmon_entry->cnt_grp[i].counters = NULL;
		}
	}
	kfree(pmon_entry->cnt_grp);
	pmon_entry->cnt_grp = NULL;
file_err:
	debugfs_remove_recursive(msm_iommu_root_debugfs_dir);
out:
	return ret;
}
EXPORT_SYMBOL(msm_iommu_pm_iommu_register);

void msm_iommu_pm_iommu_unregister(struct device *dev)
{
	int i;
	struct iommu_pmon *pmon_entry = iommu_pm_get_pm_by_dev(dev);

	if (!pmon_entry)
		return;

	/* The overflow IRQ was requested with pmon_entry as its cookie, so
	 * it must be freed with the same cookie, and only if it was
	 * actually requested.
	 */
	if (pmon_entry->iommu.evt_irq > 0)
		free_irq(pmon_entry->iommu.evt_irq, pmon_entry);

	if (pmon_entry->cnt_grp) {
		for (i = 0; i < pmon_entry->num_groups; ++i)
			kfree(pmon_entry->cnt_grp[i].counters);
	}

	kfree(pmon_entry->cnt_grp);

	debugfs_remove_recursive(msm_iommu_root_debugfs_dir);
}
EXPORT_SYMBOL(msm_iommu_pm_iommu_unregister);

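/*
 * Typical lifecycle for an IOMMU driver using this monitor:
 * msm_iommu_pm_alloc() and msm_iommu_pm_iommu_register() at probe time,
 * msm_iommu_attached()/msm_iommu_detached() on domain attach and detach,
 * then msm_iommu_pm_iommu_unregister() and msm_iommu_pm_free() on removal.
 */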
struct iommu_pmon *msm_iommu_pm_alloc(struct device *dev)
{
	struct iommu_pmon *pmon_entry;
	struct iommu_info *info;
	pmon_entry = devm_kzalloc(dev, sizeof(*pmon_entry), GFP_KERNEL);
	if (!pmon_entry)
		return NULL;
	info = &pmon_entry->iommu;
	info->iommu_dev = dev;
	mutex_init(&pmon_entry->lock);
	iommu_pm_add_to_iommu_list(pmon_entry);
	return pmon_entry;
}
EXPORT_SYMBOL(msm_iommu_pm_alloc);

void msm_iommu_pm_free(struct device *dev)
{
	struct iommu_pmon *pmon = iommu_pm_get_pm_by_dev(dev);
	if (pmon)
		iommu_pm_del_from_iommu_list(pmon);
}
EXPORT_SYMBOL(msm_iommu_pm_free);

void msm_iommu_attached(struct device *dev)
{
	struct iommu_pmon *pmon = iommu_pm_get_pm_by_dev(dev);
	if (pmon) {
		mutex_lock(&pmon->lock);
		++pmon->iommu_attach_count;
		if (pmon->iommu_attach_count == 1) {
			/* If perf. mon was enabled before we attached, do
			 * the actual enabling now that we are attached.
			 */
			if (pmon->enabled)
				iommu_pm_on(pmon);
		}
		mutex_unlock(&pmon->lock);
	}
}
EXPORT_SYMBOL(msm_iommu_attached);

void msm_iommu_detached(struct device *dev)
{
	struct iommu_pmon *pmon = iommu_pm_get_pm_by_dev(dev);
	if (pmon) {
		mutex_lock(&pmon->lock);
		if (pmon->iommu_attach_count == 1) {
			/* If perf. mon is still enabled we have to disable
			 * it before we do the detach.
			 */
			if (pmon->enabled)
				iommu_pm_off(pmon);
		}
		BUG_ON(pmon->iommu_attach_count == 0);
		--pmon->iommu_attach_count;
		mutex_unlock(&pmon->lock);
	}
}
EXPORT_SYMBOL(msm_iommu_detached);