/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/iommu.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <mach/iommu.h>
#include <mach/iommu_perfmon.h>

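/*
 * This module exposes the IOMMU performance counters through debugfs.
 * With debugfs mounted (typically at /sys/kernel/debug), the hierarchy
 * built below looks like:
 *
 *	iommu/<iommu_name>/enable_counters
 *	iommu/<iommu_name>/reset_counters
 *	iommu/<iommu_name>/available_event_classes
 *	iommu/<iommu_name>/group<n>/counter<m>/value
 *	iommu/<iommu_name>/group<n>/counter<m>/current_event_class
 *
 * Writing "1" to enable_counters turns monitoring on, and writing an
 * event class number (or name) to a counter's current_event_class file
 * assigns that event to the counter.
 */
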
static LIST_HEAD(iommu_list);
static struct dentry *msm_iommu_root_debugfs_dir;
static const char *NO_EVENT_CLASS_NAME = "none";
static const unsigned int MAX_EVENT_CLASS_NAME_LEN = 36;

struct event_class {
	unsigned int event_number;
	const char *desc;
};

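/*
 * Event classes the performance monitor can count. The numbers are the
 * hardware event encodings; the names are what user space reads from
 * available_event_classes and may write to current_event_class.
 */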
static struct event_class pmu_event_classes[] = {
	{ 0x00, "cycle_count" },
	{ 0x01, "cycle_count64" },
	{ 0x08, "tlb_refill" },
	{ 0x09, "tlb_refill_read" },
	{ 0x0A, "tlb_refill_write" },
	{ 0x10, "access" },
	{ 0x11, "access_read" },
	{ 0x12, "access_write" },
	{ 0x80, "full_misses" },
	{ 0x81, "partial_miss_1lbfb_hit" },
	{ 0x82, "partial_miss_2lbfb_hit" },
	{ 0x83, "full_hit" },
	{ 0x90, "pred_req_full_miss" },
	{ 0x91, "pred_req_partial_miss_1lbfb_hit" },
	{ 0x92, "pred_req_partial_miss_2lbfb_hit" },
	{ 0xb0, "tot_num_miss_axi_htw_read_req" },
	{ 0xb1, "tot_num_pred_axi_htw_read_req" },
};

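/*
 * Build a newline-separated "[number] name" list of the event classes
 * supported by this monitor instance. The buffer is kzalloc'd into *buf
 * (NULL on allocation failure) and must be freed by the caller; the
 * return value is the length of the generated string.
 */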
static unsigned int iommu_pm_create_sup_cls_str(char **buf,
						struct iommu_pmon *pmon)
{
	unsigned long buf_size = ARRAY_SIZE(pmu_event_classes) *
				 MAX_EVENT_CLASS_NAME_LEN;
	unsigned int pos = 0;
	unsigned int nevent_cls = pmon->nevent_cls_supported;

	*buf = kzalloc(buf_size, GFP_KERNEL);
	if (*buf) {
		unsigned int j;
		int i;
		struct event_class *ptr;
		size_t array_len = ARRAY_SIZE(pmu_event_classes);
		ptr = pmu_event_classes;

		for (j = 0; j < nevent_cls; ++j) {
			for (i = 0; i < array_len; ++i) {
				if (ptr[i].event_number !=
						pmon->event_cls_supported[j])
					continue;

				if (pos < buf_size) {
					pos += snprintf(&(*buf)[pos],
							buf_size - pos,
							"[%u] %s\n",
							ptr[i].event_number,
							ptr[i].desc);
				}
				break;
			}
		}
	}
	return pos;
}

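/* Map an event class number to its name; "none" if negative or unknown. */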
static const char *iommu_pm_find_event_class_name(int event_class)
{
	size_t array_len;
	struct event_class *ptr;
	int i;
	const char *event_class_name = NO_EVENT_CLASS_NAME;

	if (event_class < 0)
		goto out;

	array_len = ARRAY_SIZE(pmu_event_classes);
	ptr = pmu_event_classes;

	for (i = 0; i < array_len; ++i) {
		if (ptr[i].event_number == event_class) {
			event_class_name = ptr[i].desc;
			break;
		}
	}

out:
	return event_class_name;
}

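/*
 * Map an event class name back to its number; MSM_IOMMU_PMU_NO_EVENT_CLASS
 * if the name is "none" or not recognized.
 */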
static int iommu_pm_find_event_class(const char *event_class_name)
{
	size_t array_len;
	struct event_class *ptr;
	int i;
	int event_class = MSM_IOMMU_PMU_NO_EVENT_CLASS;

	if (strcmp(event_class_name, NO_EVENT_CLASS_NAME) == 0)
		goto out;

	array_len = ARRAY_SIZE(pmu_event_classes);
	ptr = pmu_event_classes;

	for (i = 0; i < array_len; ++i) {
		if (strcmp(ptr[i].desc, event_class_name) == 0) {
			event_class = ptr[i].event_number;
			goto out;
		}
	}

out:
	return event_class;
}

static inline void iommu_pm_add_to_iommu_list(struct iommu_pmon *iommu_pmon)
{
	list_add(&iommu_pmon->iommu_list, &iommu_list);
}

static inline void iommu_pm_del_from_iommu_list(struct iommu_pmon *iommu_pmon)
{
	list_del(&iommu_pmon->iommu_list);
}

static struct iommu_pmon *iommu_pm_get_pm_by_dev(struct device *dev)
{
	struct iommu_pmon *pmon;
	struct iommu_info *info;
	struct list_head *ent;

	list_for_each(ent, &iommu_list) {
		pmon = list_entry(ent, struct iommu_pmon, iommu_list);
		info = &pmon->iommu;
		if (dev == info->iommu_dev)
			return pmon;
	}
	return NULL;
}

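/*
 * Program a counter with its current event class. Clearing the class
 * (MSM_IOMMU_PMU_NO_EVENT_CLASS) disables the counter and its overflow
 * interrupt; anything else selects the event and enables both. The
 * hardware is only touched when it is safe to access (clocks and power
 * on), and the cached software counts are reset either way.
 */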
static void iommu_pm_set_event_type(struct iommu_pmon *pmon,
				    struct iommu_pmon_counter *counter)
{
	int event_class;
	unsigned int count_no;
	struct iommu_info *iommu = &pmon->iommu;

	event_class = counter->current_event_class;
	count_no = counter->absolute_counter_no;

	if (event_class == MSM_IOMMU_PMU_NO_EVENT_CLASS) {
		if (iommu->hw_ops->is_hw_access_OK(pmon)) {
			iommu->ops->iommu_lock_acquire();
			iommu->hw_ops->counter_disable(iommu, counter);
			iommu->hw_ops->ovfl_int_disable(iommu, counter);
			iommu->hw_ops->set_event_class(pmon, count_no, 0);
			iommu->ops->iommu_lock_release();
		}
		counter->overflow_count = 0;
		counter->value = 0;
	} else {
		counter->overflow_count = 0;
		counter->value = 0;
		if (iommu->hw_ops->is_hw_access_OK(pmon)) {
			iommu->ops->iommu_lock_acquire();
			iommu->hw_ops->set_event_class(pmon, count_no,
						       event_class);
			iommu->hw_ops->ovfl_int_enable(iommu, counter);
			iommu->hw_ops->counter_enable(iommu, counter);
			iommu->ops->iommu_lock_release();
		}
	}
}

static void iommu_pm_reset_counts(struct iommu_pmon *pmon)
{
	unsigned int i;
	unsigned int j;

	for (i = 0; i < pmon->num_groups; ++i) {
		struct iommu_pmon_cnt_group *cnt_grp = &pmon->cnt_grp[i];

		for (j = 0; j < cnt_grp->num_counters; ++j) {
			cnt_grp->counters[j].value = 0;
			cnt_grp->counters[j].overflow_count = 0;
		}
	}
}

static void iommu_pm_set_all_counters(struct iommu_pmon *pmon)
{
	unsigned int i;
	unsigned int j;

	for (i = 0; i < pmon->num_groups; ++i) {
		struct iommu_pmon_cnt_group *cnt_grp = &pmon->cnt_grp[i];

		for (j = 0; j < cnt_grp->num_counters; ++j)
			iommu_pm_set_event_type(pmon, &cnt_grp->counters[j]);
	}
}

222static void iommu_pm_read_all_counters(struct iommu_pmon *pmon)
223{
224 unsigned int i;
225 unsigned int j;
Olav Hauganef69e892013-02-04 13:47:08 -0800226 struct iommu_info *iommu = &pmon->iommu;
Olav Haugan99660ca2012-12-04 13:30:41 -0800227 for (i = 0; i < pmon->num_groups; ++i) {
228 struct iommu_pmon_cnt_group *cnt_grp = &pmon->cnt_grp[i];
229 for (j = 0; j < cnt_grp->num_counters; ++j) {
230 struct iommu_pmon_counter *counter;
231 counter = &cnt_grp->counters[j];
Olav Hauganef69e892013-02-04 13:47:08 -0800232 counter->value = iommu->hw_ops->read_counter(counter);
Olav Haugan99660ca2012-12-04 13:30:41 -0800233 }
234 }
235}
236
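/*
 * Turn monitoring on: power and clock the IOMMU, reset the hardware and
 * software counters, program each counter's event class, then enable
 * every counter group and the global counters.
 */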
static void iommu_pm_on(struct iommu_pmon *pmon)
{
	unsigned int i;
	struct iommu_info *iommu = &pmon->iommu;
	struct msm_iommu_drvdata *iommu_drvdata =
					dev_get_drvdata(iommu->iommu_dev);

	iommu->ops->iommu_power_on(iommu_drvdata);
	iommu->ops->iommu_clk_on(iommu_drvdata);

	/* Reset counters in HW */
	iommu->ops->iommu_lock_acquire();
	iommu->hw_ops->reset_counters(&pmon->iommu);
	iommu->ops->iommu_lock_release();

	/* Reset SW counters */
	iommu_pm_reset_counts(pmon);

	pmon->enabled = 1;

	iommu_pm_set_all_counters(pmon);

	iommu->ops->iommu_lock_acquire();

	/* Enable all counter groups */
	for (i = 0; i < pmon->num_groups; ++i)
		iommu->hw_ops->grp_enable(iommu, i);

	/* Enable global counters */
	iommu->hw_ops->enable_pm(iommu);
	iommu->ops->iommu_lock_release();

	pr_info("%s: TLB performance monitoring turned ON\n",
		pmon->iommu.iommu_name);
}

static void iommu_pm_off(struct iommu_pmon *pmon)
{
	unsigned int i;
	struct iommu_info *iommu = &pmon->iommu;
	struct msm_iommu_drvdata *iommu_drvdata =
					dev_get_drvdata(iommu->iommu_dev);

	pmon->enabled = 0;

	iommu->ops->iommu_lock_acquire();

	/* Disable global counters */
	iommu->hw_ops->disable_pm(iommu);

	/* Check if we overflowed just before turning off pmon */
	iommu->hw_ops->check_for_overflow(pmon);

	/* Disable all counter groups */
	for (i = 0; i < pmon->num_groups; ++i)
		iommu->hw_ops->grp_disable(iommu, i);

	/* Update cached copy of counters before turning off power */
	iommu_pm_read_all_counters(pmon);

	iommu->ops->iommu_lock_release();
	iommu->ops->iommu_clk_off(iommu_drvdata);
	iommu->ops->iommu_power_off(iommu_drvdata);

	pr_info("%s: TLB performance monitoring turned OFF\n",
		pmon->iommu.iommu_name);
}

static int iommu_pm_debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

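/*
 * Report the full 64-bit count: the latest hardware snapshot (when the
 * hardware is accessible) plus 2^32 for every recorded overflow.
 */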
static ssize_t iommu_pm_count_value_read(struct file *fp,
					 char __user *user_buff,
					 size_t count, loff_t *pos)
{
	size_t rd_cnt;
	unsigned long long full_count;

	struct iommu_pmon_counter *counter = fp->private_data;
	struct iommu_pmon *pmon = counter->cnt_group->pmon;
	struct iommu_info *iommu = &pmon->iommu;
	char buf[50];
	size_t len;

	mutex_lock(&pmon->lock);

	if (iommu->hw_ops->is_hw_access_OK(pmon)) {
		iommu->ops->iommu_lock_acquire();
		counter->value = iommu->hw_ops->read_counter(counter);
		iommu->ops->iommu_lock_release();
	}
	full_count = (unsigned long long) counter->value +
		     ((unsigned long long) counter->overflow_count *
		      0x100000000ULL);

	len = snprintf(buf, sizeof(buf), "%llu\n", full_count);
	rd_cnt = simple_read_from_buffer(user_buff, count, pos, buf, len);
	mutex_unlock(&pmon->lock);

	return rd_cnt;
}

static const struct file_operations cnt_value_file_ops = {
	.open = iommu_pm_debug_open,
	.read = iommu_pm_count_value_read,
};

static ssize_t iommu_pm_event_class_read(struct file *fp,
					 char __user *user_buff,
					 size_t count, loff_t *pos)
{
	size_t rd_cnt;
	struct iommu_pmon_counter *counter = fp->private_data;
	struct iommu_pmon *pmon = counter->cnt_group->pmon;
	char buf[50];
	const char *event_class_name;
	size_t len;

	mutex_lock(&pmon->lock);
	event_class_name = iommu_pm_find_event_class_name(
						counter->current_event_class);
	len = snprintf(buf, sizeof(buf), "%s\n", event_class_name);

	rd_cnt = simple_read_from_buffer(user_buff, count, pos, buf, len);
	mutex_unlock(&pmon->lock);
	return rd_cnt;
}

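/*
 * Accept either a decimal event class number or an event class name.
 * Numeric input is normalized through the number -> name -> number
 * mapping so that unsupported values fall back to "no event class";
 * anything non-numeric is looked up as a name. The counter is only
 * reprogrammed when the class actually changed.
 */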
static ssize_t iommu_pm_event_class_write(struct file *fp,
					  const char __user *user_buff,
					  size_t count, loff_t *pos)
{
	size_t wr_cnt;
	char buf[50];
	size_t buf_size = sizeof(buf);
	struct iommu_pmon_counter *counter = fp->private_data;
	struct iommu_pmon *pmon = counter->cnt_group->pmon;
	int current_event_class;

	if ((count + *pos) >= buf_size)
		return -EINVAL;

	mutex_lock(&pmon->lock);
	current_event_class = counter->current_event_class;
	wr_cnt = simple_write_to_buffer(buf, buf_size, pos, user_buff, count);
	if (wr_cnt >= 1) {
		int rv;
		long value;

		buf[wr_cnt - 1] = '\0';
		rv = kstrtol(buf, 10, &value);
		if (!rv) {
			counter->current_event_class =
				iommu_pm_find_event_class(
					iommu_pm_find_event_class_name(value));
		} else {
			counter->current_event_class =
				iommu_pm_find_event_class(buf);
		}
	}

	if (current_event_class != counter->current_event_class)
		iommu_pm_set_event_type(pmon, counter);

	mutex_unlock(&pmon->lock);
	return wr_cnt;
}

static const struct file_operations event_class_file_ops = {
	.open = iommu_pm_debug_open,
	.read = iommu_pm_event_class_read,
	.write = iommu_pm_event_class_write,
};

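/* Writing "1" resets the hardware (when accessible) and software counters. */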
static ssize_t iommu_reset_counters_write(struct file *fp,
					  const char __user *user_buff,
					  size_t count, loff_t *pos)
{
	size_t wr_cnt;
	char buf[10];
	size_t buf_size = sizeof(buf);
	struct iommu_pmon *pmon = fp->private_data;
	struct iommu_info *iommu = &pmon->iommu;

	if ((count + *pos) >= buf_size)
		return -EINVAL;

	mutex_lock(&pmon->lock);
	wr_cnt = simple_write_to_buffer(buf, buf_size, pos, user_buff, count);
	if (wr_cnt >= 1) {
		unsigned long cmd = 0;
		int rv;

		buf[wr_cnt - 1] = '\0';
		rv = kstrtoul(buf, 10, &cmd);
		if (!rv && (cmd == 1)) {
			if (iommu->hw_ops->is_hw_access_OK(pmon)) {
				iommu->ops->iommu_lock_acquire();
				iommu->hw_ops->reset_counters(&pmon->iommu);
				iommu->ops->iommu_lock_release();
			}
			iommu_pm_reset_counts(pmon);
			pr_info("TLB performance counters reset\n");
		} else {
			pr_err("Unknown performance monitor command: %lu\n",
			       cmd);
		}
	}
	mutex_unlock(&pmon->lock);
	return wr_cnt;
}

static const struct file_operations reset_file_ops = {
	.open = iommu_pm_debug_open,
	.write = iommu_reset_counters_write,
};

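/*
 * enable_counters: reading returns the current on/off state; writing
 * "1" or "0" turns monitoring on or off. Enabling is deferred until an
 * IOMMU is attached (see iommu_pm_enable_counters_write below).
 */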
static ssize_t iommu_pm_enable_counters_read(struct file *fp,
					     char __user *user_buff,
					     size_t count, loff_t *pos)
{
	size_t rd_cnt;
	char buf[5];
	size_t len;
	struct iommu_pmon *pmon = fp->private_data;

	mutex_lock(&pmon->lock);
	len = snprintf(buf, sizeof(buf), "%u\n", pmon->enabled);
	rd_cnt = simple_read_from_buffer(user_buff, count, pos, buf, len);
	mutex_unlock(&pmon->lock);
	return rd_cnt;
}

static ssize_t iommu_pm_enable_counters_write(struct file *fp,
					      const char __user *user_buff,
					      size_t count, loff_t *pos)
{
	size_t wr_cnt;
	char buf[10];
	size_t buf_size = sizeof(buf);
	struct iommu_pmon *pmon = fp->private_data;

	if ((count + *pos) >= buf_size)
		return -EINVAL;

	mutex_lock(&pmon->lock);
	wr_cnt = simple_write_to_buffer(buf, buf_size, pos, user_buff, count);
	if (wr_cnt >= 1) {
		unsigned long cmd;
		int rv;

		buf[wr_cnt - 1] = '\0';
		rv = kstrtoul(buf, 10, &cmd);
		if (!rv && (cmd < 2)) {
			if (pmon->enabled == 1 && cmd == 0) {
				if (pmon->iommu_attach_count > 0)
					iommu_pm_off(pmon);
			} else if (pmon->enabled == 0 && cmd == 1) {
				/* We can only turn on perf. monitoring if
				 * the IOMMU is attached. Delay turning it
				 * on until we are attached.
				 */
				if (pmon->iommu_attach_count > 0)
					iommu_pm_on(pmon);
				else
					pmon->enabled = 1;
			}
		} else {
			pr_err("Unknown performance monitor command: %lu\n",
			       cmd);
		}
	}
	mutex_unlock(&pmon->lock);
	return wr_cnt;
}

static const struct file_operations event_enable_file_ops = {
	.open = iommu_pm_debug_open,
	.read = iommu_pm_enable_counters_read,
	.write = iommu_pm_enable_counters_write,
};

static ssize_t iommu_pm_avail_event_cls_read(struct file *fp,
					     char __user *user_buff,
					     size_t count, loff_t *pos)
{
	size_t rd_cnt = 0;
	struct iommu_pmon *pmon = fp->private_data;
	char *buf;
	size_t len;

	mutex_lock(&pmon->lock);

	len = iommu_pm_create_sup_cls_str(&buf, pmon);
	if (buf) {
		rd_cnt = simple_read_from_buffer(user_buff, count, pos,
						 buf, len);
		kfree(buf);
	}
	mutex_unlock(&pmon->lock);
	return rd_cnt;
}

static const struct file_operations available_event_cls_file_ops = {
	.open = iommu_pm_debug_open,
	.read = iommu_pm_avail_event_cls_read,
};

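/*
 * Create the per-counter debugfs nodes for one counter group:
 * counter<j>/value and counter<j>/current_event_class.
 */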
static int iommu_pm_create_grp_debugfs_counters_hierarchy(
					struct iommu_pmon_cnt_group *cnt_grp,
					unsigned int *abs_counter_no)
{
	int ret = 0;
	int j;
	char name[20];

	for (j = 0; j < cnt_grp->num_counters; ++j) {
		struct dentry *grp_dir = cnt_grp->group_dir;
		struct dentry *counter_dir;

		cnt_grp->counters[j].cnt_group = cnt_grp;
		cnt_grp->counters[j].counter_no = j;
		cnt_grp->counters[j].absolute_counter_no = *abs_counter_no;
		(*abs_counter_no)++;
		cnt_grp->counters[j].value = 0;
		cnt_grp->counters[j].overflow_count = 0;
		cnt_grp->counters[j].current_event_class =
						MSM_IOMMU_PMU_NO_EVENT_CLASS;

		snprintf(name, sizeof(name), "counter%u", j);

		counter_dir = debugfs_create_dir(name, grp_dir);

		if (IS_ERR_OR_NULL(counter_dir)) {
			pr_err("unable to create counter debugfs dir %s\n",
			       name);
			ret = -ENOMEM;
			goto out;
		}

		cnt_grp->counters[j].counter_dir = counter_dir;

		if (!debugfs_create_file("value", 0644, counter_dir,
					 &cnt_grp->counters[j],
					 &cnt_value_file_ops)) {
			ret = -EIO;
			goto out;
		}

		if (!debugfs_create_file("current_event_class", 0644,
					 counter_dir, &cnt_grp->counters[j],
					 &event_class_file_ops)) {
			ret = -EIO;
			goto out;
		}
	}
out:
	return ret;
}

static int iommu_pm_create_group_debugfs_hierarchy(struct iommu_info *iommu,
						   struct iommu_pmon *pmon_entry)
{
	int i;
	int ret = 0;
	char name[20];
	unsigned int abs_counter_no = 0;

	for (i = 0; i < pmon_entry->num_groups; ++i) {
		pmon_entry->cnt_grp[i].pmon = pmon_entry;
		pmon_entry->cnt_grp[i].grp_no = i;
		pmon_entry->cnt_grp[i].num_counters = pmon_entry->num_counters;
		pmon_entry->cnt_grp[i].counters =
			kzalloc(sizeof(*pmon_entry->cnt_grp[i].counters)
				* pmon_entry->cnt_grp[i].num_counters,
				GFP_KERNEL);

		if (!pmon_entry->cnt_grp[i].counters) {
			pr_err("Unable to allocate memory for counters\n");
			ret = -ENOMEM;
			goto out;
		}
		snprintf(name, sizeof(name), "group%u", i);
		pmon_entry->cnt_grp[i].group_dir = debugfs_create_dir(name,
						      pmon_entry->iommu_dir);
		if (IS_ERR_OR_NULL(pmon_entry->cnt_grp[i].group_dir)) {
			pr_err("unable to create group debugfs dir %s\n",
			       name);
			ret = -ENOMEM;
			goto out;
		}

		ret = iommu_pm_create_grp_debugfs_counters_hierarchy(
						&pmon_entry->cnt_grp[i],
						&abs_counter_no);
		if (ret)
			goto out;
	}
out:
	return ret;
}

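/*
 * Register an IOMMU with the performance monitor: validate the filled-in
 * iommu_pmon, build the debugfs hierarchy, initialize the monitor
 * hardware and, when available, request the counter overflow interrupt.
 */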
int msm_iommu_pm_iommu_register(struct iommu_pmon *pmon_entry)
{
	int ret = 0;
	struct iommu_info *iommu = &pmon_entry->iommu;
	int i;

	if (!iommu->ops || !iommu->iommu_name || !iommu->base
					|| !iommu->iommu_dev) {
		ret = -EINVAL;
		goto out;
	}

	if (!msm_iommu_root_debugfs_dir) {
		msm_iommu_root_debugfs_dir = debugfs_create_dir("iommu", NULL);
		if (IS_ERR_OR_NULL(msm_iommu_root_debugfs_dir)) {
			pr_err("Failed creating iommu debugfs dir \"iommu\"\n");
			ret = -EIO;
			goto out;
		}
	}

	pmon_entry->cnt_grp = kzalloc(sizeof(*pmon_entry->cnt_grp)
				      * pmon_entry->num_groups, GFP_KERNEL);
	if (!pmon_entry->cnt_grp) {
		pr_err("Unable to allocate memory for counter groups\n");
		ret = -ENOMEM;
		goto file_err;
	}
	pmon_entry->iommu_dir = debugfs_create_dir(iommu->iommu_name,
						   msm_iommu_root_debugfs_dir);
	if (IS_ERR_OR_NULL(pmon_entry->iommu_dir)) {
		pr_err("unable to create iommu debugfs dir %s\n",
		       iommu->iommu_name);
		ret = -ENOMEM;
		goto free_mem;
	}

	if (!debugfs_create_file("reset_counters", 0644,
			pmon_entry->iommu_dir, pmon_entry, &reset_file_ops)) {
		ret = -EIO;
		goto free_mem;
	}

	if (!debugfs_create_file("enable_counters", 0644,
		pmon_entry->iommu_dir, pmon_entry, &event_enable_file_ops)) {
		ret = -EIO;
		goto free_mem;
	}

	if (!debugfs_create_file("available_event_classes", 0644,
				 pmon_entry->iommu_dir, pmon_entry,
				 &available_event_cls_file_ops)) {
		ret = -EIO;
		goto free_mem;
	}

	ret = iommu_pm_create_group_debugfs_hierarchy(iommu, pmon_entry);
	if (ret)
		goto free_mem;

	iommu->hw_ops->initialize_hw(pmon_entry);

	if (iommu->evt_irq > 0) {
		ret = request_threaded_irq(iommu->evt_irq, NULL,
					   iommu->hw_ops->evt_ovfl_int_handler,
					   IRQF_ONESHOT | IRQF_SHARED,
					   "msm_iommu_pmon_nonsecure_irq",
					   pmon_entry);
		if (ret) {
			pr_err("Request IRQ %d failed with ret=%d\n",
			       iommu->evt_irq, ret);
			goto free_mem;
		}
	} else {
		pr_info("%s: Overflow interrupt not available\n", __func__);
	}

	dev_dbg(iommu->iommu_dev, "%s iommu registered\n", iommu->iommu_name);

	goto out;
free_mem:
	if (pmon_entry->cnt_grp) {
		for (i = 0; i < pmon_entry->num_groups; ++i) {
			kfree(pmon_entry->cnt_grp[i].counters);
			pmon_entry->cnt_grp[i].counters = NULL;
		}
	}
	kfree(pmon_entry->cnt_grp);
	pmon_entry->cnt_grp = NULL;
file_err:
	debugfs_remove_recursive(msm_iommu_root_debugfs_dir);
out:
	return ret;
}
EXPORT_SYMBOL(msm_iommu_pm_iommu_register);

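/* Tear down the IRQ, counter allocations and debugfs state for a device. */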
void msm_iommu_pm_iommu_unregister(struct device *dev)
{
	int i;
	struct iommu_pmon *pmon_entry = iommu_pm_get_pm_by_dev(dev);

	if (!pmon_entry)
		return;

	/*
	 * The overflow IRQ was requested with pmon_entry as its dev_id
	 * cookie, so it must be freed with the same cookie, and only if
	 * it was actually requested.
	 */
	if (pmon_entry->iommu.evt_irq > 0)
		free_irq(pmon_entry->iommu.evt_irq, pmon_entry);

	if (pmon_entry->cnt_grp) {
		for (i = 0; i < pmon_entry->num_groups; ++i)
			kfree(pmon_entry->cnt_grp[i].counters);
	}

	kfree(pmon_entry->cnt_grp);

	debugfs_remove_recursive(msm_iommu_root_debugfs_dir);
}
EXPORT_SYMBOL(msm_iommu_pm_iommu_unregister);

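/*
 * Allocate the per-device monitor state. A minimal sketch of the
 * expected registration flow in an IOMMU driver (the field setup shown
 * is illustrative; see mach/iommu_perfmon.h for the full structure):
 *
 *	struct iommu_pmon *pmon = msm_iommu_pm_alloc(dev);
 *
 *	if (pmon) {
 *		pmon->iommu.ops = ...;      (lock/clock/power callbacks)
 *		pmon->iommu.hw_ops = ...;   (counter register accessors)
 *		pmon->iommu.iommu_name = ...;
 *		pmon->iommu.base = ...;
 *		pmon->iommu.evt_irq = ...;
 *		pmon->num_groups = ...;
 *		pmon->num_counters = ...;
 *		msm_iommu_pm_iommu_register(pmon);
 *	}
 */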
struct iommu_pmon *msm_iommu_pm_alloc(struct device *dev)
{
	struct iommu_pmon *pmon_entry;
	struct iommu_info *info;

	pmon_entry = devm_kzalloc(dev, sizeof(*pmon_entry), GFP_KERNEL);
	if (!pmon_entry)
		return NULL;
	info = &pmon_entry->iommu;
	info->iommu_dev = dev;
	mutex_init(&pmon_entry->lock);
	iommu_pm_add_to_iommu_list(pmon_entry);
	return pmon_entry;
}
EXPORT_SYMBOL(msm_iommu_pm_alloc);

void msm_iommu_pm_free(struct device *dev)
{
	struct iommu_pmon *pmon = iommu_pm_get_pm_by_dev(dev);

	if (pmon)
		iommu_pm_del_from_iommu_list(pmon);
}
EXPORT_SYMBOL(msm_iommu_pm_free);

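/*
 * Attach/detach notifications from the IOMMU driver. Monitoring only
 * runs while a domain is attached, so a pending enable is applied on
 * the first attach and an active monitor is stopped on the last detach.
 */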
void msm_iommu_attached(struct device *dev)
{
	struct iommu_pmon *pmon = iommu_pm_get_pm_by_dev(dev);

	if (pmon) {
		mutex_lock(&pmon->lock);
		++pmon->iommu_attach_count;
		if (pmon->iommu_attach_count == 1) {
			/* If perf. mon. was enabled before we attached,
			 * do the actual enabling now that we are attached.
			 */
			if (pmon->enabled)
				iommu_pm_on(pmon);
		}
		mutex_unlock(&pmon->lock);
	}
}
EXPORT_SYMBOL(msm_iommu_attached);

void msm_iommu_detached(struct device *dev)
{
	struct iommu_pmon *pmon = iommu_pm_get_pm_by_dev(dev);

	if (pmon) {
		mutex_lock(&pmon->lock);
		if (pmon->iommu_attach_count == 1) {
			/* If perf. mon. is still enabled, disable it
			 * before we do the detach.
			 */
			if (pmon->enabled)
				iommu_pm_off(pmon);
		}
		BUG_ON(pmon->iommu_attach_count == 0);
		--pmon->iommu_attach_count;
		mutex_unlock(&pmon->lock);
	}
}
EXPORT_SYMBOL(msm_iommu_detached);