/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/string.h>
17#include <linux/iommu.h>
18#include <linux/slab.h>
19#include <linux/device.h>
20#include <linux/interrupt.h>
21#include <linux/bitops.h>
22#include <linux/debugfs.h>
Olav Haugan99660ca2012-12-04 13:30:41 -080023#include <mach/iommu.h>
24#include <mach/iommu_perfmon.h>
25
/* All registered perf-monitor instances, linked via iommu_pmon.iommu_list. */
static LIST_HEAD(iommu_list);
/* Shared debugfs root directory ("iommu"); created lazily on first register. */
static struct dentry *msm_iommu_root_debugfs_dir;
/* Name reported/accepted when a counter has no event class assigned. */
static const char *NO_EVENT_CLASS_NAME = "none";
/* Worst-case length of one "[<num>] <name>\n" line in the class listing. */
static const unsigned int MAX_EVEN_CLASS_NAME_LEN = 36;
30
/* One selectable PMU event class: hardware event number plus its name. */
struct event_class {
	unsigned int event_number;
	const char *desc;
};
35
/*
 * Full table of event classes this driver knows about.  A given PMU
 * instance supports only the subset listed in its
 * iommu_pmon.event_cls_supported[] array.
 */
static struct event_class pmu_event_classes[] = {
	{ 0x00, "cycle_count" },
	{ 0x01, "cycle_count64" },
	{ 0x08, "tlb_refill" },
	{ 0x09, "tlb_refill_read" },
	{ 0x0A, "tlb_refill_write" },
	{ 0x10, "access" },
	{ 0x11, "access_read" },
	{ 0x12, "access_write" },
	{ 0x80, "full_misses" },
	{ 0x81, "partial_miss_1lbfb_hit" },
	{ 0x82, "partial_miss_2lbfb_hit" },
	{ 0x83, "full_hit" },
	{ 0x90, "pred_req_full_miss" },
	{ 0x91, "pred_req_partial_miss_1lbfb_hit" },
	{ 0x92, "pred_req_partial_miss_2lbfb_hit" },
	{ 0xb0, "tot_num_miss_axi_htw_read_req" },
	{ 0xb1, "tot_num_pred_axi_htw_read_req" },
};
55
Olav Haugan99660ca2012-12-04 13:30:41 -080056static unsigned int iommu_pm_create_sup_cls_str(char **buf,
Olav Haugan0c2d9322013-01-31 18:35:30 -080057 struct iommu_pmon *pmon)
Olav Haugan99660ca2012-12-04 13:30:41 -080058{
Olav Haugan0c2d9322013-01-31 18:35:30 -080059 unsigned long buf_size = ARRAY_SIZE(pmu_event_classes) *
60 MAX_EVEN_CLASS_NAME_LEN;
Olav Haugan99660ca2012-12-04 13:30:41 -080061 unsigned int pos = 0;
Olav Haugan0c2d9322013-01-31 18:35:30 -080062 unsigned int nevent_cls = pmon->nevent_cls_supported;
Olav Haugan99660ca2012-12-04 13:30:41 -080063
64 *buf = kzalloc(buf_size, GFP_KERNEL);
65 if (*buf) {
Olav Haugan0c2d9322013-01-31 18:35:30 -080066 unsigned int j;
Olav Haugan99660ca2012-12-04 13:30:41 -080067 int i;
68 struct event_class *ptr;
69 size_t array_len = ARRAY_SIZE(pmu_event_classes);
70 ptr = pmu_event_classes;
71
Olav Haugan0c2d9322013-01-31 18:35:30 -080072 for (j = 0; j < nevent_cls; ++j) {
73 for (i = 0; i < array_len; ++i) {
74
75 if (ptr[i].event_number !=
76 pmon->event_cls_supported[j])
77 continue;
78
Olav Haugan99660ca2012-12-04 13:30:41 -080079 if (pos < buf_size) {
80 pos += snprintf(&(*buf)[pos],
81 buf_size-pos,
82 "[%u] %s\n",
83 ptr[i].event_number,
84 ptr[i].desc);
85 }
Olav Haugan0c2d9322013-01-31 18:35:30 -080086 break;
Olav Haugan99660ca2012-12-04 13:30:41 -080087 }
88 }
89 }
90 return pos;
91}
92
93static const char *iommu_pm_find_event_class_name(int event_class)
94{
95 size_t array_len;
96 struct event_class *ptr;
97 int i;
98 const char *event_class_name = NO_EVENT_CLASS_NAME;
Olav Haugan0c2d9322013-01-31 18:35:30 -080099 if (event_class < 0)
Olav Haugan99660ca2012-12-04 13:30:41 -0800100 goto out;
Olav Haugan0c2d9322013-01-31 18:35:30 -0800101
102 array_len = ARRAY_SIZE(pmu_event_classes);
103 ptr = pmu_event_classes;
Olav Haugan99660ca2012-12-04 13:30:41 -0800104
105 for (i = 0; i < array_len; ++i) {
106 if (ptr[i].event_number == event_class) {
107 event_class_name = ptr[i].desc;
108 break;
109 }
110 }
111
112out:
113 return event_class_name;
114}
115
116static int iommu_pm_find_event_class(const char *event_class_name)
117{
118 size_t array_len;
119 struct event_class *ptr;
120 int i;
Olav Haugan7a2f99c2013-02-04 14:43:26 -0800121 int event_class = MSM_IOMMU_PMU_NO_EVENT_CLASS;
Olav Haugan99660ca2012-12-04 13:30:41 -0800122
123 if (strcmp(event_class_name, NO_EVENT_CLASS_NAME) == 0)
124 goto out;
125
126 array_len = ARRAY_SIZE(pmu_event_classes);
127 ptr = pmu_event_classes;
128
129 for (i = 0; i < array_len; ++i) {
130 if (strcmp(ptr[i].desc, event_class_name) == 0) {
131 event_class = ptr[i].event_number;
132 goto out;
133 }
134 }
135
Olav Haugan99660ca2012-12-04 13:30:41 -0800136out:
137 return event_class;
138}
139
/* Register @iommu_pmon on the global list used by iommu_pm_get_pm_by_dev(). */
static inline void iommu_pm_add_to_iommu_list(struct iommu_pmon *iommu_pmon)
{
	list_add(&iommu_pmon->iommu_list, &iommu_list);
}
144
/* Remove @iommu_pmon from the global perf-monitor list. */
static inline void iommu_pm_del_from_iommu_list(struct iommu_pmon *iommu_pmon)
{
	list_del(&iommu_pmon->iommu_list);
}
149
150static struct iommu_pmon *iommu_pm_get_pm_by_dev(struct device *dev)
151{
152 struct iommu_pmon *pmon;
153 struct iommu_info *info;
154 struct list_head *ent;
155 list_for_each(ent, &iommu_list) {
156 pmon = list_entry(ent, struct iommu_pmon, iommu_list);
157 info = &pmon->iommu;
158 if (dev == info->iommu_dev)
159 return pmon;
160 }
161 return NULL;
162}
163
/*
 * Program @counter's hardware to match its current_event_class.
 *
 * MSM_IOMMU_PMU_NO_EVENT_CLASS disables the counter and its overflow
 * interrupt and clears its event-select register; any other class is
 * written to the event-select register and the counter (re-)enabled.
 * The cached software value and overflow count are reset in both cases.
 * Hardware is only touched when the backend reports access is OK
 * (is_hw_access_OK), and always under the IOMMU hardware lock.
 */
static void iommu_pm_set_event_type(struct iommu_pmon *pmon,
				    struct iommu_pmon_counter *counter)
{
	int event_class;
	unsigned int count_no;
	struct iommu_info *iommu = &pmon->iommu;

	event_class = counter->current_event_class;
	count_no = counter->absolute_counter_no;

	if (event_class == MSM_IOMMU_PMU_NO_EVENT_CLASS) {
		if (iommu->hw_ops->is_hw_access_OK(pmon)) {
			iommu->ops->iommu_lock_acquire();
			iommu->hw_ops->counter_disable(iommu, counter);
			iommu->hw_ops->ovfl_int_disable(iommu, counter);
			iommu->hw_ops->set_event_class(pmon, count_no, 0);
			iommu->ops->iommu_lock_release();
		}
		counter->overflow_count = 0;
		counter->value = 0;
	} else {
		counter->overflow_count = 0;
		counter->value = 0;
		if (iommu->hw_ops->is_hw_access_OK(pmon)) {
			iommu->ops->iommu_lock_acquire();
			iommu->hw_ops->set_event_class(pmon, count_no,
						       event_class);
			iommu->hw_ops->ovfl_int_enable(iommu, counter);
			iommu->hw_ops->counter_enable(iommu, counter);
			iommu->ops->iommu_lock_release();
		}
	}
}
197
198static void iommu_pm_reset_counts(struct iommu_pmon *pmon)
199{
200 unsigned int i;
201 unsigned int j;
202 for (i = 0; i < pmon->num_groups; ++i) {
203 struct iommu_pmon_cnt_group *cnt_grp = &pmon->cnt_grp[i];
204 for (j = 0; j < cnt_grp->num_counters; ++j) {
205 cnt_grp->counters[j].value = 0;
206 cnt_grp->counters[j].overflow_count = 0;
207 }
208 }
209}
210
Olav Haugan99660ca2012-12-04 13:30:41 -0800211static void iommu_pm_set_all_counters(struct iommu_pmon *pmon)
212{
213 unsigned int i;
214 unsigned int j;
215 for (i = 0; i < pmon->num_groups; ++i) {
216 struct iommu_pmon_cnt_group *cnt_grp = &pmon->cnt_grp[i];
217 for (j = 0; j < cnt_grp->num_counters; ++j)
218 iommu_pm_set_event_type(pmon, &cnt_grp->counters[j]);
219 }
220}
221
222static void iommu_pm_read_all_counters(struct iommu_pmon *pmon)
223{
224 unsigned int i;
225 unsigned int j;
Olav Hauganef69e892013-02-04 13:47:08 -0800226 struct iommu_info *iommu = &pmon->iommu;
Olav Haugan99660ca2012-12-04 13:30:41 -0800227 for (i = 0; i < pmon->num_groups; ++i) {
228 struct iommu_pmon_cnt_group *cnt_grp = &pmon->cnt_grp[i];
229 for (j = 0; j < cnt_grp->num_counters; ++j) {
230 struct iommu_pmon_counter *counter;
231 counter = &cnt_grp->counters[j];
Olav Hauganef69e892013-02-04 13:47:08 -0800232 counter->value = iommu->hw_ops->read_counter(counter);
Olav Haugan99660ca2012-12-04 13:30:41 -0800233 }
234 }
235}
236
/*
 * Turn performance monitoring on: power on the IOMMU, reset hardware
 * and cached counters, re-program every counter's event class, then
 * enable each counter group and finally the global counters.
 */
static void iommu_pm_on(struct iommu_pmon *pmon)
{
	unsigned int i;
	struct iommu_info *iommu = &pmon->iommu;
	struct msm_iommu_drvdata *iommu_drvdata =
					dev_get_drvdata(iommu->iommu_dev);

	iommu->ops->iommu_power_on(iommu_drvdata);

	/* Reset counters in HW */
	iommu->ops->iommu_lock_acquire();
	iommu->hw_ops->reset_counters(&pmon->iommu);
	iommu->ops->iommu_lock_release();

	/* Reset SW counters */
	iommu_pm_reset_counts(pmon);

	/* Set before programming counters — presumably consulted by
	 * hw_ops->is_hw_access_OK(); confirm against the hw backend. */
	pmon->enabled = 1;

	iommu_pm_set_all_counters(pmon);

	iommu->ops->iommu_lock_acquire();

	/* enable all counter group */
	for (i = 0; i < pmon->num_groups; ++i)
		iommu->hw_ops->grp_enable(iommu, i);

	/* enable global counters */
	iommu->hw_ops->enable_pm(iommu);
	iommu->ops->iommu_lock_release();

	pr_info("%s: TLB performance monitoring turned ON\n",
		pmon->iommu.iommu_name);
}
271
/*
 * Turn performance monitoring off: disable the global counters, harvest
 * any overflow that raced the shutdown, disable each counter group,
 * cache the final counter values, then power the IOMMU down.
 */
static void iommu_pm_off(struct iommu_pmon *pmon)
{
	unsigned int i;
	struct iommu_info *iommu = &pmon->iommu;
	struct msm_iommu_drvdata *iommu_drvdata =
					dev_get_drvdata(iommu->iommu_dev);

	pmon->enabled = 0;

	iommu->ops->iommu_lock_acquire();

	/* disable global counters */
	iommu->hw_ops->disable_pm(iommu);

	/* Check if we overflowed just before turning off pmon */
	iommu->hw_ops->check_for_overflow(pmon);

	/* disable all counter group */
	for (i = 0; i < pmon->num_groups; ++i)
		iommu->hw_ops->grp_disable(iommu, i);

	/* Update cached copy of counters before turning off power */
	iommu_pm_read_all_counters(pmon);

	iommu->ops->iommu_lock_release();
	iommu->ops->iommu_power_off(iommu_drvdata);

	pr_info("%s: TLB performance monitoring turned OFF\n",
		pmon->iommu.iommu_name);
}
302
/* Generic debugfs open: expose the inode's private data to read/write. */
static int iommu_pm_debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
308
/*
 * debugfs read for a counter's "value" file.  Reports the 64-bit total:
 * the counter value (refreshed from hardware when accessible) plus 2^32
 * for every recorded overflow.
 */
static ssize_t iommu_pm_count_value_read(struct file *fp,
					 char __user *user_buff,
					 size_t count, loff_t *pos)
{
	size_t rd_cnt;
	unsigned long long full_count;

	struct iommu_pmon_counter *counter = fp->private_data;
	struct iommu_pmon *pmon = counter->cnt_group->pmon;
	struct iommu_info *iommu = &pmon->iommu;
	char buf[50];
	size_t len;

	mutex_lock(&pmon->lock);

	if (iommu->hw_ops->is_hw_access_OK(pmon)) {
		iommu->ops->iommu_lock_acquire();
		counter->value = iommu->hw_ops->read_counter(counter);
		iommu->ops->iommu_lock_release();
	}
	/* Each overflow accounts for one full 32-bit wrap (2^32). */
	full_count = (unsigned long long) counter->value +
		     ((unsigned long long)counter->overflow_count *
		      0x100000000ULL);

	len = snprintf(buf, 50, "%llu\n", full_count);
	rd_cnt = simple_read_from_buffer(user_buff, count, pos, buf, len);
	mutex_unlock(&pmon->lock);

	return rd_cnt;
}
339
/* fops for each counter's "value" file (read-only). */
static const struct file_operations cnt_value_file_ops = {
	.open = iommu_pm_debug_open,
	.read = iommu_pm_count_value_read,
};
344
345static ssize_t iommu_pm_event_class_read(struct file *fp,
346 char __user *user_buff,
347 size_t count, loff_t *pos)
348{
349 size_t rd_cnt;
350 struct iommu_pmon_counter *counter = fp->private_data;
351 struct iommu_pmon *pmon = counter->cnt_group->pmon;
352 char buf[50];
353 const char *event_class_name;
354 size_t len;
355
356 mutex_lock(&pmon->lock);
357 event_class_name = iommu_pm_find_event_class_name(
358 counter->current_event_class);
359 len = snprintf(buf, 50, "%s\n", event_class_name);
360
361 rd_cnt = simple_read_from_buffer(user_buff, count, pos, buf, len);
362 mutex_unlock(&pmon->lock);
363 return rd_cnt;
364}
365
366static ssize_t iommu_pm_event_class_write(struct file *fp,
367 const char __user *user_buff,
368 size_t count, loff_t *pos)
369{
370 size_t wr_cnt;
371 char buf[50];
372 size_t buf_size = sizeof(buf);
373 struct iommu_pmon_counter *counter = fp->private_data;
374 struct iommu_pmon *pmon = counter->cnt_group->pmon;
375 int current_event_class;
376
377 if ((count + *pos) >= buf_size)
378 return -EINVAL;
379
380 mutex_lock(&pmon->lock);
381 current_event_class = counter->current_event_class;
382 wr_cnt = simple_write_to_buffer(buf, buf_size, pos, user_buff, count);
383 if (wr_cnt >= 1) {
384 int rv;
385 long value;
386 buf[wr_cnt-1] = '\0';
Olav Haugan0c2d9322013-01-31 18:35:30 -0800387 rv = kstrtol(buf, 10, &value);
Olav Haugan99660ca2012-12-04 13:30:41 -0800388 if (!rv) {
389 counter->current_event_class =
390 iommu_pm_find_event_class(
391 iommu_pm_find_event_class_name(value));
392 } else {
393 counter->current_event_class =
394 iommu_pm_find_event_class(buf);
395 } }
396
397 if (current_event_class != counter->current_event_class)
398 iommu_pm_set_event_type(pmon, counter);
399
400 mutex_unlock(&pmon->lock);
401 return wr_cnt;
402}
403
/* fops for each counter's "current_event_class" file (read/write). */
static const struct file_operations event_class_file_ops = {
	.open = iommu_pm_debug_open,
	.read = iommu_pm_event_class_read,
	.write = iommu_pm_event_class_write,
};
409
/*
 * debugfs write for "reset_counters": writing "1" zeroes the hardware
 * counters (when accessible) and the cached software copies.  Any other
 * value is rejected with an error message (return count regardless, so
 * the writer is not retried).
 */
static ssize_t iommu_reset_counters_write(struct file *fp,
				    const char __user *user_buff,
				    size_t count, loff_t *pos)
{
	size_t wr_cnt;
	char buf[10];
	size_t buf_size = sizeof(buf);
	struct iommu_pmon *pmon = fp->private_data;
	struct iommu_info *iommu = &pmon->iommu;

	if ((count + *pos) >= buf_size)
		return -EINVAL;

	mutex_lock(&pmon->lock);
	wr_cnt = simple_write_to_buffer(buf, buf_size, pos, user_buff, count);
	if (wr_cnt >= 1) {
		unsigned long cmd = 0;
		int rv;
		buf[wr_cnt-1] = '\0';	/* strip trailing newline */
		rv = kstrtoul(buf, 10, &cmd);
		if (!rv && (cmd == 1)) {
			if (iommu->hw_ops->is_hw_access_OK(pmon)) {
				iommu->ops->iommu_lock_acquire();
				iommu->hw_ops->reset_counters(&pmon->iommu);
				iommu->ops->iommu_lock_release();
			}
			iommu_pm_reset_counts(pmon);
			pr_info("TLB performance counters reset\n");
		} else {
			pr_err("Unknown performance monitor command: %lu\n",
				cmd);
		}
	}
	mutex_unlock(&pmon->lock);
	return wr_cnt;
}
446
/* fops for the per-IOMMU "reset_counters" file (write-only). */
static const struct file_operations reset_file_ops = {
	.open = iommu_pm_debug_open,
	.write = iommu_reset_counters_write,
};
451
452static ssize_t iommu_pm_enable_counters_read(struct file *fp,
453 char __user *user_buff,
454 size_t count, loff_t *pos)
455{
456 size_t rd_cnt;
457 char buf[5];
458 size_t len;
459 struct iommu_pmon *pmon = fp->private_data;
460
461 mutex_lock(&pmon->lock);
462 len = snprintf(buf, 5, "%u\n", pmon->enabled);
463 rd_cnt = simple_read_from_buffer(user_buff, count, pos, buf, len);
464 mutex_unlock(&pmon->lock);
465 return rd_cnt;
466}
467
468static ssize_t iommu_pm_enable_counters_write(struct file *fp,
469 const char __user *user_buff,
470 size_t count, loff_t *pos)
471{
472 size_t wr_cnt;
473 char buf[10];
474 size_t buf_size = sizeof(buf);
475 struct iommu_pmon *pmon = fp->private_data;
476
477 if ((count + *pos) >= buf_size)
478 return -EINVAL;
479
480 mutex_lock(&pmon->lock);
481 wr_cnt = simple_write_to_buffer(buf, buf_size, pos, user_buff, count);
482 if (wr_cnt >= 1) {
483 unsigned long cmd;
484 int rv;
485 buf[wr_cnt-1] = '\0';
486 rv = kstrtoul(buf, 10, &cmd);
487 if (!rv && (cmd < 2)) {
488 if (pmon->enabled == 1 && cmd == 0) {
489 if (pmon->iommu_attach_count > 0)
490 iommu_pm_off(pmon);
491 } else if (pmon->enabled == 0 && cmd == 1) {
492 /* We can only turn on perf. monitoring if
493 * iommu is attached. Delay turning on perf.
494 * monitoring until we are attached.
495 */
496 if (pmon->iommu_attach_count > 0)
497 iommu_pm_on(pmon);
498 else
499 pmon->enabled = 1;
500 }
501 } else {
502 pr_err("Unknown performance monitor command: %lu\n",
503 cmd);
504 }
505 }
506 mutex_unlock(&pmon->lock);
507 return wr_cnt;
508}
509
/* fops for the per-IOMMU "enable_counters" file (read/write). */
static const struct file_operations event_enable_file_ops = {
	.open = iommu_pm_debug_open,
	.read = iommu_pm_enable_counters_read,
	.write = iommu_pm_enable_counters_write,
};
515
516static ssize_t iommu_pm_avail_event_cls_read(struct file *fp,
517 char __user *user_buff,
518 size_t count, loff_t *pos)
519{
520 size_t rd_cnt = 0;
521 struct iommu_pmon *pmon = fp->private_data;
522 char *buf;
523 size_t len;
524
525 mutex_lock(&pmon->lock);
526
Olav Haugan0c2d9322013-01-31 18:35:30 -0800527 len = iommu_pm_create_sup_cls_str(&buf, pmon);
Olav Haugan99660ca2012-12-04 13:30:41 -0800528 if (buf) {
529 rd_cnt = simple_read_from_buffer(user_buff, count, pos,
530 buf, len);
531 kfree(buf);
532 }
533 mutex_unlock(&pmon->lock);
534 return rd_cnt;
535}
536
/* fops for the per-IOMMU "available_event_classes" file (read-only). */
static const struct file_operations available_event_cls_file_ops = {
	.open = iommu_pm_debug_open,
	.read = iommu_pm_avail_event_cls_read,
};
541
542
543
/*
 * Populate one counter group's debugfs directory: for every counter in
 * @cnt_grp, initialize its bookkeeping fields and create a "counterN/"
 * directory holding "value" and "current_event_class" files.
 * @abs_counter_no is a running counter number across all groups; it is
 * advanced here so each counter gets a unique absolute number.
 * Returns 0 on success or a negative errno (caller unwinds debugfs).
 */
static int iommu_pm_create_grp_debugfs_counters_hierarchy(
					struct iommu_pmon_cnt_group *cnt_grp,
					unsigned int *abs_counter_no)
{
	int ret = 0;
	int j;
	char name[20];

	for (j = 0; j < cnt_grp->num_counters; ++j) {
		struct dentry *grp_dir = cnt_grp->group_dir;
		struct dentry *counter_dir;
		cnt_grp->counters[j].cnt_group = cnt_grp;
		cnt_grp->counters[j].counter_no = j;
		cnt_grp->counters[j].absolute_counter_no = *abs_counter_no;
		(*abs_counter_no)++;
		cnt_grp->counters[j].value = 0;
		cnt_grp->counters[j].overflow_count = 0;
		cnt_grp->counters[j].current_event_class =
						MSM_IOMMU_PMU_NO_EVENT_CLASS;

		snprintf(name, 20, "counter%u", j);

		counter_dir = debugfs_create_dir(name, grp_dir);

		if (IS_ERR_OR_NULL(counter_dir)) {
			pr_err("unable to create counter debugfs dir %s\n",
			       name);
			ret = -ENOMEM;
			goto out;
		}

		cnt_grp->counters[j].counter_dir = counter_dir;

		if (!debugfs_create_file("value", 0644, counter_dir,
					 &cnt_grp->counters[j],
					 &cnt_value_file_ops)) {
			ret = -EIO;
			goto out;
		}

		if (!debugfs_create_file("current_event_class", 0644,
				counter_dir, &cnt_grp->counters[j],
				&event_class_file_ops)) {
			ret = -EIO;
			goto out;
		}
	}
out:
	return ret;
}
594
/*
 * Create the per-group debugfs tree: for each counter group allocate
 * its counters array, create "groupN/" under the IOMMU's debugfs dir
 * and populate it with the per-counter hierarchy.
 * Returns 0 on success or a negative errno; the caller frees the
 * counters arrays and removes debugfs entries on failure.
 */
static int iommu_pm_create_group_debugfs_hierarchy(struct iommu_info *iommu,
				   struct iommu_pmon *pmon_entry)
{
	int i;
	int ret = 0;
	char name[20];
	unsigned int abs_counter_no = 0;

	for (i = 0; i < pmon_entry->num_groups; ++i) {
		pmon_entry->cnt_grp[i].pmon = pmon_entry;
		pmon_entry->cnt_grp[i].grp_no = i;
		pmon_entry->cnt_grp[i].num_counters = pmon_entry->num_counters;
		pmon_entry->cnt_grp[i].counters =
			kzalloc(sizeof(*pmon_entry->cnt_grp[i].counters)
			* pmon_entry->cnt_grp[i].num_counters, GFP_KERNEL);

		if (!pmon_entry->cnt_grp[i].counters) {
			pr_err("Unable to allocate memory for counters\n");
			ret = -ENOMEM;
			goto out;
		}
		snprintf(name, 20, "group%u", i);
		pmon_entry->cnt_grp[i].group_dir = debugfs_create_dir(name,
							pmon_entry->iommu_dir);
		if (IS_ERR_OR_NULL(pmon_entry->cnt_grp[i].group_dir)) {
			pr_err("unable to create group debugfs dir %s\n", name);
			ret = -ENOMEM;
			goto out;
		}

		ret = iommu_pm_create_grp_debugfs_counters_hierarchy(
						&pmon_entry->cnt_grp[i],
						&abs_counter_no);
		if (ret)
			goto out;
	}
out:
	return ret;
}
634
Olav Haugan0c2d9322013-01-31 18:35:30 -0800635int msm_iommu_pm_iommu_register(struct iommu_pmon *pmon_entry)
Olav Haugan99660ca2012-12-04 13:30:41 -0800636{
Olav Haugan99660ca2012-12-04 13:30:41 -0800637 int ret = 0;
Olav Haugan0c2d9322013-01-31 18:35:30 -0800638 struct iommu_info *iommu = &pmon_entry->iommu;
Olav Haugan99660ca2012-12-04 13:30:41 -0800639 int i;
640
641 if (!iommu->ops || !iommu->iommu_name || !iommu->base
642 || !iommu->iommu_dev) {
643 ret = -EINVAL;
644 goto out;
645 }
646
647 if (!msm_iommu_root_debugfs_dir) {
648 msm_iommu_root_debugfs_dir = debugfs_create_dir("iommu", NULL);
649 if (IS_ERR_OR_NULL(msm_iommu_root_debugfs_dir)) {
650 pr_err("Failed creating iommu debugfs dir \"iommu\"\n");
651 ret = -EIO;
652 goto out;
653 }
654 }
Olav Haugan99660ca2012-12-04 13:30:41 -0800655
Olav Haugan99660ca2012-12-04 13:30:41 -0800656 pmon_entry->cnt_grp = kzalloc(sizeof(*pmon_entry->cnt_grp)
657 * pmon_entry->num_groups, GFP_KERNEL);
658 if (!pmon_entry->cnt_grp) {
659 pr_err("Unable to allocate memory for counter groups\n");
660 ret = -ENOMEM;
661 goto file_err;
662 }
Olav Haugan99660ca2012-12-04 13:30:41 -0800663 pmon_entry->iommu_dir = debugfs_create_dir(iommu->iommu_name,
664 msm_iommu_root_debugfs_dir);
665 if (IS_ERR_OR_NULL(pmon_entry->iommu_dir)) {
666 pr_err("unable to create iommu debugfs dir %s\n",
667 iommu->iommu_name);
668 ret = -ENOMEM;
669 goto free_mem;
670 }
671
672 if (!debugfs_create_file("reset_counters", 0644,
673 pmon_entry->iommu_dir, pmon_entry, &reset_file_ops)) {
674 ret = -EIO;
675 goto free_mem;
676 }
677
678 if (!debugfs_create_file("enable_counters", 0644,
679 pmon_entry->iommu_dir, pmon_entry, &event_enable_file_ops)) {
680 ret = -EIO;
681 goto free_mem;
682 }
683
684 if (!debugfs_create_file("available_event_classes", 0644,
685 pmon_entry->iommu_dir, pmon_entry,
686 &available_event_cls_file_ops)) {
687 ret = -EIO;
688 goto free_mem;
689 }
690
691 ret = iommu_pm_create_group_debugfs_hierarchy(iommu, pmon_entry);
692 if (ret)
693 goto free_mem;
694
Olav Haugan7a2f99c2013-02-04 14:43:26 -0800695 iommu->hw_ops->initialize_hw(pmon_entry);
696
Olav Haugan99660ca2012-12-04 13:30:41 -0800697 if (iommu->evt_irq > 0) {
698 ret = request_threaded_irq(iommu->evt_irq, NULL,
Olav Hauganef69e892013-02-04 13:47:08 -0800699 iommu->hw_ops->evt_ovfl_int_handler,
Olav Haugan99660ca2012-12-04 13:30:41 -0800700 IRQF_ONESHOT | IRQF_SHARED,
Olav Haugan7a2f99c2013-02-04 14:43:26 -0800701 "msm_iommu_pmon_nonsecure_irq", pmon_entry);
Olav Haugan99660ca2012-12-04 13:30:41 -0800702 if (ret) {
703 pr_err("Request IRQ %d failed with ret=%d\n",
704 iommu->evt_irq,
705 ret);
706 goto free_mem;
707 }
708 } else {
709 pr_info("%s: Overflow interrupt not available\n", __func__);
710 }
711
Olav Haugan0c2d9322013-01-31 18:35:30 -0800712 dev_dbg(iommu->iommu_dev, "%s iommu registered\n", iommu->iommu_name);
Olav Haugan99660ca2012-12-04 13:30:41 -0800713
714 goto out;
715free_mem:
716 if (pmon_entry->cnt_grp) {
717 for (i = 0; i < pmon_entry->num_groups; ++i) {
718 kfree(pmon_entry->cnt_grp[i].counters);
719 pmon_entry->cnt_grp[i].counters = 0;
720 }
721 }
722 kfree(pmon_entry->cnt_grp);
723 pmon_entry->cnt_grp = 0;
724file_err:
725 debugfs_remove_recursive(msm_iommu_root_debugfs_dir);
726out:
727 return ret;
728}
729EXPORT_SYMBOL(msm_iommu_pm_iommu_register);
730
731void msm_iommu_pm_iommu_unregister(struct device *dev)
732{
733 int i;
734 struct iommu_pmon *pmon_entry = iommu_pm_get_pm_by_dev(dev);
735
736 if (!pmon_entry)
737 return;
738
739 free_irq(pmon_entry->iommu.evt_irq, pmon_entry->iommu.iommu_dev);
740
741 if (!pmon_entry)
742 goto remove_debugfs;
743
744 if (pmon_entry->cnt_grp) {
745 for (i = 0; i < pmon_entry->num_groups; ++i)
746 kfree(pmon_entry->cnt_grp[i].counters);
747 }
748
749 kfree(pmon_entry->cnt_grp);
750
751remove_debugfs:
752 debugfs_remove_recursive(msm_iommu_root_debugfs_dir);
753
754 return;
755}
756EXPORT_SYMBOL(msm_iommu_pm_iommu_unregister);
757
Olav Haugan0c2d9322013-01-31 18:35:30 -0800758struct iommu_pmon *msm_iommu_pm_alloc(struct device *dev)
Olav Haugan99660ca2012-12-04 13:30:41 -0800759{
760 struct iommu_pmon *pmon_entry;
761 struct iommu_info *info;
762 pmon_entry = devm_kzalloc(dev, sizeof(*pmon_entry), GFP_KERNEL);
763 if (!pmon_entry)
764 return NULL;
765 info = &pmon_entry->iommu;
766 info->iommu_dev = dev;
767 mutex_init(&pmon_entry->lock);
768 iommu_pm_add_to_iommu_list(pmon_entry);
Olav Haugan0c2d9322013-01-31 18:35:30 -0800769 return pmon_entry;
Olav Haugan99660ca2012-12-04 13:30:41 -0800770}
771EXPORT_SYMBOL(msm_iommu_pm_alloc);
772
/*
 * Drop @dev's perf-monitor entry from the global list.  The entry's
 * memory itself is devm-managed, so no kfree here.
 */
void msm_iommu_pm_free(struct device *dev)
{
	struct iommu_pmon *pmon = iommu_pm_get_pm_by_dev(dev);
	if (pmon)
		iommu_pm_del_from_iommu_list(pmon);
}
779EXPORT_SYMBOL(msm_iommu_pm_free);
780
/*
 * Notification that IOMMU @dev has been attached.  On the first attach,
 * perform any turn-on that was deferred because monitoring was enabled
 * while detached (see iommu_pm_enable_counters_write).
 */
void msm_iommu_attached(struct device *dev)
{
	struct iommu_pmon *pmon = iommu_pm_get_pm_by_dev(dev);
	if (pmon) {
		mutex_lock(&pmon->lock);
		++pmon->iommu_attach_count;
		if (pmon->iommu_attach_count == 1) {
			/* If perf. mon was enabled before we attached we do
			 * the actual after we attach.
			 */
			if (pmon->enabled)
				iommu_pm_on(pmon);
		}
		mutex_unlock(&pmon->lock);
	}
}
797EXPORT_SYMBOL(msm_iommu_attached);
798
/*
 * Notification that IOMMU @dev is being detached.  On the last detach,
 * monitoring is switched off first, while the hardware is still
 * accessible.  BUG_ON guards against attach/detach imbalance.
 */
void msm_iommu_detached(struct device *dev)
{
	struct iommu_pmon *pmon = iommu_pm_get_pm_by_dev(dev);
	if (pmon) {
		mutex_lock(&pmon->lock);
		if (pmon->iommu_attach_count == 1) {
			/* If perf. mon is still enabled we have to disable
			 * before we do the detach.
			 */
			if (pmon->enabled)
				iommu_pm_off(pmon);
		}
		BUG_ON(pmon->iommu_attach_count == 0);
		--pmon->iommu_attach_count;
		mutex_unlock(&pmon->lock);
	}
}
816EXPORT_SYMBOL(msm_iommu_detached);
817