/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/iommu.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <mach/iommu.h>
#include <mach/iommu_perfmon.h>

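/*
 * This driver exposes the IOMMU TLB performance counters via debugfs:
 *
 *   <debugfs>/iommu/<iommu_name>/enable_counters
 *   <debugfs>/iommu/<iommu_name>/reset_counters
 *   <debugfs>/iommu/<iommu_name>/available_event_classes
 *   <debugfs>/iommu/<iommu_name>/group<N>/counter<M>/value
 *   <debugfs>/iommu/<iommu_name>/group<N>/counter<M>/current_event_class
 *
 * Example usage (a sketch; assumes debugfs is mounted at
 * /sys/kernel/debug and an IOMMU named "msm_iommu" has been registered):
 *
 *   echo 1 > /sys/kernel/debug/iommu/msm_iommu/enable_counters
 *   echo tlb_refill > .../msm_iommu/group0/counter0/current_event_class
 *   cat .../msm_iommu/group0/counter0/value
 *
 * current_event_class accepts either a class name or its decimal number.
 */
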
static LIST_HEAD(iommu_list);
static struct dentry *msm_iommu_root_debugfs_dir;
static const char *NO_EVENT_CLASS_NAME = "none";
static const unsigned int MAX_EVENT_CLASS_NAME_LEN = 36;

struct event_class {
	unsigned int event_number;
	const char *desc;
};

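/*
 * Event classes the PMU can count. event_number is the raw value written
 * to the HW event-class field (see iommu_pm_set_event_type()); desc is
 * the name exposed through debugfs.
 */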
static struct event_class pmu_event_classes[] = {
	{ 0x00, "cycle_count" },
	{ 0x01, "cycle_count64" },
	{ 0x08, "tlb_refill" },
	{ 0x09, "tlb_refill_read" },
	{ 0x0A, "tlb_refill_write" },
	{ 0x10, "access" },
	{ 0x11, "access_read" },
	{ 0x12, "access_write" },
	{ 0x80, "full_misses" },
	{ 0x81, "partial_miss_1lbfb_hit" },
	{ 0x82, "partial_miss_2lbfb_hit" },
	{ 0x83, "full_hit" },
	{ 0x90, "pred_req_full_miss" },
	{ 0x91, "pred_req_partial_miss_1lbfb_hit" },
	{ 0x92, "pred_req_partial_miss_2lbfb_hit" },
	{ 0xb0, "tot_num_miss_axi_htw_read_req" },
	{ 0xb1, "tot_num_pred_axi_htw_read_req" },
};

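/*
 * Build a newline-separated listing of the event classes this PMU
 * supports, one "[<number>] <name>" entry per line (e.g. "[8] tlb_refill").
 * Returns the number of bytes written; *buf is kzalloc'ed and owned by
 * the caller.
 */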
static unsigned int iommu_pm_create_sup_cls_str(char **buf,
						struct iommu_pmon *pmon)
{
	unsigned long buf_size = ARRAY_SIZE(pmu_event_classes) *
				 MAX_EVENT_CLASS_NAME_LEN;
	unsigned int pos = 0;
	unsigned int nevent_cls = pmon->nevent_cls_supported;

	*buf = kzalloc(buf_size, GFP_KERNEL);
	if (*buf) {
		unsigned int j;
		int i;
		struct event_class *ptr;
		size_t array_len = ARRAY_SIZE(pmu_event_classes);
		ptr = pmu_event_classes;

		for (j = 0; j < nevent_cls; ++j) {
			for (i = 0; i < array_len; ++i) {
				if (ptr[i].event_number !=
						pmon->event_cls_supported[j])
					continue;

				if (pos < buf_size) {
					pos += snprintf(&(*buf)[pos],
							buf_size - pos,
							"[%u] %s\n",
							ptr[i].event_number,
							ptr[i].desc);
				}
				break;
			}
		}
	}
	return pos;
}

static int iommu_pm_event_class_supported(struct iommu_pmon *pmon,
					  int event_class)
{
	unsigned int nevent_cls = pmon->nevent_cls_supported;
	unsigned int i;

	for (i = 0; i < nevent_cls; ++i) {
		if (event_class == pmon->event_cls_supported[i])
			return event_class;
	}
	return MSM_IOMMU_PMU_NO_EVENT_CLASS;
}

static const char *iommu_pm_find_event_class_name(int event_class)
{
	size_t array_len;
	struct event_class *ptr;
	int i;
	const char *event_class_name = NO_EVENT_CLASS_NAME;
	if (event_class < 0)
		goto out;

	array_len = ARRAY_SIZE(pmu_event_classes);
	ptr = pmu_event_classes;

	for (i = 0; i < array_len; ++i) {
		if (ptr[i].event_number == event_class) {
			event_class_name = ptr[i].desc;
			break;
		}
	}

out:
	return event_class_name;
}

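/*
 * Map a user-supplied class name (or "none") to its event number.
 * Unknown names, and classes this particular PMU does not support,
 * resolve to MSM_IOMMU_PMU_NO_EVENT_CLASS.
 */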
static int iommu_pm_find_event_class(struct iommu_pmon *pmon,
				     const char *event_class_name)
{
	size_t array_len;
	struct event_class *ptr;
	int i;
	int event_class = MSM_IOMMU_PMU_NO_EVENT_CLASS;

	if (strcmp(event_class_name, NO_EVENT_CLASS_NAME) == 0)
		goto out;

	array_len = ARRAY_SIZE(pmu_event_classes);
	ptr = pmu_event_classes;

	for (i = 0; i < array_len; ++i) {
		if (strcmp(ptr[i].desc, event_class_name) == 0) {
			event_class = ptr[i].event_number;
			goto out;
		}
	}

out:
	event_class = iommu_pm_event_class_supported(pmon, event_class);
	return event_class;
}

static inline void iommu_pm_add_to_iommu_list(struct iommu_pmon *iommu_pmon)
{
	list_add(&iommu_pmon->iommu_list, &iommu_list);
}

static inline void iommu_pm_del_from_iommu_list(struct iommu_pmon *iommu_pmon)
{
	list_del(&iommu_pmon->iommu_list);
}

static struct iommu_pmon *iommu_pm_get_pm_by_dev(struct device *dev)
{
	struct iommu_pmon *pmon;
	struct iommu_info *info;
	struct list_head *ent;
	list_for_each(ent, &iommu_list) {
		pmon = list_entry(ent, struct iommu_pmon, iommu_list);
		info = &pmon->iommu;
		if (dev == info->iommu_dev)
			return pmon;
	}
	return NULL;
}

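/*
 * Program a counter's event class in hardware. Selecting
 * MSM_IOMMU_PMU_NO_EVENT_CLASS disables the counter and its overflow
 * interrupt; any other class is programmed and the counter (re)enabled.
 * The cached SW value and overflow count are reset either way.
 */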
static void iommu_pm_set_event_type(struct iommu_pmon *pmon,
				    struct iommu_pmon_counter *counter)
{
	int event_class;
	unsigned int count_no;
	struct iommu_info *iommu = &pmon->iommu;

	event_class = counter->current_event_class;
	count_no = counter->absolute_counter_no;

	if (event_class == MSM_IOMMU_PMU_NO_EVENT_CLASS) {
		if (iommu->hw_ops->is_hw_access_OK(pmon)) {
			iommu->ops->iommu_lock_acquire();
			iommu->hw_ops->counter_disable(iommu, counter);
			iommu->hw_ops->ovfl_int_disable(iommu, counter);
			iommu->hw_ops->set_event_class(pmon, count_no, 0);
			iommu->ops->iommu_lock_release();
		}
		counter->overflow_count = 0;
		counter->value = 0;
	} else {
		counter->overflow_count = 0;
		counter->value = 0;
		if (iommu->hw_ops->is_hw_access_OK(pmon)) {
			iommu->ops->iommu_lock_acquire();
			iommu->hw_ops->set_event_class(pmon, count_no,
						       event_class);
			iommu->hw_ops->ovfl_int_enable(iommu, counter);
			iommu->hw_ops->counter_enable(iommu, counter);
			iommu->ops->iommu_lock_release();
		}
	}
}

static void iommu_pm_reset_counts(struct iommu_pmon *pmon)
{
	unsigned int i;
	unsigned int j;
	for (i = 0; i < pmon->num_groups; ++i) {
		struct iommu_pmon_cnt_group *cnt_grp = &pmon->cnt_grp[i];
		for (j = 0; j < cnt_grp->num_counters; ++j) {
			cnt_grp->counters[j].value = 0;
			cnt_grp->counters[j].overflow_count = 0;
		}
	}
}

static void iommu_pm_set_all_counters(struct iommu_pmon *pmon)
{
	unsigned int i;
	unsigned int j;
	for (i = 0; i < pmon->num_groups; ++i) {
		struct iommu_pmon_cnt_group *cnt_grp = &pmon->cnt_grp[i];
		for (j = 0; j < cnt_grp->num_counters; ++j)
			iommu_pm_set_event_type(pmon, &cnt_grp->counters[j]);
	}
}

static void iommu_pm_read_all_counters(struct iommu_pmon *pmon)
{
	unsigned int i;
	unsigned int j;
	struct iommu_info *iommu = &pmon->iommu;
	for (i = 0; i < pmon->num_groups; ++i) {
		struct iommu_pmon_cnt_group *cnt_grp = &pmon->cnt_grp[i];
		for (j = 0; j < cnt_grp->num_counters; ++j) {
			struct iommu_pmon_counter *counter;
			counter = &cnt_grp->counters[j];
			counter->value = iommu->hw_ops->read_counter(counter);
		}
	}
}

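/*
 * Turn monitoring on: power and clock the IOMMU, reset the HW and SW
 * counters, reprogram every counter's event class, then enable each
 * counter group and finally the global performance monitor.
 */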
static void iommu_pm_on(struct iommu_pmon *pmon)
{
	unsigned int i;
	struct iommu_info *iommu = &pmon->iommu;
	struct msm_iommu_drvdata *iommu_drvdata =
					dev_get_drvdata(iommu->iommu_dev);

	iommu->ops->iommu_power_on(iommu_drvdata);
	iommu->ops->iommu_clk_on(iommu_drvdata);

	/* Reset counters in HW */
	iommu->ops->iommu_lock_acquire();
	iommu->hw_ops->reset_counters(&pmon->iommu);
	iommu->ops->iommu_lock_release();

	/* Reset SW counters */
	iommu_pm_reset_counts(pmon);

	pmon->enabled = 1;

	iommu_pm_set_all_counters(pmon);

	iommu->ops->iommu_lock_acquire();

	/* Enable all counter groups */
	for (i = 0; i < pmon->num_groups; ++i)
		iommu->hw_ops->grp_enable(iommu, i);

	/* Enable global counters */
	iommu->hw_ops->enable_pm(iommu);
	iommu->ops->iommu_lock_release();

	pr_info("%s: TLB performance monitoring turned ON\n",
		pmon->iommu.iommu_name);
}

static void iommu_pm_off(struct iommu_pmon *pmon)
{
	unsigned int i;
	struct iommu_info *iommu = &pmon->iommu;
	struct msm_iommu_drvdata *iommu_drvdata =
					dev_get_drvdata(iommu->iommu_dev);

	pmon->enabled = 0;

	iommu->ops->iommu_lock_acquire();

	/* Disable global counters */
	iommu->hw_ops->disable_pm(iommu);

	/* Check if we overflowed just before turning off pmon */
	iommu->hw_ops->check_for_overflow(pmon);

	/* Disable all counter groups */
	for (i = 0; i < pmon->num_groups; ++i)
		iommu->hw_ops->grp_disable(iommu, i);

	/* Update cached copy of counters before turning off power */
	iommu_pm_read_all_counters(pmon);

	iommu->ops->iommu_lock_release();
	iommu->ops->iommu_clk_off(iommu_drvdata);
	iommu->ops->iommu_power_off(iommu_drvdata);

	pr_info("%s: TLB performance monitoring turned OFF\n",
		pmon->iommu.iommu_name);
}

static int iommu_pm_debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

static ssize_t iommu_pm_count_value_read(struct file *fp,
					 char __user *user_buff,
					 size_t count, loff_t *pos)
{
	size_t rd_cnt;
	unsigned long long full_count;

	struct iommu_pmon_counter *counter = fp->private_data;
	struct iommu_pmon *pmon = counter->cnt_group->pmon;
	struct iommu_info *iommu = &pmon->iommu;
	char buf[50];
	size_t len;

	mutex_lock(&pmon->lock);

	if (iommu->hw_ops->is_hw_access_OK(pmon)) {
		iommu->ops->iommu_lock_acquire();
		counter->value = iommu->hw_ops->read_counter(counter);
		iommu->ops->iommu_lock_release();
	}
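	/* HW counters are 32 bits wide; each recorded overflow adds 2^32 */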
	full_count = (unsigned long long) counter->value +
		     ((unsigned long long) counter->overflow_count *
		      0x100000000ULL);

	len = snprintf(buf, sizeof(buf), "%llu\n", full_count);
	rd_cnt = simple_read_from_buffer(user_buff, count, pos, buf, len);
	mutex_unlock(&pmon->lock);

	return rd_cnt;
}

static const struct file_operations cnt_value_file_ops = {
	.open = iommu_pm_debug_open,
	.read = iommu_pm_count_value_read,
};

static ssize_t iommu_pm_event_class_read(struct file *fp,
					 char __user *user_buff,
					 size_t count, loff_t *pos)
{
	size_t rd_cnt;
	struct iommu_pmon_counter *counter = fp->private_data;
	struct iommu_pmon *pmon = counter->cnt_group->pmon;
	char buf[50];
	const char *event_class_name;
	size_t len;

	mutex_lock(&pmon->lock);
	event_class_name = iommu_pm_find_event_class_name(
						counter->current_event_class);
	len = snprintf(buf, sizeof(buf), "%s\n", event_class_name);

	rd_cnt = simple_read_from_buffer(user_buff, count, pos, buf, len);
	mutex_unlock(&pmon->lock);
	return rd_cnt;
}

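/*
 * A write accepts either a decimal event-class number or a class name;
 * values that are unknown or unsupported fall back to "none"
 * (MSM_IOMMU_PMU_NO_EVENT_CLASS), which disables the counter.
 */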
static ssize_t iommu_pm_event_class_write(struct file *fp,
					  const char __user *user_buff,
					  size_t count, loff_t *pos)
{
	size_t wr_cnt;
	char buf[50];
	size_t buf_size = sizeof(buf);
	struct iommu_pmon_counter *counter = fp->private_data;
	struct iommu_pmon *pmon = counter->cnt_group->pmon;
	int current_event_class;

	if ((count + *pos) >= buf_size)
		return -EINVAL;

	mutex_lock(&pmon->lock);
	current_event_class = counter->current_event_class;
	wr_cnt = simple_write_to_buffer(buf, buf_size, pos, user_buff, count);
	if (wr_cnt >= 1) {
		int rv;
		long value;
		buf[wr_cnt-1] = '\0';
		rv = kstrtol(buf, 10, &value);
		if (!rv) {
			counter->current_event_class =
				iommu_pm_find_event_class(pmon,
					iommu_pm_find_event_class_name(value));
		} else {
			counter->current_event_class =
				iommu_pm_find_event_class(pmon, buf);
		}
	}

	if (current_event_class != counter->current_event_class)
		iommu_pm_set_event_type(pmon, counter);

	mutex_unlock(&pmon->lock);
	return wr_cnt;
}

static const struct file_operations event_class_file_ops = {
	.open = iommu_pm_debug_open,
	.read = iommu_pm_event_class_read,
	.write = iommu_pm_event_class_write,
};

static ssize_t iommu_reset_counters_write(struct file *fp,
					  const char __user *user_buff,
					  size_t count, loff_t *pos)
{
	size_t wr_cnt;
	char buf[10];
	size_t buf_size = sizeof(buf);
	struct iommu_pmon *pmon = fp->private_data;
	struct iommu_info *iommu = &pmon->iommu;

	if ((count + *pos) >= buf_size)
		return -EINVAL;

	mutex_lock(&pmon->lock);
	wr_cnt = simple_write_to_buffer(buf, buf_size, pos, user_buff, count);
	if (wr_cnt >= 1) {
		unsigned long cmd = 0;
		int rv;
		buf[wr_cnt-1] = '\0';
		rv = kstrtoul(buf, 10, &cmd);
		if (!rv && (cmd == 1)) {
			if (iommu->hw_ops->is_hw_access_OK(pmon)) {
				iommu->ops->iommu_lock_acquire();
				iommu->hw_ops->reset_counters(&pmon->iommu);
				iommu->ops->iommu_lock_release();
			}
			iommu_pm_reset_counts(pmon);
			pr_info("TLB performance counters reset\n");
		} else {
			pr_err("Unknown performance monitor command: %lu\n",
				cmd);
		}
	}
	mutex_unlock(&pmon->lock);
	return wr_cnt;
}

static const struct file_operations reset_file_ops = {
	.open = iommu_pm_debug_open,
	.write = iommu_reset_counters_write,
};

static ssize_t iommu_pm_enable_counters_read(struct file *fp,
					     char __user *user_buff,
					     size_t count, loff_t *pos)
{
	size_t rd_cnt;
	char buf[5];
	size_t len;
	struct iommu_pmon *pmon = fp->private_data;

	mutex_lock(&pmon->lock);
	len = snprintf(buf, sizeof(buf), "%u\n", pmon->enabled);
	rd_cnt = simple_read_from_buffer(user_buff, count, pos, buf, len);
	mutex_unlock(&pmon->lock);
	return rd_cnt;
}

static ssize_t iommu_pm_enable_counters_write(struct file *fp,
					      const char __user *user_buff,
					      size_t count, loff_t *pos)
{
	size_t wr_cnt;
	char buf[10];
	size_t buf_size = sizeof(buf);
	struct iommu_pmon *pmon = fp->private_data;

	if ((count + *pos) >= buf_size)
		return -EINVAL;

	mutex_lock(&pmon->lock);
	wr_cnt = simple_write_to_buffer(buf, buf_size, pos, user_buff, count);
	if (wr_cnt >= 1) {
		unsigned long cmd;
		int rv;
		buf[wr_cnt-1] = '\0';
		rv = kstrtoul(buf, 10, &cmd);
		if (!rv && (cmd < 2)) {
			if (pmon->enabled == 1 && cmd == 0) {
				if (pmon->iommu_attach_count > 0)
					iommu_pm_off(pmon);
			} else if (pmon->enabled == 0 && cmd == 1) {
				/* We can only turn on perf. monitoring if
				 * iommu is attached. Delay turning on perf.
				 * monitoring until we are attached.
				 */
				if (pmon->iommu_attach_count > 0)
					iommu_pm_on(pmon);
				else
					pmon->enabled = 1;
			}
		} else {
			pr_err("Unknown performance monitor command: %lu\n",
				cmd);
		}
	}
	mutex_unlock(&pmon->lock);
	return wr_cnt;
}

static const struct file_operations event_enable_file_ops = {
	.open = iommu_pm_debug_open,
	.read = iommu_pm_enable_counters_read,
	.write = iommu_pm_enable_counters_write,
};

static ssize_t iommu_pm_avail_event_cls_read(struct file *fp,
					     char __user *user_buff,
					     size_t count, loff_t *pos)
{
	size_t rd_cnt = 0;
	struct iommu_pmon *pmon = fp->private_data;
	char *buf;
	size_t len;

	mutex_lock(&pmon->lock);

	len = iommu_pm_create_sup_cls_str(&buf, pmon);
	if (buf) {
		rd_cnt = simple_read_from_buffer(user_buff, count, pos,
						 buf, len);
		kfree(buf);
	}
	mutex_unlock(&pmon->lock);
	return rd_cnt;
}

static const struct file_operations available_event_cls_file_ops = {
	.open = iommu_pm_debug_open,
	.read = iommu_pm_avail_event_cls_read,
};

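/*
 * Create the group<N>/counter<M>/{value,current_event_class} debugfs
 * nodes for one counter group and assign each counter its absolute
 * counter number within the PMU.
 */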
static int iommu_pm_create_grp_debugfs_counters_hierarchy(
					struct iommu_pmon_cnt_group *cnt_grp,
					unsigned int *abs_counter_no)
{
	int ret = 0;
	int j;
	char name[20];

	for (j = 0; j < cnt_grp->num_counters; ++j) {
		struct dentry *grp_dir = cnt_grp->group_dir;
		struct dentry *counter_dir;
		cnt_grp->counters[j].cnt_group = cnt_grp;
		cnt_grp->counters[j].counter_no = j;
		cnt_grp->counters[j].absolute_counter_no = *abs_counter_no;
		(*abs_counter_no)++;
		cnt_grp->counters[j].value = 0;
		cnt_grp->counters[j].overflow_count = 0;
		cnt_grp->counters[j].current_event_class =
						MSM_IOMMU_PMU_NO_EVENT_CLASS;

		snprintf(name, sizeof(name), "counter%u", j);

		counter_dir = debugfs_create_dir(name, grp_dir);

		if (IS_ERR_OR_NULL(counter_dir)) {
			pr_err("unable to create counter debugfs dir %s\n",
				name);
			ret = -ENOMEM;
			goto out;
		}

		cnt_grp->counters[j].counter_dir = counter_dir;

		if (!debugfs_create_file("value", 0644, counter_dir,
					 &cnt_grp->counters[j],
					 &cnt_value_file_ops)) {
			ret = -EIO;
			goto out;
		}

		if (!debugfs_create_file("current_event_class", 0644,
					 counter_dir, &cnt_grp->counters[j],
					 &event_class_file_ops)) {
			ret = -EIO;
			goto out;
		}
	}
out:
	return ret;
}

static int iommu_pm_create_group_debugfs_hierarchy(struct iommu_info *iommu,
						   struct iommu_pmon *pmon_entry)
{
	int i;
	int ret = 0;
	char name[20];
	unsigned int abs_counter_no = 0;

	for (i = 0; i < pmon_entry->num_groups; ++i) {
		pmon_entry->cnt_grp[i].pmon = pmon_entry;
		pmon_entry->cnt_grp[i].grp_no = i;
		pmon_entry->cnt_grp[i].num_counters = pmon_entry->num_counters;
		pmon_entry->cnt_grp[i].counters =
			kzalloc(sizeof(*pmon_entry->cnt_grp[i].counters)
				* pmon_entry->cnt_grp[i].num_counters,
				GFP_KERNEL);

		if (!pmon_entry->cnt_grp[i].counters) {
			pr_err("Unable to allocate memory for counters\n");
			ret = -ENOMEM;
			goto out;
		}
		snprintf(name, sizeof(name), "group%u", i);
		pmon_entry->cnt_grp[i].group_dir = debugfs_create_dir(name,
						      pmon_entry->iommu_dir);
		if (IS_ERR_OR_NULL(pmon_entry->cnt_grp[i].group_dir)) {
			pr_err("unable to create group debugfs dir %s\n", name);
			ret = -ENOMEM;
			goto out;
		}

		ret = iommu_pm_create_grp_debugfs_counters_hierarchy(
						&pmon_entry->cnt_grp[i],
						&abs_counter_no);
		if (ret)
			goto out;
	}
out:
	return ret;
}

int msm_iommu_pm_iommu_register(struct iommu_pmon *pmon_entry)
{
	int ret = 0;
	struct iommu_info *iommu = &pmon_entry->iommu;
	int i;

	if (!iommu->ops || !iommu->iommu_name || !iommu->base
					|| !iommu->iommu_dev) {
		ret = -EINVAL;
		goto out;
	}

	if (!msm_iommu_root_debugfs_dir) {
		msm_iommu_root_debugfs_dir = debugfs_create_dir("iommu", NULL);
		if (IS_ERR_OR_NULL(msm_iommu_root_debugfs_dir)) {
			pr_err("Failed creating iommu debugfs dir \"iommu\"\n");
			ret = -EIO;
			goto out;
		}
	}

	pmon_entry->cnt_grp = kzalloc(sizeof(*pmon_entry->cnt_grp)
				      * pmon_entry->num_groups, GFP_KERNEL);
	if (!pmon_entry->cnt_grp) {
		pr_err("Unable to allocate memory for counter groups\n");
		ret = -ENOMEM;
		goto file_err;
	}
	pmon_entry->iommu_dir = debugfs_create_dir(iommu->iommu_name,
						   msm_iommu_root_debugfs_dir);
	if (IS_ERR_OR_NULL(pmon_entry->iommu_dir)) {
		pr_err("unable to create iommu debugfs dir %s\n",
			iommu->iommu_name);
		ret = -ENOMEM;
		goto free_mem;
	}

	if (!debugfs_create_file("reset_counters", 0644,
				 pmon_entry->iommu_dir, pmon_entry,
				 &reset_file_ops)) {
		ret = -EIO;
		goto free_mem;
	}

	if (!debugfs_create_file("enable_counters", 0644,
				 pmon_entry->iommu_dir, pmon_entry,
				 &event_enable_file_ops)) {
		ret = -EIO;
		goto free_mem;
	}

	if (!debugfs_create_file("available_event_classes", 0644,
				 pmon_entry->iommu_dir, pmon_entry,
				 &available_event_cls_file_ops)) {
		ret = -EIO;
		goto free_mem;
	}

	ret = iommu_pm_create_group_debugfs_hierarchy(iommu, pmon_entry);
	if (ret)
		goto free_mem;

	iommu->hw_ops->initialize_hw(pmon_entry);

	if (iommu->evt_irq > 0) {
		ret = request_threaded_irq(iommu->evt_irq, NULL,
					   iommu->hw_ops->evt_ovfl_int_handler,
					   IRQF_ONESHOT | IRQF_SHARED,
					   "msm_iommu_pmon_nonsecure_irq",
					   pmon_entry);
		if (ret) {
			pr_err("Request IRQ %d failed with ret=%d\n",
				iommu->evt_irq, ret);
			goto free_mem;
		}
	} else {
		pr_info("%s: Overflow interrupt not available\n", __func__);
	}

	dev_dbg(iommu->iommu_dev, "%s iommu registered\n", iommu->iommu_name);

	goto out;
free_mem:
	if (pmon_entry->cnt_grp) {
		for (i = 0; i < pmon_entry->num_groups; ++i) {
			kfree(pmon_entry->cnt_grp[i].counters);
			pmon_entry->cnt_grp[i].counters = NULL;
		}
	}
	kfree(pmon_entry->cnt_grp);
	pmon_entry->cnt_grp = NULL;
file_err:
	debugfs_remove_recursive(msm_iommu_root_debugfs_dir);
	msm_iommu_root_debugfs_dir = NULL;
out:
	return ret;
}
EXPORT_SYMBOL(msm_iommu_pm_iommu_register);

void msm_iommu_pm_iommu_unregister(struct device *dev)
{
	int i;
	struct iommu_pmon *pmon_entry = iommu_pm_get_pm_by_dev(dev);

	if (!pmon_entry)
		return;

	/* dev_id must match what was passed to request_threaded_irq() */
	if (pmon_entry->iommu.evt_irq > 0)
		free_irq(pmon_entry->iommu.evt_irq, pmon_entry);

	if (pmon_entry->cnt_grp) {
		for (i = 0; i < pmon_entry->num_groups; ++i)
			kfree(pmon_entry->cnt_grp[i].counters);
	}

	kfree(pmon_entry->cnt_grp);

	debugfs_remove_recursive(msm_iommu_root_debugfs_dir);
	msm_iommu_root_debugfs_dir = NULL;
}
EXPORT_SYMBOL(msm_iommu_pm_iommu_unregister);

struct iommu_pmon *msm_iommu_pm_alloc(struct device *dev)
{
	struct iommu_pmon *pmon_entry;
	struct iommu_info *info;
	pmon_entry = devm_kzalloc(dev, sizeof(*pmon_entry), GFP_KERNEL);
	if (!pmon_entry)
		return NULL;
	info = &pmon_entry->iommu;
	info->iommu_dev = dev;
	mutex_init(&pmon_entry->lock);
	iommu_pm_add_to_iommu_list(pmon_entry);
	return pmon_entry;
}
EXPORT_SYMBOL(msm_iommu_pm_alloc);

void msm_iommu_pm_free(struct device *dev)
{
	struct iommu_pmon *pmon = iommu_pm_get_pm_by_dev(dev);
	if (pmon)
		iommu_pm_del_from_iommu_list(pmon);
}
EXPORT_SYMBOL(msm_iommu_pm_free);

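/*
 * Attach/detach hooks called by the IOMMU driver. Monitoring that was
 * requested before the first attach is deferred until here, and it is
 * stopped again before the last detach, since the PMU registers are only
 * safe to access while the IOMMU is attached.
 */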
void msm_iommu_attached(struct device *dev)
{
	struct iommu_pmon *pmon = iommu_pm_get_pm_by_dev(dev);
	if (pmon) {
		mutex_lock(&pmon->lock);
		++pmon->iommu_attach_count;
		if (pmon->iommu_attach_count == 1) {
			/* If perf. monitoring was enabled before we attached,
			 * do the actual enabling now that we are attached.
			 */
			if (pmon->enabled)
				iommu_pm_on(pmon);
		}
		mutex_unlock(&pmon->lock);
	}
}
EXPORT_SYMBOL(msm_iommu_attached);

void msm_iommu_detached(struct device *dev)
{
	struct iommu_pmon *pmon = iommu_pm_get_pm_by_dev(dev);
	if (pmon) {
		mutex_lock(&pmon->lock);
		if (pmon->iommu_attach_count == 1) {
			/* If perf. monitoring is still enabled, disable it
			 * before we do the detach.
			 */
			if (pmon->enabled)
				iommu_pm_off(pmon);
		}
		BUG_ON(pmon->iommu_attach_count == 0);
		--pmon->iommu_attach_count;
		mutex_unlock(&pmon->lock);
	}
}
EXPORT_SYMBOL(msm_iommu_detached);