blob: 503d4ab13a274ecb5990eabd8f6f5a5966bd2173 [file] [log] [blame]
Olav Haugan99660ca2012-12-04 13:30:41 -08001/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/string.h>
17#include <linux/iommu.h>
18#include <linux/slab.h>
19#include <linux/device.h>
20#include <linux/interrupt.h>
21#include <linux/bitops.h>
22#include <linux/debugfs.h>
Olav Haugan99660ca2012-12-04 13:30:41 -080023#include <mach/iommu.h>
24#include <mach/iommu_perfmon.h>
25
/* All registered perf-monitor instances, linked via iommu_pmon.iommu_list. */
static LIST_HEAD(iommu_list);
/* Root "iommu" debugfs directory; created lazily on first registration. */
static struct dentry *msm_iommu_root_debugfs_dir;
/* Sentinel name shown/accepted when no event class is programmed. */
static const char *NO_EVENT_CLASS_NAME = "none";
/* Worst-case "[<num>] <name>\n" entry length used to size the list buffer. */
static const unsigned int MAX_EVEN_CLASS_NAME_LEN = 36;
30
/* One selectable PMU event class: HW event number and human-readable name. */
struct event_class {
	unsigned int event_number;
	const char *desc;
};
35
/*
 * Master table of every event class any MSM IOMMU PMU can expose.
 * A given instance advertises only the subset listed in its
 * pmon->event_cls_supported[] array.
 */
static struct event_class pmu_event_classes[] = {
	{ 0x00, "cycle_count" },
	{ 0x01, "cycle_count64" },
	{ 0x08, "tlb_refill" },
	{ 0x09, "tlb_refill_read" },
	{ 0x0A, "tlb_refill_write" },
	{ 0x10, "access" },
	{ 0x11, "access_read" },
	{ 0x12, "access_write" },
	{ 0x80, "full_misses" },
	{ 0x81, "partial_miss_1lbfb_hit" },
	{ 0x82, "partial_miss_2lbfb_hit" },
	{ 0x83, "full_hit" },
	{ 0x90, "pred_req_full_miss" },
	{ 0x91, "pred_req_partial_miss_1lbfb_hit" },
	{ 0x92, "pred_req_partial_miss_2lbfb_hit" },
	{ 0xb0, "tot_num_miss_axi_htw_read_req" },
	{ 0xb1, "tot_num_pred_axi_htw_read_req" },
};
55
Olav Haugan99660ca2012-12-04 13:30:41 -080056static unsigned int iommu_pm_create_sup_cls_str(char **buf,
Olav Haugan0c2d9322013-01-31 18:35:30 -080057 struct iommu_pmon *pmon)
Olav Haugan99660ca2012-12-04 13:30:41 -080058{
Olav Haugan0c2d9322013-01-31 18:35:30 -080059 unsigned long buf_size = ARRAY_SIZE(pmu_event_classes) *
60 MAX_EVEN_CLASS_NAME_LEN;
Olav Haugan99660ca2012-12-04 13:30:41 -080061 unsigned int pos = 0;
Olav Haugan0c2d9322013-01-31 18:35:30 -080062 unsigned int nevent_cls = pmon->nevent_cls_supported;
Olav Haugan99660ca2012-12-04 13:30:41 -080063
64 *buf = kzalloc(buf_size, GFP_KERNEL);
65 if (*buf) {
Olav Haugan0c2d9322013-01-31 18:35:30 -080066 unsigned int j;
Olav Haugan99660ca2012-12-04 13:30:41 -080067 int i;
68 struct event_class *ptr;
69 size_t array_len = ARRAY_SIZE(pmu_event_classes);
70 ptr = pmu_event_classes;
71
Olav Haugan0c2d9322013-01-31 18:35:30 -080072 for (j = 0; j < nevent_cls; ++j) {
73 for (i = 0; i < array_len; ++i) {
74
75 if (ptr[i].event_number !=
76 pmon->event_cls_supported[j])
77 continue;
78
Olav Haugan99660ca2012-12-04 13:30:41 -080079 if (pos < buf_size) {
80 pos += snprintf(&(*buf)[pos],
81 buf_size-pos,
82 "[%u] %s\n",
83 ptr[i].event_number,
84 ptr[i].desc);
85 }
Olav Haugan0c2d9322013-01-31 18:35:30 -080086 break;
Olav Haugan99660ca2012-12-04 13:30:41 -080087 }
88 }
89 }
90 return pos;
91}
92
Olav Haugan14468272013-04-11 16:07:46 -070093static int iommu_pm_event_class_supported(struct iommu_pmon *pmon,
94 int event_class)
95{
96 unsigned int nevent_cls = pmon->nevent_cls_supported;
97 unsigned int i;
98
99 for (i = 0; i < nevent_cls; ++i) {
100 if (event_class == pmon->event_cls_supported[i])
101 return event_class;
102 }
103 return MSM_IOMMU_PMU_NO_EVENT_CLASS;
104}
105
Olav Haugan99660ca2012-12-04 13:30:41 -0800106static const char *iommu_pm_find_event_class_name(int event_class)
107{
108 size_t array_len;
109 struct event_class *ptr;
110 int i;
111 const char *event_class_name = NO_EVENT_CLASS_NAME;
Olav Haugan0c2d9322013-01-31 18:35:30 -0800112 if (event_class < 0)
Olav Haugan99660ca2012-12-04 13:30:41 -0800113 goto out;
Olav Haugan0c2d9322013-01-31 18:35:30 -0800114
115 array_len = ARRAY_SIZE(pmu_event_classes);
116 ptr = pmu_event_classes;
Olav Haugan99660ca2012-12-04 13:30:41 -0800117
118 for (i = 0; i < array_len; ++i) {
119 if (ptr[i].event_number == event_class) {
120 event_class_name = ptr[i].desc;
121 break;
122 }
123 }
124
125out:
126 return event_class_name;
127}
128
Olav Haugan14468272013-04-11 16:07:46 -0700129static int iommu_pm_find_event_class(struct iommu_pmon *pmon,
130 const char *event_class_name)
Olav Haugan99660ca2012-12-04 13:30:41 -0800131{
132 size_t array_len;
133 struct event_class *ptr;
134 int i;
Olav Haugan7a2f99c2013-02-04 14:43:26 -0800135 int event_class = MSM_IOMMU_PMU_NO_EVENT_CLASS;
Olav Haugan99660ca2012-12-04 13:30:41 -0800136
137 if (strcmp(event_class_name, NO_EVENT_CLASS_NAME) == 0)
138 goto out;
139
140 array_len = ARRAY_SIZE(pmu_event_classes);
141 ptr = pmu_event_classes;
142
143 for (i = 0; i < array_len; ++i) {
144 if (strcmp(ptr[i].desc, event_class_name) == 0) {
145 event_class = ptr[i].event_number;
146 goto out;
147 }
148 }
149
Olav Haugan99660ca2012-12-04 13:30:41 -0800150out:
Olav Haugan14468272013-04-11 16:07:46 -0700151 event_class = iommu_pm_event_class_supported(pmon, event_class);
Olav Haugan99660ca2012-12-04 13:30:41 -0800152 return event_class;
153}
154
/* Register a pmon instance on the global lookup list. */
static inline void iommu_pm_add_to_iommu_list(struct iommu_pmon *iommu_pmon)
{
	list_add(&iommu_pmon->iommu_list, &iommu_list);
}
159
/* Remove a pmon instance from the global lookup list. */
static inline void iommu_pm_del_from_iommu_list(struct iommu_pmon *iommu_pmon)
{
	list_del(&iommu_pmon->iommu_list);
}
164
165static struct iommu_pmon *iommu_pm_get_pm_by_dev(struct device *dev)
166{
167 struct iommu_pmon *pmon;
168 struct iommu_info *info;
169 struct list_head *ent;
170 list_for_each(ent, &iommu_list) {
171 pmon = list_entry(ent, struct iommu_pmon, iommu_list);
172 info = &pmon->iommu;
173 if (dev == info->iommu_dev)
174 return pmon;
175 }
176 return NULL;
177}
178
/*
 * Program (or de-program) one counter according to its
 * current_event_class, and clear its cached SW counts.
 *
 * NO_EVENT_CLASS: disable the counter and its overflow interrupt and
 * write class 0 to HW.  Otherwise: write the class, then enable the
 * overflow interrupt and the counter.  HW is only touched when
 * is_hw_access_OK() says so; register access is bracketed by the
 * global IOMMU lock.  Caller holds pmon->lock.
 */
static void iommu_pm_set_event_type(struct iommu_pmon *pmon,
				    struct iommu_pmon_counter *counter)
{
	int event_class;
	unsigned int count_no;
	struct iommu_info *iommu = &pmon->iommu;

	event_class = counter->current_event_class;
	count_no = counter->absolute_counter_no;

	if (event_class == MSM_IOMMU_PMU_NO_EVENT_CLASS) {
		if (iommu->hw_ops->is_hw_access_OK(pmon)) {
			iommu->ops->iommu_lock_acquire(1);
			iommu->hw_ops->counter_disable(iommu, counter);
			iommu->hw_ops->ovfl_int_disable(iommu, counter);
			iommu->hw_ops->set_event_class(pmon, count_no, 0);
			iommu->ops->iommu_lock_release(1);
		}
		counter->overflow_count = 0;
		counter->value = 0;
	} else {
		counter->overflow_count = 0;
		counter->value = 0;
		if (iommu->hw_ops->is_hw_access_OK(pmon)) {
			iommu->ops->iommu_lock_acquire(1);
			/* Program class before enabling int/counter so the
			 * counter never runs with a stale class.
			 */
			iommu->hw_ops->set_event_class(pmon, count_no,
						       event_class);
			iommu->hw_ops->ovfl_int_enable(iommu, counter);
			iommu->hw_ops->counter_enable(iommu, counter);
			iommu->ops->iommu_lock_release(1);
		}
	}
}
212
213static void iommu_pm_reset_counts(struct iommu_pmon *pmon)
214{
215 unsigned int i;
216 unsigned int j;
217 for (i = 0; i < pmon->num_groups; ++i) {
218 struct iommu_pmon_cnt_group *cnt_grp = &pmon->cnt_grp[i];
219 for (j = 0; j < cnt_grp->num_counters; ++j) {
220 cnt_grp->counters[j].value = 0;
221 cnt_grp->counters[j].overflow_count = 0;
222 }
223 }
224}
225
Olav Haugan99660ca2012-12-04 13:30:41 -0800226static void iommu_pm_set_all_counters(struct iommu_pmon *pmon)
227{
228 unsigned int i;
229 unsigned int j;
230 for (i = 0; i < pmon->num_groups; ++i) {
231 struct iommu_pmon_cnt_group *cnt_grp = &pmon->cnt_grp[i];
232 for (j = 0; j < cnt_grp->num_counters; ++j)
233 iommu_pm_set_event_type(pmon, &cnt_grp->counters[j]);
234 }
235}
236
237static void iommu_pm_read_all_counters(struct iommu_pmon *pmon)
238{
239 unsigned int i;
240 unsigned int j;
Olav Hauganef69e892013-02-04 13:47:08 -0800241 struct iommu_info *iommu = &pmon->iommu;
Olav Haugan99660ca2012-12-04 13:30:41 -0800242 for (i = 0; i < pmon->num_groups; ++i) {
243 struct iommu_pmon_cnt_group *cnt_grp = &pmon->cnt_grp[i];
244 for (j = 0; j < cnt_grp->num_counters; ++j) {
245 struct iommu_pmon_counter *counter;
246 counter = &cnt_grp->counters[j];
Olav Hauganef69e892013-02-04 13:47:08 -0800247 counter->value = iommu->hw_ops->read_counter(counter);
Olav Haugan99660ca2012-12-04 13:30:41 -0800248 }
249 }
250}
251
/*
 * Turn performance monitoring on: power/bus/clock up the IOMMU,
 * reset HW and SW counters, program all counters, then enable every
 * counter group and the global PMU enable.  Caller holds pmon->lock.
 */
static void iommu_pm_on(struct iommu_pmon *pmon)
{
	unsigned int i;
	struct iommu_info *iommu = &pmon->iommu;
	struct msm_iommu_drvdata *iommu_drvdata =
					dev_get_drvdata(iommu->iommu_dev);

	iommu->ops->iommu_power_on(iommu_drvdata);
	iommu->ops->iommu_bus_vote(iommu_drvdata, 1);
	iommu->ops->iommu_clk_on(iommu_drvdata);

	/* Reset counters in HW */
	iommu->ops->iommu_lock_acquire(1);
	iommu->hw_ops->reset_counters(&pmon->iommu);
	iommu->ops->iommu_lock_release(1);

	/* Reset SW counters */
	iommu_pm_reset_counts(pmon);

	/* NOTE(review): set before programming counters — presumably
	 * hw_ops->is_hw_access_OK() consults pmon->enabled; confirm.
	 */
	pmon->enabled = 1;

	iommu_pm_set_all_counters(pmon);

	iommu->ops->iommu_lock_acquire(1);

	/* enable all counter group */
	for (i = 0; i < pmon->num_groups; ++i)
		iommu->hw_ops->grp_enable(iommu, i);

	/* enable global counters */
	iommu->hw_ops->enable_pm(iommu);
	iommu->ops->iommu_lock_release(1);

	pr_info("%s: TLB performance monitoring turned ON\n",
		pmon->iommu.iommu_name);
}
288
/*
 * Turn performance monitoring off: disable the PMU and all groups,
 * fold in any last overflow, snapshot the counters into the SW cache,
 * then release clock/bus/power in reverse order of iommu_pm_on().
 * Caller holds pmon->lock.
 */
static void iommu_pm_off(struct iommu_pmon *pmon)
{
	unsigned int i;
	struct iommu_info *iommu = &pmon->iommu;
	struct msm_iommu_drvdata *iommu_drvdata =
					dev_get_drvdata(iommu->iommu_dev);

	pmon->enabled = 0;

	iommu->ops->iommu_lock_acquire(1);

	/* disable global counters */
	iommu->hw_ops->disable_pm(iommu);

	/* Check if we overflowed just before turning off pmon */
	iommu->hw_ops->check_for_overflow(pmon);

	/* disable all counter group */
	for (i = 0; i < pmon->num_groups; ++i)
		iommu->hw_ops->grp_disable(iommu, i);

	/* Update cached copy of counters before turning off power */
	iommu_pm_read_all_counters(pmon);

	iommu->ops->iommu_lock_release(1);
	iommu->ops->iommu_clk_off(iommu_drvdata);
	iommu->ops->iommu_bus_vote(iommu_drvdata, 0);
	iommu->ops->iommu_power_off(iommu_drvdata);

	pr_info("%s: TLB performance monitoring turned OFF\n",
		pmon->iommu.iommu_name);
}
321
/* Common debugfs open: hand the dentry's private object to read/write. */
static int iommu_pm_debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
327
/*
 * debugfs read handler for a counter's "value" file.
 *
 * Returns the 64-bit logical count: the cached 32-bit value plus 2^32
 * per recorded overflow.  The cache is refreshed from HW first when
 * register access is currently allowed.
 */
static ssize_t iommu_pm_count_value_read(struct file *fp,
					 char __user *user_buff,
					 size_t count, loff_t *pos)
{
	size_t rd_cnt;
	unsigned long long full_count;

	struct iommu_pmon_counter *counter = fp->private_data;
	struct iommu_pmon *pmon = counter->cnt_group->pmon;
	struct iommu_info *iommu = &pmon->iommu;
	char buf[50];
	size_t len;

	mutex_lock(&pmon->lock);

	if (iommu->hw_ops->is_hw_access_OK(pmon)) {
		iommu->ops->iommu_lock_acquire(1);
		counter->value = iommu->hw_ops->read_counter(counter);
		iommu->ops->iommu_lock_release(1);
	}
	/* Each recorded overflow contributes 2^32 to the logical count. */
	full_count = (unsigned long long) counter->value +
		     ((unsigned long long)counter->overflow_count *
		      0x100000000ULL);

	len = snprintf(buf, 50, "%llu\n", full_count);
	rd_cnt = simple_read_from_buffer(user_buff, count, pos, buf, len);
	mutex_unlock(&pmon->lock);

	return rd_cnt;
}
358
/* debugfs ops for counter<N>/value (read-only counter value). */
static const struct file_operations cnt_value_file_ops = {
	.open = iommu_pm_debug_open,
	.read = iommu_pm_count_value_read,
};
363
364static ssize_t iommu_pm_event_class_read(struct file *fp,
365 char __user *user_buff,
366 size_t count, loff_t *pos)
367{
368 size_t rd_cnt;
369 struct iommu_pmon_counter *counter = fp->private_data;
370 struct iommu_pmon *pmon = counter->cnt_group->pmon;
371 char buf[50];
372 const char *event_class_name;
373 size_t len;
374
375 mutex_lock(&pmon->lock);
376 event_class_name = iommu_pm_find_event_class_name(
377 counter->current_event_class);
378 len = snprintf(buf, 50, "%s\n", event_class_name);
379
380 rd_cnt = simple_read_from_buffer(user_buff, count, pos, buf, len);
381 mutex_unlock(&pmon->lock);
382 return rd_cnt;
383}
384
385static ssize_t iommu_pm_event_class_write(struct file *fp,
386 const char __user *user_buff,
387 size_t count, loff_t *pos)
388{
389 size_t wr_cnt;
390 char buf[50];
391 size_t buf_size = sizeof(buf);
392 struct iommu_pmon_counter *counter = fp->private_data;
393 struct iommu_pmon *pmon = counter->cnt_group->pmon;
394 int current_event_class;
395
396 if ((count + *pos) >= buf_size)
397 return -EINVAL;
398
399 mutex_lock(&pmon->lock);
400 current_event_class = counter->current_event_class;
401 wr_cnt = simple_write_to_buffer(buf, buf_size, pos, user_buff, count);
402 if (wr_cnt >= 1) {
403 int rv;
404 long value;
405 buf[wr_cnt-1] = '\0';
Olav Haugan0c2d9322013-01-31 18:35:30 -0800406 rv = kstrtol(buf, 10, &value);
Olav Haugan99660ca2012-12-04 13:30:41 -0800407 if (!rv) {
408 counter->current_event_class =
Olav Haugan14468272013-04-11 16:07:46 -0700409 iommu_pm_find_event_class(pmon,
Olav Haugan99660ca2012-12-04 13:30:41 -0800410 iommu_pm_find_event_class_name(value));
411 } else {
412 counter->current_event_class =
Olav Haugan14468272013-04-11 16:07:46 -0700413 iommu_pm_find_event_class(pmon, buf);
Olav Haugan99660ca2012-12-04 13:30:41 -0800414 } }
415
416 if (current_event_class != counter->current_event_class)
417 iommu_pm_set_event_type(pmon, counter);
418
419 mutex_unlock(&pmon->lock);
420 return wr_cnt;
421}
422
/* debugfs ops for counter<N>/current_event_class (read/write class). */
static const struct file_operations event_class_file_ops = {
	.open = iommu_pm_debug_open,
	.read = iommu_pm_event_class_read,
	.write = iommu_pm_event_class_write,
};
428
429static ssize_t iommu_reset_counters_write(struct file *fp,
430 const char __user *user_buff,
431 size_t count, loff_t *pos)
432{
433 size_t wr_cnt;
434 char buf[10];
435 size_t buf_size = sizeof(buf);
436 struct iommu_pmon *pmon = fp->private_data;
437 struct iommu_info *iommu = &pmon->iommu;
438
439 if ((count + *pos) >= buf_size)
440 return -EINVAL;
441
442 mutex_lock(&pmon->lock);
443 wr_cnt = simple_write_to_buffer(buf, buf_size, pos, user_buff, count);
444 if (wr_cnt >= 1) {
445 unsigned long cmd = 0;
446 int rv;
447 buf[wr_cnt-1] = '\0';
448 rv = kstrtoul(buf, 10, &cmd);
449 if (!rv && (cmd == 1)) {
Olav Hauganef69e892013-02-04 13:47:08 -0800450 if (iommu->hw_ops->is_hw_access_OK(pmon)) {
Olav Hauganf75b52e2013-10-01 09:18:03 -0700451 iommu->ops->iommu_lock_acquire(1);
Olav Hauganef69e892013-02-04 13:47:08 -0800452 iommu->hw_ops->reset_counters(&pmon->iommu);
Olav Hauganf75b52e2013-10-01 09:18:03 -0700453 iommu->ops->iommu_lock_release(1);
Olav Haugan99660ca2012-12-04 13:30:41 -0800454 }
455 iommu_pm_reset_counts(pmon);
456 pr_info("TLB performance counters reset\n");
457 } else {
458 pr_err("Unknown performance monitor command: %lu\n",
459 cmd);
460 }
461 }
462 mutex_unlock(&pmon->lock);
463 return wr_cnt;
464}
465
/* debugfs ops for "reset_counters" (write-only command file). */
static const struct file_operations reset_file_ops = {
	.open = iommu_pm_debug_open,
	.write = iommu_reset_counters_write,
};
470
471static ssize_t iommu_pm_enable_counters_read(struct file *fp,
472 char __user *user_buff,
473 size_t count, loff_t *pos)
474{
475 size_t rd_cnt;
476 char buf[5];
477 size_t len;
478 struct iommu_pmon *pmon = fp->private_data;
479
480 mutex_lock(&pmon->lock);
481 len = snprintf(buf, 5, "%u\n", pmon->enabled);
482 rd_cnt = simple_read_from_buffer(user_buff, count, pos, buf, len);
483 mutex_unlock(&pmon->lock);
484 return rd_cnt;
485}
486
487static ssize_t iommu_pm_enable_counters_write(struct file *fp,
488 const char __user *user_buff,
489 size_t count, loff_t *pos)
490{
491 size_t wr_cnt;
492 char buf[10];
493 size_t buf_size = sizeof(buf);
494 struct iommu_pmon *pmon = fp->private_data;
495
496 if ((count + *pos) >= buf_size)
497 return -EINVAL;
498
499 mutex_lock(&pmon->lock);
500 wr_cnt = simple_write_to_buffer(buf, buf_size, pos, user_buff, count);
501 if (wr_cnt >= 1) {
502 unsigned long cmd;
503 int rv;
504 buf[wr_cnt-1] = '\0';
505 rv = kstrtoul(buf, 10, &cmd);
506 if (!rv && (cmd < 2)) {
507 if (pmon->enabled == 1 && cmd == 0) {
Olav Haugan463e6402013-04-15 10:53:32 -0700508 if (pmon->iommu.always_on ||
509 pmon->iommu_attach_count > 0)
Olav Haugan99660ca2012-12-04 13:30:41 -0800510 iommu_pm_off(pmon);
511 } else if (pmon->enabled == 0 && cmd == 1) {
512 /* We can only turn on perf. monitoring if
Olav Haugan463e6402013-04-15 10:53:32 -0700513 * iommu is attached (if not always on).
514 * Delay turning on perf. monitoring until
515 * we are attached.
Olav Haugan99660ca2012-12-04 13:30:41 -0800516 */
Olav Haugan463e6402013-04-15 10:53:32 -0700517 if (pmon->iommu.always_on ||
518 pmon->iommu_attach_count > 0)
Olav Haugan99660ca2012-12-04 13:30:41 -0800519 iommu_pm_on(pmon);
520 else
521 pmon->enabled = 1;
522 }
523 } else {
524 pr_err("Unknown performance monitor command: %lu\n",
525 cmd);
526 }
527 }
528 mutex_unlock(&pmon->lock);
529 return wr_cnt;
530}
531
/* debugfs ops for "enable_counters" (read state / write 0 or 1). */
static const struct file_operations event_enable_file_ops = {
	.open = iommu_pm_debug_open,
	.read = iommu_pm_enable_counters_read,
	.write = iommu_pm_enable_counters_write,
};
537
/*
 * debugfs read handler for "available_event_classes": returns a
 * newline-separated "[<num>] <name>" list of supported event classes.
 */
static ssize_t iommu_pm_avail_event_cls_read(struct file *fp,
					     char __user *user_buff,
					     size_t count, loff_t *pos)
{
	size_t rd_cnt = 0;
	struct iommu_pmon *pmon = fp->private_data;
	char *buf;
	size_t len;

	mutex_lock(&pmon->lock);

	/* On allocation failure buf is set to NULL and len is 0. */
	len = iommu_pm_create_sup_cls_str(&buf, pmon);
	if (buf) {
		rd_cnt = simple_read_from_buffer(user_buff, count, pos,
						 buf, len);
		kfree(buf);
	}
	mutex_unlock(&pmon->lock);
	return rd_cnt;
}
558
/* debugfs ops for "available_event_classes" (read-only listing). */
static const struct file_operations available_event_cls_file_ops = {
	.open = iommu_pm_debug_open,
	.read = iommu_pm_avail_event_cls_read,
};
563
564
565
/*
 * Create the per-counter debugfs entries for one counter group:
 * counter<M>/{value,current_event_class}, and initialize each
 * counter's bookkeeping fields.  *abs_counter_no is a running counter
 * index across all groups (the HW-facing counter number).
 *
 * Returns 0 or a negative errno; on failure, entries created so far
 * are left for the caller's cleanup path to remove.
 */
static int iommu_pm_create_grp_debugfs_counters_hierarchy(
					struct iommu_pmon_cnt_group *cnt_grp,
					unsigned int *abs_counter_no)
{
	int ret = 0;
	int j;
	char name[20];

	for (j = 0; j < cnt_grp->num_counters; ++j) {
		struct dentry *grp_dir = cnt_grp->group_dir;
		struct dentry *counter_dir;
		cnt_grp->counters[j].cnt_group = cnt_grp;
		cnt_grp->counters[j].counter_no = j;
		cnt_grp->counters[j].absolute_counter_no = *abs_counter_no;
		(*abs_counter_no)++;
		cnt_grp->counters[j].value = 0;
		cnt_grp->counters[j].overflow_count = 0;
		cnt_grp->counters[j].current_event_class =
						MSM_IOMMU_PMU_NO_EVENT_CLASS;

		snprintf(name, 20, "counter%u", j);

		counter_dir = debugfs_create_dir(name, grp_dir);

		if (IS_ERR_OR_NULL(counter_dir)) {
			pr_err("unable to create counter debugfs dir %s\n",
			       name);
			ret = -ENOMEM;
			goto out;
		}

		cnt_grp->counters[j].counter_dir = counter_dir;

		if (!debugfs_create_file("value", 0644, counter_dir,
					 &cnt_grp->counters[j],
					 &cnt_value_file_ops)) {
			ret = -EIO;
			goto out;
		}

		if (!debugfs_create_file("current_event_class", 0644,
					 counter_dir, &cnt_grp->counters[j],
					 &event_class_file_ops)) {
			ret = -EIO;
			goto out;
		}
	}
out:
	return ret;
}
616
/*
 * Allocate counter arrays and create the group<N>/ debugfs tree for
 * every counter group, delegating per-counter entries to
 * iommu_pm_create_grp_debugfs_counters_hierarchy().
 *
 * Returns 0 or a negative errno; partially created state is left for
 * the caller's error path to free/remove.
 */
static int iommu_pm_create_group_debugfs_hierarchy(struct iommu_info *iommu,
				   struct iommu_pmon *pmon_entry)
{
	int i;
	int ret = 0;
	char name[20];
	unsigned int abs_counter_no = 0;

	for (i = 0; i < pmon_entry->num_groups; ++i) {
		pmon_entry->cnt_grp[i].pmon = pmon_entry;
		pmon_entry->cnt_grp[i].grp_no = i;
		pmon_entry->cnt_grp[i].num_counters = pmon_entry->num_counters;
		pmon_entry->cnt_grp[i].counters =
			kzalloc(sizeof(*pmon_entry->cnt_grp[i].counters)
			* pmon_entry->cnt_grp[i].num_counters, GFP_KERNEL);

		if (!pmon_entry->cnt_grp[i].counters) {
			pr_err("Unable to allocate memory for counters\n");
			ret = -ENOMEM;
			goto out;
		}
		snprintf(name, 20, "group%u", i);
		pmon_entry->cnt_grp[i].group_dir = debugfs_create_dir(name,
							pmon_entry->iommu_dir);
		if (IS_ERR_OR_NULL(pmon_entry->cnt_grp[i].group_dir)) {
			pr_err("unable to create group debugfs dir %s\n", name);
			ret = -ENOMEM;
			goto out;
		}

		ret = iommu_pm_create_grp_debugfs_counters_hierarchy(
						&pmon_entry->cnt_grp[i],
						&abs_counter_no);
		if (ret)
			goto out;
	}
out:
	return ret;
}
656
/*
 * Register a performance monitor instance: create its debugfs tree
 * (under a lazily created root "iommu" directory), allocate counter
 * groups, initialize the PMU hardware description, and request the
 * overflow interrupt (if one is wired up).
 *
 * Returns 0 on success or a negative errno.  pmon_entry must have
 * iommu.ops/iommu_name/base/iommu_dev populated by the caller.
 */
int msm_iommu_pm_iommu_register(struct iommu_pmon *pmon_entry)
{
	int ret = 0;
	struct iommu_info *iommu = &pmon_entry->iommu;
	int i;

	if (!iommu->ops || !iommu->iommu_name || !iommu->base
					|| !iommu->iommu_dev) {
		ret = -EINVAL;
		goto out;
	}

	if (!msm_iommu_root_debugfs_dir) {
		msm_iommu_root_debugfs_dir = debugfs_create_dir("iommu", NULL);
		if (IS_ERR_OR_NULL(msm_iommu_root_debugfs_dir)) {
			pr_err("Failed creating iommu debugfs dir \"iommu\"\n");
			ret = -EIO;
			goto out;
		}
	}

	pmon_entry->cnt_grp = kzalloc(sizeof(*pmon_entry->cnt_grp)
				      * pmon_entry->num_groups, GFP_KERNEL);
	if (!pmon_entry->cnt_grp) {
		pr_err("Unable to allocate memory for counter groups\n");
		ret = -ENOMEM;
		goto file_err;
	}
	pmon_entry->iommu_dir = debugfs_create_dir(iommu->iommu_name,
						  msm_iommu_root_debugfs_dir);
	if (IS_ERR_OR_NULL(pmon_entry->iommu_dir)) {
		pr_err("unable to create iommu debugfs dir %s\n",
		       iommu->iommu_name);
		ret = -ENOMEM;
		goto free_mem;
	}

	if (!debugfs_create_file("reset_counters", 0644,
			pmon_entry->iommu_dir, pmon_entry, &reset_file_ops)) {
		ret = -EIO;
		goto free_mem;
	}

	if (!debugfs_create_file("enable_counters", 0644,
		pmon_entry->iommu_dir, pmon_entry, &event_enable_file_ops)) {
		ret = -EIO;
		goto free_mem;
	}

	if (!debugfs_create_file("available_event_classes", 0644,
			pmon_entry->iommu_dir, pmon_entry,
			&available_event_cls_file_ops)) {
		ret = -EIO;
		goto free_mem;
	}

	ret = iommu_pm_create_group_debugfs_hierarchy(iommu, pmon_entry);
	if (ret)
		goto free_mem;

	iommu->hw_ops->initialize_hw(pmon_entry);

	if (iommu->evt_irq > 0) {
		/* dev_id cookie is pmon_entry; free_irq() must use the
		 * same pointer.
		 */
		ret = request_threaded_irq(iommu->evt_irq, NULL,
				iommu->hw_ops->evt_ovfl_int_handler,
				IRQF_ONESHOT | IRQF_SHARED,
				"msm_iommu_pmon_nonsecure_irq", pmon_entry);
		if (ret) {
			pr_err("Request IRQ %d failed with ret=%d\n",
				iommu->evt_irq,
				ret);
			goto free_mem;
		}
	} else {
		pr_info("%s: Overflow interrupt not available\n", __func__);
	}

	dev_dbg(iommu->iommu_dev, "%s iommu registered\n", iommu->iommu_name);

	goto out;
free_mem:
	if (pmon_entry->cnt_grp) {
		for (i = 0; i < pmon_entry->num_groups; ++i) {
			kfree(pmon_entry->cnt_grp[i].counters);
			pmon_entry->cnt_grp[i].counters = 0;
		}
	}
	kfree(pmon_entry->cnt_grp);
	pmon_entry->cnt_grp = 0;
file_err:
	/* NOTE(review): this removes the SHARED root "iommu" dir, which
	 * also tears down debugfs entries of previously registered
	 * IOMMUs; removing only pmon_entry->iommu_dir looks more
	 * correct — confirm intent before changing.
	 */
	debugfs_remove_recursive(msm_iommu_root_debugfs_dir);
out:
	return ret;
}
EXPORT_SYMBOL(msm_iommu_pm_iommu_register);
752
753void msm_iommu_pm_iommu_unregister(struct device *dev)
754{
755 int i;
756 struct iommu_pmon *pmon_entry = iommu_pm_get_pm_by_dev(dev);
757
758 if (!pmon_entry)
759 return;
760
761 free_irq(pmon_entry->iommu.evt_irq, pmon_entry->iommu.iommu_dev);
762
763 if (!pmon_entry)
764 goto remove_debugfs;
765
766 if (pmon_entry->cnt_grp) {
767 for (i = 0; i < pmon_entry->num_groups; ++i)
768 kfree(pmon_entry->cnt_grp[i].counters);
769 }
770
771 kfree(pmon_entry->cnt_grp);
772
773remove_debugfs:
774 debugfs_remove_recursive(msm_iommu_root_debugfs_dir);
775
776 return;
777}
778EXPORT_SYMBOL(msm_iommu_pm_iommu_unregister);
779
/*
 * Allocate (devm-managed) and minimally initialize a pmon instance
 * for @dev, and add it to the global lookup list.  Returns the new
 * instance, or NULL on allocation failure.  Freed automatically with
 * the device; msm_iommu_pm_free() only unlinks it from the list.
 */
struct iommu_pmon *msm_iommu_pm_alloc(struct device *dev)
{
	struct iommu_pmon *pmon_entry;
	struct iommu_info *info;
	pmon_entry = devm_kzalloc(dev, sizeof(*pmon_entry), GFP_KERNEL);
	if (!pmon_entry)
		return NULL;
	info = &pmon_entry->iommu;
	info->iommu_dev = dev;
	mutex_init(&pmon_entry->lock);
	iommu_pm_add_to_iommu_list(pmon_entry);
	return pmon_entry;
}
EXPORT_SYMBOL(msm_iommu_pm_alloc);
794
/*
 * Unlink @dev's pmon instance from the global list.  The memory
 * itself is devm-managed (see msm_iommu_pm_alloc) and is not freed
 * here.
 */
void msm_iommu_pm_free(struct device *dev)
{
	struct iommu_pmon *pmon = iommu_pm_get_pm_by_dev(dev);
	if (pmon)
		iommu_pm_del_from_iommu_list(pmon);
}
EXPORT_SYMBOL(msm_iommu_pm_free);
802
/*
 * Notification that a domain attached to @dev's IOMMU.  On the first
 * attach, monitoring that was requested earlier (enabled while
 * detached, non-always-on) is actually turned on now that the HW is
 * reachable.
 */
void msm_iommu_attached(struct device *dev)
{
	struct iommu_pmon *pmon = iommu_pm_get_pm_by_dev(dev);
	if (pmon) {
		mutex_lock(&pmon->lock);
		++pmon->iommu_attach_count;
		if (pmon->iommu_attach_count == 1) {
			/* If perf. mon was enabled before we attached we do
			 * the actual enabling after we attach.
			 */
			if (pmon->enabled && !pmon->iommu.always_on)
				iommu_pm_on(pmon);
		}
		mutex_unlock(&pmon->lock);
	}
}
EXPORT_SYMBOL(msm_iommu_attached);
820
/*
 * Notification that a domain detached from @dev's IOMMU.  On the last
 * detach, active monitoring of a non-always-on IOMMU is stopped while
 * the hardware is still reachable.
 */
void msm_iommu_detached(struct device *dev)
{
	struct iommu_pmon *pmon = iommu_pm_get_pm_by_dev(dev);
	if (pmon) {
		mutex_lock(&pmon->lock);
		if (pmon->iommu_attach_count == 1) {
			/* If perf. mon is still enabled we have to disable
			 * before we do the detach if iommu is not always on.
			 */
			if (pmon->enabled && !pmon->iommu.always_on)
				iommu_pm_off(pmon);
		}
		BUG_ON(pmon->iommu_attach_count == 0);
		--pmon->iommu_attach_count;
		mutex_unlock(&pmon->lock);
	}
}
EXPORT_SYMBOL(msm_iommu_detached);
839