blob: 6d79cfbdd8f618641222f23177e4f7a0e4abdbe3 [file] [log] [blame]
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07001/*
Charan Teja Reddy29f61402017-02-09 20:44:29 +05302 * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14
15#define pr_fmt(fmt) "iommu-debug: %s: " fmt, __func__
16
17#include <linux/debugfs.h>
18#include <linux/device.h>
19#include <linux/iommu.h>
20#include <linux/of.h>
21#include <linux/platform_device.h>
22#include <linux/slab.h>
23#include <linux/module.h>
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -070024#include <linux/uaccess.h>
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -070025#include <linux/dma-contiguous.h>
Mitchel Humpherys5e991f12015-07-30 19:25:54 -070026#include <soc/qcom/secure_buffer.h>
Mitchel Humpherysbe3060c2015-10-08 15:08:01 -070027#include <linux/dma-mapping.h>
28#include <asm/cacheflush.h>
29#include <asm/dma-iommu.h>
Mitchel Humpherys42296fb2015-06-23 16:29:16 -070030
Susheel Khiania4417e72016-07-12 11:28:32 +053031#if defined(CONFIG_IOMMU_DEBUG_TRACKING) || defined(CONFIG_IOMMU_TESTS)
32
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -070033static const char *iommu_debug_attr_to_string(enum iommu_attr attr)
34{
35 switch (attr) {
36 case DOMAIN_ATTR_GEOMETRY:
37 return "DOMAIN_ATTR_GEOMETRY";
38 case DOMAIN_ATTR_PAGING:
39 return "DOMAIN_ATTR_PAGING";
40 case DOMAIN_ATTR_WINDOWS:
41 return "DOMAIN_ATTR_WINDOWS";
42 case DOMAIN_ATTR_FSL_PAMU_STASH:
43 return "DOMAIN_ATTR_FSL_PAMU_STASH";
44 case DOMAIN_ATTR_FSL_PAMU_ENABLE:
45 return "DOMAIN_ATTR_FSL_PAMU_ENABLE";
46 case DOMAIN_ATTR_FSL_PAMUV1:
47 return "DOMAIN_ATTR_FSL_PAMUV1";
48 case DOMAIN_ATTR_NESTING:
49 return "DOMAIN_ATTR_NESTING";
50 case DOMAIN_ATTR_PT_BASE_ADDR:
51 return "DOMAIN_ATTR_PT_BASE_ADDR";
52 case DOMAIN_ATTR_SECURE_VMID:
53 return "DOMAIN_ATTR_SECURE_VMID";
54 case DOMAIN_ATTR_ATOMIC:
55 return "DOMAIN_ATTR_ATOMIC";
56 case DOMAIN_ATTR_CONTEXT_BANK:
57 return "DOMAIN_ATTR_CONTEXT_BANK";
58 case DOMAIN_ATTR_TTBR0:
59 return "DOMAIN_ATTR_TTBR0";
60 case DOMAIN_ATTR_CONTEXTIDR:
61 return "DOMAIN_ATTR_CONTEXTIDR";
62 case DOMAIN_ATTR_PROCID:
63 return "DOMAIN_ATTR_PROCID";
64 case DOMAIN_ATTR_DYNAMIC:
65 return "DOMAIN_ATTR_DYNAMIC";
66 case DOMAIN_ATTR_NON_FATAL_FAULTS:
67 return "DOMAIN_ATTR_NON_FATAL_FAULTS";
68 case DOMAIN_ATTR_S1_BYPASS:
69 return "DOMAIN_ATTR_S1_BYPASS";
70 case DOMAIN_ATTR_FAST:
71 return "DOMAIN_ATTR_FAST";
Patrick Dalyef6c1dc2016-11-16 14:35:23 -080072 case DOMAIN_ATTR_EARLY_MAP:
73 return "DOMAIN_ATTR_EARLY_MAP";
Charan Teja Reddyc682e472017-04-20 19:11:20 +053074 case DOMAIN_ATTR_CB_STALL_DISABLE:
75 return "DOMAIN_ATTR_CB_STALL_DISABLE";
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -070076 default:
77 return "Unknown attr!";
78 }
79}
Susheel Khiania4417e72016-07-12 11:28:32 +053080#endif
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -070081
Mitchel Humpherys42296fb2015-06-23 16:29:16 -070082#ifdef CONFIG_IOMMU_DEBUG_TRACKING
83
84static DEFINE_MUTEX(iommu_debug_attachments_lock);
85static LIST_HEAD(iommu_debug_attachments);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -070086
/*
 * Each group may have more than one domain; but each domain may
 * only have one group.
 * Used by debug tools to display the name of the device(s) associated
 * with a particular domain.
 */
struct iommu_debug_attachment {
	struct iommu_domain *domain;	/* the attached domain */
	struct iommu_group *group;	/* group the domain is attached to */
	struct list_head list;		/* entry in iommu_debug_attachments */
};
98
Susheel Khianie66aa5b2015-08-25 17:25:42 +053099void iommu_debug_attach_device(struct iommu_domain *domain,
100 struct device *dev)
101{
102 struct iommu_debug_attachment *attach;
Patrick Dalyee7a25f2017-04-05 18:05:02 -0700103 struct iommu_group *group;
104
Patrick Daly35af1bb2017-09-29 16:09:05 -0700105 group = dev->iommu_group;
Patrick Dalyee7a25f2017-04-05 18:05:02 -0700106 if (!group)
107 return;
108
Patrick Daly35af1bb2017-09-29 16:09:05 -0700109 mutex_lock(&iommu_debug_attachments_lock);
110 list_for_each_entry(attach, &iommu_debug_attachments, list)
111 if ((attach->domain == domain) && (attach->group == group))
112 goto out;
113
Patrick Dalyee7a25f2017-04-05 18:05:02 -0700114 attach = kzalloc(sizeof(*attach), GFP_KERNEL);
115 if (!attach)
Patrick Daly35af1bb2017-09-29 16:09:05 -0700116 goto out;
Patrick Dalyee7a25f2017-04-05 18:05:02 -0700117
118 attach->domain = domain;
119 attach->group = group;
120 INIT_LIST_HEAD(&attach->list);
Susheel Khianie66aa5b2015-08-25 17:25:42 +0530121
Patrick Dalyee7a25f2017-04-05 18:05:02 -0700122 list_add(&attach->list, &iommu_debug_attachments);
Patrick Daly35af1bb2017-09-29 16:09:05 -0700123out:
Patrick Dalyee7a25f2017-04-05 18:05:02 -0700124 mutex_unlock(&iommu_debug_attachments_lock);
125}
Susheel Khianie66aa5b2015-08-25 17:25:42 +0530126
Patrick Dalyee7a25f2017-04-05 18:05:02 -0700127void iommu_debug_domain_remove(struct iommu_domain *domain)
128{
129 struct iommu_debug_attachment *it, *tmp;
Susheel Khianie66aa5b2015-08-25 17:25:42 +0530130
Patrick Dalyee7a25f2017-04-05 18:05:02 -0700131 mutex_lock(&iommu_debug_attachments_lock);
132 list_for_each_entry_safe(it, tmp, &iommu_debug_attachments, list) {
133 if (it->domain != domain)
134 continue;
135 list_del(&it->list);
Patrick Dalyee7a25f2017-04-05 18:05:02 -0700136 kfree(it);
Susheel Khianie66aa5b2015-08-25 17:25:42 +0530137 }
138
139 mutex_unlock(&iommu_debug_attachments_lock);
140}
141
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700142#endif
143
144#ifdef CONFIG_IOMMU_TESTS
145
Susheel Khiania4417e72016-07-12 11:28:32 +0530146#ifdef CONFIG_64BIT
147
148#define kstrtoux kstrtou64
Patrick Daly9ef01862016-10-13 20:03:50 -0700149#define kstrtox_from_user kstrtoull_from_user
Susheel Khiania4417e72016-07-12 11:28:32 +0530150#define kstrtosize_t kstrtoul
151
152#else
153
154#define kstrtoux kstrtou32
Patrick Daly9ef01862016-10-13 20:03:50 -0700155#define kstrtox_from_user kstrtouint_from_user
Susheel Khiania4417e72016-07-12 11:28:32 +0530156#define kstrtosize_t kstrtouint
157
158#endif
159
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700160static LIST_HEAD(iommu_debug_devices);
161static struct dentry *debugfs_tests_dir;
Patrick Dalye4e39862015-11-20 20:00:50 -0800162static u32 iters_per_op = 1;
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -0700163static void *test_virt_addr;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700164
/* Per-device state backing the debugfs test files. */
struct iommu_debug_device {
	struct device *dev;		/* the device under test */
	struct iommu_domain *domain;	/* test domain, if one is attached */
	/* scratch iova/phys/len shared between the debugfs test files;
	 * the writers are not visible in this chunk — presumably set by
	 * the "iova"/"phys"/"len" debugfs entries (TODO confirm) */
	u64 iova;
	u64 phys;
	size_t len;
	struct list_head list;		/* entry in iommu_debug_devices */
};
173
174static int iommu_debug_build_phoney_sg_table(struct device *dev,
175 struct sg_table *table,
176 unsigned long total_size,
177 unsigned long chunk_size)
178{
179 unsigned long nents = total_size / chunk_size;
180 struct scatterlist *sg;
181 int i;
182 struct page *page;
183
184 if (!IS_ALIGNED(total_size, PAGE_SIZE))
185 return -EINVAL;
186 if (!IS_ALIGNED(total_size, chunk_size))
187 return -EINVAL;
188 if (sg_alloc_table(table, nents, GFP_KERNEL))
189 return -EINVAL;
190 page = alloc_pages(GFP_KERNEL, get_order(chunk_size));
191 if (!page)
192 goto free_table;
193
194 /* all the same page... why not. */
195 for_each_sg(table->sgl, sg, table->nents, i)
196 sg_set_page(sg, page, chunk_size, 0);
197
198 return 0;
199
200free_table:
201 sg_free_table(table);
202 return -ENOMEM;
203}
204
/*
 * Undo iommu_debug_build_phoney_sg_table(): every entry shares one page
 * allocation, so free it once (via the first sg entry) then free the table.
 */
static void iommu_debug_destroy_phoney_sg_table(struct device *dev,
						struct sg_table *table,
						unsigned long chunk_size)
{
	__free_pages(sg_page(table->sgl), get_order(chunk_size));
	sg_free_table(table);
}
212
213static const char * const _size_to_string(unsigned long size)
214{
215 switch (size) {
216 case SZ_4K:
217 return "4K";
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700218 case SZ_8K:
219 return "8K";
220 case SZ_16K:
221 return "16K";
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700222 case SZ_64K:
223 return "64K";
224 case SZ_2M:
225 return "2M";
226 case SZ_1M * 12:
227 return "12M";
228 case SZ_1M * 20:
229 return "20M";
230 }
231 return "unknown size, please add to _size_to_string";
232}
233
Patrick Dalye4e39862015-11-20 20:00:50 -0800234static int nr_iters_set(void *data, u64 val)
235{
236 if (!val)
237 val = 1;
238 if (val > 10000)
239 val = 10000;
240 *(u32 *)data = val;
241 return 0;
242}
243
244static int nr_iters_get(void *data, u64 *val)
245{
246 *val = *(u32 *)data;
247 return 0;
248}
249
250DEFINE_SIMPLE_ATTRIBUTE(iommu_debug_nr_iters_ops,
251 nr_iters_get, nr_iters_set, "%llu\n");
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700252
Mitchel Humpherys020f90f2015-10-02 16:02:31 -0700253static void iommu_debug_device_profiling(struct seq_file *s, struct device *dev,
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700254 enum iommu_attr attrs[],
255 void *attr_values[], int nattrs,
Susheel Khiania4417e72016-07-12 11:28:32 +0530256 const size_t sizes[])
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700257{
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700258 int i;
Susheel Khiania4417e72016-07-12 11:28:32 +0530259 const size_t *sz;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700260 struct iommu_domain *domain;
261 unsigned long iova = 0x10000;
262 phys_addr_t paddr = 0xa000;
263
264 domain = iommu_domain_alloc(&platform_bus_type);
265 if (!domain) {
266 seq_puts(s, "Couldn't allocate domain\n");
267 return;
268 }
269
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700270 seq_puts(s, "Domain attributes: [ ");
271 for (i = 0; i < nattrs; ++i) {
272 /* not all attrs are ints, but this will get us by for now */
273 seq_printf(s, "%s=%d%s", iommu_debug_attr_to_string(attrs[i]),
274 *((int *)attr_values[i]),
275 i < nattrs ? " " : "");
Mitchel Humpherys679567c2015-08-28 10:51:24 -0700276 }
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700277 seq_puts(s, "]\n");
278 for (i = 0; i < nattrs; ++i) {
279 if (iommu_domain_set_attr(domain, attrs[i], attr_values[i])) {
280 seq_printf(s, "Couldn't set %d to the value at %p\n",
281 attrs[i], attr_values[i]);
Mitchel Humpherys020f90f2015-10-02 16:02:31 -0700282 goto out_domain_free;
283 }
284 }
285
Patrick Daly6dd80252017-04-17 20:41:59 -0700286 if (iommu_attach_group(domain, dev->iommu_group)) {
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700287 seq_puts(s,
288 "Couldn't attach new domain to device. Is it already attached?\n");
289 goto out_domain_free;
290 }
291
Patrick Dalye4e39862015-11-20 20:00:50 -0800292 seq_printf(s, "(average over %d iterations)\n", iters_per_op);
Patrick Daly3ca31e32015-11-20 20:33:04 -0800293 seq_printf(s, "%8s %19s %16s\n", "size", "iommu_map", "iommu_unmap");
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700294 for (sz = sizes; *sz; ++sz) {
Susheel Khiania4417e72016-07-12 11:28:32 +0530295 size_t size = *sz;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700296 size_t unmapped;
Patrick Daly3ca31e32015-11-20 20:33:04 -0800297 u64 map_elapsed_ns = 0, unmap_elapsed_ns = 0;
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700298 u64 map_elapsed_us = 0, unmap_elapsed_us = 0;
Patrick Daly3ca31e32015-11-20 20:33:04 -0800299 u32 map_elapsed_rem = 0, unmap_elapsed_rem = 0;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700300 struct timespec tbefore, tafter, diff;
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700301 int i;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700302
Patrick Dalye4e39862015-11-20 20:00:50 -0800303 for (i = 0; i < iters_per_op; ++i) {
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700304 getnstimeofday(&tbefore);
305 if (iommu_map(domain, iova, paddr, size,
306 IOMMU_READ | IOMMU_WRITE)) {
307 seq_puts(s, "Failed to map\n");
308 continue;
309 }
310 getnstimeofday(&tafter);
311 diff = timespec_sub(tafter, tbefore);
Patrick Daly3ca31e32015-11-20 20:33:04 -0800312 map_elapsed_ns += timespec_to_ns(&diff);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700313
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700314 getnstimeofday(&tbefore);
315 unmapped = iommu_unmap(domain, iova, size);
316 if (unmapped != size) {
317 seq_printf(s,
318 "Only unmapped %zx instead of %zx\n",
319 unmapped, size);
320 continue;
321 }
322 getnstimeofday(&tafter);
323 diff = timespec_sub(tafter, tbefore);
Patrick Daly3ca31e32015-11-20 20:33:04 -0800324 unmap_elapsed_ns += timespec_to_ns(&diff);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700325 }
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700326
Susheel Khiania4417e72016-07-12 11:28:32 +0530327 map_elapsed_ns = div_u64_rem(map_elapsed_ns, iters_per_op,
328 &map_elapsed_rem);
329 unmap_elapsed_ns = div_u64_rem(unmap_elapsed_ns, iters_per_op,
330 &unmap_elapsed_rem);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700331
Patrick Daly3ca31e32015-11-20 20:33:04 -0800332 map_elapsed_us = div_u64_rem(map_elapsed_ns, 1000,
333 &map_elapsed_rem);
334 unmap_elapsed_us = div_u64_rem(unmap_elapsed_ns, 1000,
335 &unmap_elapsed_rem);
336
337 seq_printf(s, "%8s %12lld.%03d us %9lld.%03d us\n",
338 _size_to_string(size),
339 map_elapsed_us, map_elapsed_rem,
340 unmap_elapsed_us, unmap_elapsed_rem);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700341 }
342
343 seq_putc(s, '\n');
Patrick Daly3ca31e32015-11-20 20:33:04 -0800344 seq_printf(s, "%8s %19s %16s\n", "size", "iommu_map_sg", "iommu_unmap");
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700345 for (sz = sizes; *sz; ++sz) {
Susheel Khiania4417e72016-07-12 11:28:32 +0530346 size_t size = *sz;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700347 size_t unmapped;
Patrick Daly3ca31e32015-11-20 20:33:04 -0800348 u64 map_elapsed_ns = 0, unmap_elapsed_ns = 0;
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700349 u64 map_elapsed_us = 0, unmap_elapsed_us = 0;
Patrick Daly3ca31e32015-11-20 20:33:04 -0800350 u32 map_elapsed_rem = 0, unmap_elapsed_rem = 0;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700351 struct timespec tbefore, tafter, diff;
352 struct sg_table table;
353 unsigned long chunk_size = SZ_4K;
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700354 int i;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700355
356 if (iommu_debug_build_phoney_sg_table(dev, &table, size,
357 chunk_size)) {
358 seq_puts(s,
359 "couldn't build phoney sg table! bailing...\n");
360 goto out_detach;
361 }
362
Patrick Dalye4e39862015-11-20 20:00:50 -0800363 for (i = 0; i < iters_per_op; ++i) {
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700364 getnstimeofday(&tbefore);
365 if (iommu_map_sg(domain, iova, table.sgl, table.nents,
366 IOMMU_READ | IOMMU_WRITE) != size) {
367 seq_puts(s, "Failed to map_sg\n");
368 goto next;
369 }
370 getnstimeofday(&tafter);
371 diff = timespec_sub(tafter, tbefore);
Patrick Daly3ca31e32015-11-20 20:33:04 -0800372 map_elapsed_ns += timespec_to_ns(&diff);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700373
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700374 getnstimeofday(&tbefore);
375 unmapped = iommu_unmap(domain, iova, size);
376 if (unmapped != size) {
377 seq_printf(s,
378 "Only unmapped %zx instead of %zx\n",
379 unmapped, size);
380 goto next;
381 }
382 getnstimeofday(&tafter);
383 diff = timespec_sub(tafter, tbefore);
Patrick Daly3ca31e32015-11-20 20:33:04 -0800384 unmap_elapsed_ns += timespec_to_ns(&diff);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700385 }
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700386
Susheel Khiania4417e72016-07-12 11:28:32 +0530387 map_elapsed_ns = div_u64_rem(map_elapsed_ns, iters_per_op,
388 &map_elapsed_rem);
389 unmap_elapsed_ns = div_u64_rem(unmap_elapsed_ns, iters_per_op,
390 &unmap_elapsed_rem);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700391
Patrick Daly3ca31e32015-11-20 20:33:04 -0800392 map_elapsed_us = div_u64_rem(map_elapsed_ns, 1000,
393 &map_elapsed_rem);
394 unmap_elapsed_us = div_u64_rem(unmap_elapsed_ns, 1000,
395 &unmap_elapsed_rem);
396
397 seq_printf(s, "%8s %12lld.%03d us %9lld.%03d us\n",
398 _size_to_string(size),
399 map_elapsed_us, map_elapsed_rem,
400 unmap_elapsed_us, unmap_elapsed_rem);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700401
402next:
403 iommu_debug_destroy_phoney_sg_table(dev, &table, chunk_size);
404 }
405
406out_detach:
Patrick Daly6dd80252017-04-17 20:41:59 -0700407 iommu_detach_group(domain, dev->iommu_group);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700408out_domain_free:
409 iommu_domain_free(domain);
410}
411
Mitchel Humpherys7cc56e42015-07-06 14:58:23 -0700412static int iommu_debug_profiling_show(struct seq_file *s, void *ignored)
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700413{
414 struct iommu_debug_device *ddev = s->private;
Susheel Khiania4417e72016-07-12 11:28:32 +0530415 const size_t sizes[] = { SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12,
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700416 SZ_1M * 20, 0 };
417 enum iommu_attr attrs[] = {
418 DOMAIN_ATTR_ATOMIC,
419 };
420 int htw_disable = 1, atomic = 1;
421 void *attr_values[] = { &htw_disable, &atomic };
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700422
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700423 iommu_debug_device_profiling(s, ddev->dev, attrs, attr_values,
424 ARRAY_SIZE(attrs), sizes);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700425
426 return 0;
427}
428
/* debugfs open: route reads through the seq_file show function above */
static int iommu_debug_profiling_open(struct inode *inode, struct file *file)
{
	return single_open(file, iommu_debug_profiling_show, inode->i_private);
}

/* file_operations for the "profiling" debugfs file */
static const struct file_operations iommu_debug_profiling_fops = {
	.open = iommu_debug_profiling_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
440
Mitchel Humpherys020f90f2015-10-02 16:02:31 -0700441static int iommu_debug_secure_profiling_show(struct seq_file *s, void *ignored)
442{
443 struct iommu_debug_device *ddev = s->private;
Susheel Khiania4417e72016-07-12 11:28:32 +0530444 const size_t sizes[] = { SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12,
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700445 SZ_1M * 20, 0 };
Mitchel Humpherys020f90f2015-10-02 16:02:31 -0700446
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700447 enum iommu_attr attrs[] = {
448 DOMAIN_ATTR_ATOMIC,
449 DOMAIN_ATTR_SECURE_VMID,
450 };
451 int one = 1, secure_vmid = VMID_CP_PIXEL;
452 void *attr_values[] = { &one, &secure_vmid };
453
454 iommu_debug_device_profiling(s, ddev->dev, attrs, attr_values,
455 ARRAY_SIZE(attrs), sizes);
Mitchel Humpherys020f90f2015-10-02 16:02:31 -0700456
457 return 0;
458}
459
/* debugfs open: route reads through the secure profiling show function */
static int iommu_debug_secure_profiling_open(struct inode *inode,
					     struct file *file)
{
	return single_open(file, iommu_debug_secure_profiling_show,
			   inode->i_private);
}

/* file_operations for the "secure_profiling" debugfs file */
static const struct file_operations iommu_debug_secure_profiling_fops = {
	.open = iommu_debug_secure_profiling_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
473
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700474static int iommu_debug_profiling_fast_show(struct seq_file *s, void *ignored)
475{
476 struct iommu_debug_device *ddev = s->private;
477 size_t sizes[] = {SZ_4K, SZ_8K, SZ_16K, SZ_64K, 0};
478 enum iommu_attr attrs[] = {
479 DOMAIN_ATTR_FAST,
480 DOMAIN_ATTR_ATOMIC,
481 };
482 int one = 1;
483 void *attr_values[] = { &one, &one };
484
485 iommu_debug_device_profiling(s, ddev->dev, attrs, attr_values,
486 ARRAY_SIZE(attrs), sizes);
487
488 return 0;
489}
490
/* debugfs open: route reads through the fastmap profiling show function */
static int iommu_debug_profiling_fast_open(struct inode *inode,
					   struct file *file)
{
	return single_open(file, iommu_debug_profiling_fast_show,
			   inode->i_private);
}

/* file_operations for the "profiling_fast" debugfs file */
static const struct file_operations iommu_debug_profiling_fast_fops = {
	.open = iommu_debug_profiling_fast_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
504
/*
 * Microbenchmark dma_map_single_attrs()/dma_unmap_single_attrs() through
 * a fastmap (DOMAIN_ATTR_FAST) arm-iommu mapping: 10 map/unmap pairs,
 * run twice — once with no extra attrs and once with
 * DMA_ATTR_SKIP_CPU_SYNC (no cache maintenance, i.e. what an IO-coherent
 * device would pay) — printing each sample and the average in ns.
 * Always returns 0; failures are reported through the seq_file.
 */
static int iommu_debug_profiling_fast_dma_api_show(struct seq_file *s,
						 void *ignored)
{
	int i, experiment;
	struct iommu_debug_device *ddev = s->private;
	struct device *dev = ddev->dev;
	u64 map_elapsed_ns[10], unmap_elapsed_ns[10];
	struct dma_iommu_mapping *mapping;
	dma_addr_t dma_addr;
	void *virt;
	int fast = 1;
	/* labels[i] describes the effect of extra_attrs[i] */
	const char * const extra_labels[] = {
		"not coherent",
		"coherent",
	};
	unsigned long extra_attrs[] = {
		0,
		DMA_ATTR_SKIP_CPU_SYNC,
	};

	/* 1518 bytes — presumably chosen as a max ethernet frame; only
	 * SZ_4K of it is ever mapped.  TODO confirm the intent. */
	virt = kmalloc(1518, GFP_KERNEL);
	if (!virt)
		goto out;

	/* 4GB of IOVA space starting at 0 */
	mapping = arm_iommu_create_mapping(&platform_bus_type, 0, SZ_1G * 4ULL);
	if (!mapping) {
		seq_puts(s, "fast_smmu_create_mapping failed\n");
		goto out_kfree;
	}

	if (iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_FAST, &fast)) {
		seq_puts(s, "iommu_domain_set_attr failed\n");
		goto out_release_mapping;
	}

	if (arm_iommu_attach_device(dev, mapping)) {
		seq_puts(s, "fast_smmu_attach_device failed\n");
		goto out_release_mapping;
	}

	/* keep clocks on so we measure the map path, not clock ramp-up */
	if (iommu_enable_config_clocks(mapping->domain)) {
		seq_puts(s, "Couldn't enable clocks\n");
		goto out_detach;
	}
	for (experiment = 0; experiment < 2; ++experiment) {
		size_t map_avg = 0, unmap_avg = 0;

		for (i = 0; i < 10; ++i) {
			struct timespec tbefore, tafter, diff;
			u64 ns;

			getnstimeofday(&tbefore);
			dma_addr = dma_map_single_attrs(
				dev, virt, SZ_4K, DMA_TO_DEVICE,
				extra_attrs[experiment]);
			getnstimeofday(&tafter);
			diff = timespec_sub(tafter, tbefore);
			ns = timespec_to_ns(&diff);
			if (dma_mapping_error(dev, dma_addr)) {
				seq_puts(s, "dma_map_single failed\n");
				goto out_disable_config_clocks;
			}
			map_elapsed_ns[i] = ns;

			getnstimeofday(&tbefore);
			dma_unmap_single_attrs(
				dev, dma_addr, SZ_4K, DMA_TO_DEVICE,
				extra_attrs[experiment]);
			getnstimeofday(&tafter);
			diff = timespec_sub(tafter, tbefore);
			ns = timespec_to_ns(&diff);
			unmap_elapsed_ns[i] = ns;
		}

		/* print the 10 raw map samples, then their average */
		seq_printf(s, "%13s %24s (ns): [", extra_labels[experiment],
			   "dma_map_single_attrs");
		for (i = 0; i < 10; ++i) {
			map_avg += map_elapsed_ns[i];
			seq_printf(s, "%5llu%s", map_elapsed_ns[i],
				   i < 9 ? ", " : "");
		}
		map_avg /= 10;
		seq_printf(s, "] (avg: %zu)\n", map_avg);

		/* and the same for the unmap samples */
		seq_printf(s, "%13s %24s (ns): [", extra_labels[experiment],
			   "dma_unmap_single_attrs");
		for (i = 0; i < 10; ++i) {
			unmap_avg += unmap_elapsed_ns[i];
			seq_printf(s, "%5llu%s", unmap_elapsed_ns[i],
				   i < 9 ? ", " : "");
		}
		unmap_avg /= 10;
		seq_printf(s, "] (avg: %zu)\n", unmap_avg);
	}

out_disable_config_clocks:
	iommu_disable_config_clocks(mapping->domain);
out_detach:
	arm_iommu_detach_device(dev);
out_release_mapping:
	arm_iommu_release_mapping(mapping);
out_kfree:
	kfree(virt);
out:
	return 0;
}
611
/* debugfs open: route reads through the fast DMA API show function */
static int iommu_debug_profiling_fast_dma_api_open(struct inode *inode,
						   struct file *file)
{
	return single_open(file, iommu_debug_profiling_fast_dma_api_show,
			   inode->i_private);
}

/* file_operations for the "profiling_fast_dma_api" debugfs file */
static const struct file_operations iommu_debug_profiling_fast_dma_api_fops = {
	.open = iommu_debug_profiling_fast_dma_api_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
625
Mitchel Humpherys45fc7122015-12-11 15:22:15 -0800626static int __tlb_stress_sweep(struct device *dev, struct seq_file *s)
627{
628 int i, ret = 0;
Charan Teja Reddy29f61402017-02-09 20:44:29 +0530629 u64 iova;
630 const u64 max = SZ_1G * 4ULL - 1;
Mitchel Humpherys45fc7122015-12-11 15:22:15 -0800631 void *virt;
632 phys_addr_t phys;
633 dma_addr_t dma_addr;
634
635 /*
636 * we'll be doing 4K and 8K mappings. Need to own an entire 8K
637 * chunk that we can work with.
638 */
639 virt = (void *)__get_free_pages(GFP_KERNEL, get_order(SZ_8K));
640 phys = virt_to_phys(virt);
641
642 /* fill the whole 4GB space */
643 for (iova = 0, i = 0; iova < max; iova += SZ_8K, ++i) {
644 dma_addr = dma_map_single(dev, virt, SZ_8K, DMA_TO_DEVICE);
645 if (dma_addr == DMA_ERROR_CODE) {
646 dev_err(dev, "Failed map on iter %d\n", i);
647 ret = -EINVAL;
648 goto out;
649 }
650 }
651
652 if (dma_map_single(dev, virt, SZ_4K, DMA_TO_DEVICE) != DMA_ERROR_CODE) {
653 dev_err(dev,
654 "dma_map_single unexpectedly (VA should have been exhausted)\n");
655 ret = -EINVAL;
656 goto out;
657 }
658
659 /*
660 * free up 4K at the very beginning, then leave one 4K mapping,
661 * then free up 8K. This will result in the next 8K map to skip
662 * over the 4K hole and take the 8K one.
663 */
664 dma_unmap_single(dev, 0, SZ_4K, DMA_TO_DEVICE);
665 dma_unmap_single(dev, SZ_8K, SZ_4K, DMA_TO_DEVICE);
666 dma_unmap_single(dev, SZ_8K + SZ_4K, SZ_4K, DMA_TO_DEVICE);
667
668 /* remap 8K */
669 dma_addr = dma_map_single(dev, virt, SZ_8K, DMA_TO_DEVICE);
670 if (dma_addr != SZ_8K) {
671 dma_addr_t expected = SZ_8K;
672
673 dev_err(dev, "Unexpected dma_addr. got: %pa expected: %pa\n",
674 &dma_addr, &expected);
675 ret = -EINVAL;
676 goto out;
677 }
678
679 /*
680 * now remap 4K. We should get the first 4K chunk that was skipped
681 * over during the previous 8K map. If we missed a TLB invalidate
682 * at that point this should explode.
683 */
684 dma_addr = dma_map_single(dev, virt, SZ_4K, DMA_TO_DEVICE);
685 if (dma_addr != 0) {
686 dma_addr_t expected = 0;
687
688 dev_err(dev, "Unexpected dma_addr. got: %pa expected: %pa\n",
689 &dma_addr, &expected);
690 ret = -EINVAL;
691 goto out;
692 }
693
694 if (dma_map_single(dev, virt, SZ_4K, DMA_TO_DEVICE) != DMA_ERROR_CODE) {
695 dev_err(dev,
696 "dma_map_single unexpectedly after remaps (VA should have been exhausted)\n");
697 ret = -EINVAL;
698 goto out;
699 }
700
701 /* we're all full again. unmap everything. */
Charan Teja Reddy29f61402017-02-09 20:44:29 +0530702 for (iova = 0; iova < max; iova += SZ_8K)
703 dma_unmap_single(dev, (dma_addr_t)iova, SZ_8K, DMA_TO_DEVICE);
Mitchel Humpherys45fc7122015-12-11 15:22:15 -0800704
705out:
706 free_pages((unsigned long)virt, get_order(SZ_8K));
707 return ret;
708}
709
/*
 * Tiny Fibonacci generator used by __rand_va_sweep() as a cheap
 * deterministic "pseudo-random" source of iova offsets.
 */
struct fib_state {
	unsigned long cur;	/* most recent term */
	unsigned long prev;	/* term before that */
};

/* Reset the sequence: the first call to get_next_fib() returns 2. */
static void fib_init(struct fib_state *f)
{
	f->cur = f->prev = 1;
}

/* Advance the sequence and return the next Fibonacci number. */
static unsigned long get_next_fib(struct fib_state *f)
{
	/*
	 * Fix: `next` was declared int, truncating the unsigned long
	 * state (and risking UB via signed overflow) once terms grow
	 * past INT_MAX.
	 */
	unsigned long next = f->cur + f->prev;

	f->prev = f->cur;
	f->cur = next;
	return next;
}
728
729/*
730 * Not actually random. Just testing the fibs (and max - the fibs).
731 */
732static int __rand_va_sweep(struct device *dev, struct seq_file *s,
733 const size_t size)
734{
735 u64 iova;
Charan Teja Reddy29f61402017-02-09 20:44:29 +0530736 const u64 max = SZ_1G * 4ULL - 1;
Mitchel Humpherys45fc7122015-12-11 15:22:15 -0800737 int i, remapped, unmapped, ret = 0;
738 void *virt;
739 dma_addr_t dma_addr, dma_addr2;
740 struct fib_state fib;
741
742 virt = (void *)__get_free_pages(GFP_KERNEL, get_order(size));
743 if (!virt) {
744 if (size > SZ_8K) {
745 dev_err(dev,
746 "Failed to allocate %s of memory, which is a lot. Skipping test for this size\n",
747 _size_to_string(size));
748 return 0;
749 }
750 return -ENOMEM;
751 }
752
753 /* fill the whole 4GB space */
754 for (iova = 0, i = 0; iova < max; iova += size, ++i) {
755 dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
756 if (dma_addr == DMA_ERROR_CODE) {
757 dev_err(dev, "Failed map on iter %d\n", i);
758 ret = -EINVAL;
759 goto out;
760 }
761 }
762
763 /* now unmap "random" iovas */
764 unmapped = 0;
765 fib_init(&fib);
766 for (iova = get_next_fib(&fib) * size;
767 iova < max - size;
Charan Teja Reddy29f61402017-02-09 20:44:29 +0530768 iova = (u64)get_next_fib(&fib) * size) {
769 dma_addr = (dma_addr_t)(iova);
770 dma_addr2 = (dma_addr_t)((max + 1) - size - iova);
Mitchel Humpherys45fc7122015-12-11 15:22:15 -0800771 if (dma_addr == dma_addr2) {
772 WARN(1,
773 "%s test needs update! The random number sequence is folding in on itself and should be changed.\n",
774 __func__);
775 return -EINVAL;
776 }
777 dma_unmap_single(dev, dma_addr, size, DMA_TO_DEVICE);
778 dma_unmap_single(dev, dma_addr2, size, DMA_TO_DEVICE);
779 unmapped += 2;
780 }
781
782 /* and map until everything fills back up */
783 for (remapped = 0; ; ++remapped) {
784 dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
785 if (dma_addr == DMA_ERROR_CODE)
786 break;
787 }
788
789 if (unmapped != remapped) {
790 dev_err(dev,
791 "Unexpected random remap count! Unmapped %d but remapped %d\n",
792 unmapped, remapped);
793 ret = -EINVAL;
794 }
795
Charan Teja Reddy29f61402017-02-09 20:44:29 +0530796 for (iova = 0; iova < max; iova += size)
797 dma_unmap_single(dev, (dma_addr_t)iova, size, DMA_TO_DEVICE);
Mitchel Humpherys45fc7122015-12-11 15:22:15 -0800798
799out:
800 free_pages((unsigned long)virt, get_order(size));
801 return ret;
802}
803
804static int __check_mapping(struct device *dev, struct iommu_domain *domain,
805 dma_addr_t iova, phys_addr_t expected)
806{
807 phys_addr_t res = iommu_iova_to_phys_hard(domain, iova);
808 phys_addr_t res2 = iommu_iova_to_phys(domain, iova);
809
810 WARN(res != res2, "hard/soft iova_to_phys fns don't agree...");
811
812 if (res != expected) {
813 dev_err_ratelimited(dev,
814 "Bad translation for %pa! Expected: %pa Got: %pa\n",
815 &iova, &expected, &res);
816 return -EINVAL;
817 }
818
819 return 0;
820}
821
/*
 * Map the entire 4GB IOVA space with @size-byte mappings of one buffer
 * and check that:
 *   - each dma_map_single() hands back the expected monotonic iova,
 *   - (if @domain is given) iova_to_phys agrees over the first and last
 *     6M of the space,
 *   - one further map attempt fails once the space is exhausted.
 * Returns 0 on success or a negative errno.
 */
static int __full_va_sweep(struct device *dev, struct seq_file *s,
			   const size_t size, struct iommu_domain *domain)
{
	u64 iova;
	dma_addr_t dma_addr;
	void *virt;
	phys_addr_t phys;
	const u64 max = SZ_1G * 4ULL - 1;
	int ret = 0, i;

	virt = (void *)__get_free_pages(GFP_KERNEL, get_order(size));
	if (!virt) {
		if (size > SZ_8K) {
			/* best-effort: large allocations may honestly fail */
			dev_err(dev,
				"Failed to allocate %s of memory, which is a lot. Skipping test for this size\n",
				_size_to_string(size));
			return 0;
		}
		return -ENOMEM;
	}
	phys = virt_to_phys(virt);

	/* fill the space; the fast allocator should hand out 0, size, 2*size... */
	for (iova = 0, i = 0; iova < max; iova += size, ++i) {
		unsigned long expected = iova;

		dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
		if (dma_addr != expected) {
			dev_err_ratelimited(dev,
					    "Unexpected iova on iter %d (expected: 0x%lx got: 0x%lx)\n",
					    i, expected,
					    (unsigned long)dma_addr);
			ret = -EINVAL;
			goto out;
		}
	}

	if (domain) {
		/* check every mapping from 0..6M */
		for (iova = 0, i = 0; iova < SZ_2M * 3; iova += size, ++i) {
			phys_addr_t expected = phys;

			if (__check_mapping(dev, domain, iova, expected)) {
				dev_err(dev, "iter: %d\n", i);
				ret = -EINVAL;
				goto out;
			}
		}
		/* and from 4G..4G-6M */
		for (iova = 0, i = 0; iova < SZ_2M * 3; iova += size, ++i) {
			phys_addr_t expected = phys;
			/*
			 * NOTE(review): `unsigned long` truncates
			 * SZ_1G * 4ULL on 32-bit builds; u64/dma_addr_t
			 * would be safer — confirm targets are 64-bit only.
			 */
			unsigned long theiova = ((SZ_1G * 4ULL) - size) - iova;

			if (__check_mapping(dev, domain, theiova, expected)) {
				dev_err(dev, "iter: %d\n", i);
				ret = -EINVAL;
				goto out;
			}
		}
	}

	/* at this point, our VA space should be full */
	dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
	if (dma_addr != DMA_ERROR_CODE) {
		dev_err_ratelimited(dev,
				    "dma_map_single succeeded when it should have failed. Got iova: 0x%lx\n",
				    (unsigned long)dma_addr);
		ret = -EINVAL;
	}

out:
	/* unmap everything we managed to map before freeing the buffer */
	for (iova = 0; iova < max; iova += size)
		dma_unmap_single(dev, (dma_addr_t)iova, size, DMA_TO_DEVICE);

	free_pages((unsigned long)virt, get_order(size));
	return ret;
}
898
/* Log to both the kernel log (dev_err) and the debugfs seq_file. */
#define ds_printf(d, s, fmt, ...) ({				\
		dev_err(d, fmt, ##__VA_ARGS__);			\
		seq_printf(s, fmt, ##__VA_ARGS__);		\
	})
903
904static int __functional_dma_api_va_test(struct device *dev, struct seq_file *s,
905 struct iommu_domain *domain, void *priv)
906{
907 int i, j, ret = 0;
908 size_t *sz, *sizes = priv;
909
910 for (j = 0; j < 1; ++j) {
911 for (sz = sizes; *sz; ++sz) {
912 for (i = 0; i < 2; ++i) {
913 ds_printf(dev, s, "Full VA sweep @%s %d",
914 _size_to_string(*sz), i);
915 if (__full_va_sweep(dev, s, *sz, domain)) {
916 ds_printf(dev, s, " -> FAILED\n");
917 ret = -EINVAL;
918 } else {
919 ds_printf(dev, s, " -> SUCCEEDED\n");
920 }
921 }
922 }
923 }
924
925 ds_printf(dev, s, "bonus map:");
926 if (__full_va_sweep(dev, s, SZ_4K, domain)) {
927 ds_printf(dev, s, " -> FAILED\n");
928 ret = -EINVAL;
929 } else {
930 ds_printf(dev, s, " -> SUCCEEDED\n");
931 }
932
933 for (sz = sizes; *sz; ++sz) {
934 for (i = 0; i < 2; ++i) {
935 ds_printf(dev, s, "Rand VA sweep @%s %d",
936 _size_to_string(*sz), i);
937 if (__rand_va_sweep(dev, s, *sz)) {
938 ds_printf(dev, s, " -> FAILED\n");
939 ret = -EINVAL;
940 } else {
941 ds_printf(dev, s, " -> SUCCEEDED\n");
942 }
943 }
944 }
945
946 ds_printf(dev, s, "TLB stress sweep");
947 if (__tlb_stress_sweep(dev, s)) {
948 ds_printf(dev, s, " -> FAILED\n");
949 ret = -EINVAL;
950 } else {
951 ds_printf(dev, s, " -> SUCCEEDED\n");
952 }
953
954 ds_printf(dev, s, "second bonus map:");
955 if (__full_va_sweep(dev, s, SZ_4K, domain)) {
956 ds_printf(dev, s, " -> FAILED\n");
957 ret = -EINVAL;
958 } else {
959 ds_printf(dev, s, " -> SUCCEEDED\n");
960 }
961
962 return ret;
963}
964
965static int __functional_dma_api_alloc_test(struct device *dev,
966 struct seq_file *s,
967 struct iommu_domain *domain,
968 void *ignored)
969{
970 size_t size = SZ_1K * 742;
971 int ret = 0;
972 u8 *data;
973 dma_addr_t iova;
974
975 /* Make sure we can allocate and use a buffer */
976 ds_printf(dev, s, "Allocating coherent buffer");
977 data = dma_alloc_coherent(dev, size, &iova, GFP_KERNEL);
978 if (!data) {
979 ds_printf(dev, s, " -> FAILED\n");
980 ret = -EINVAL;
981 } else {
982 int i;
983
984 ds_printf(dev, s, " -> SUCCEEDED\n");
985 ds_printf(dev, s, "Using coherent buffer");
986 for (i = 0; i < 742; ++i) {
987 int ind = SZ_1K * i;
988 u8 *p = data + ind;
989 u8 val = i % 255;
990
991 memset(data, 0xa5, size);
992 *p = val;
993 (*p)++;
994 if ((*p) != val + 1) {
995 ds_printf(dev, s,
996 " -> FAILED on iter %d since %d != %d\n",
997 i, *p, val + 1);
998 ret = -EINVAL;
999 }
1000 }
1001 if (!ret)
1002 ds_printf(dev, s, " -> SUCCEEDED\n");
1003 dma_free_coherent(dev, size, data, iova);
1004 }
1005
1006 return ret;
1007}
1008
1009static int __functional_dma_api_basic_test(struct device *dev,
1010 struct seq_file *s,
1011 struct iommu_domain *domain,
1012 void *ignored)
1013{
1014 size_t size = 1518;
1015 int i, j, ret = 0;
1016 u8 *data;
1017 dma_addr_t iova;
1018 phys_addr_t pa, pa2;
1019
1020 ds_printf(dev, s, "Basic DMA API test");
1021 /* Make sure we can allocate and use a buffer */
1022 for (i = 0; i < 1000; ++i) {
1023 data = kmalloc(size, GFP_KERNEL);
1024 if (!data) {
1025 ds_printf(dev, s, " -> FAILED\n");
1026 ret = -EINVAL;
1027 goto out;
1028 }
1029 memset(data, 0xa5, size);
1030 iova = dma_map_single(dev, data, size, DMA_TO_DEVICE);
1031 pa = iommu_iova_to_phys(domain, iova);
1032 pa2 = iommu_iova_to_phys_hard(domain, iova);
1033 if (pa != pa2) {
1034 dev_err(dev,
1035 "iova_to_phys doesn't match iova_to_phys_hard: %pa != %pa\n",
1036 &pa, &pa2);
1037 ret = -EINVAL;
1038 goto out;
1039 }
1040 pa2 = virt_to_phys(data);
1041 if (pa != pa2) {
1042 dev_err(dev,
1043 "iova_to_phys doesn't match virt_to_phys: %pa != %pa\n",
1044 &pa, &pa2);
1045 ret = -EINVAL;
1046 goto out;
1047 }
1048 dma_unmap_single(dev, iova, size, DMA_TO_DEVICE);
1049 for (j = 0; j < size; ++j) {
1050 if (data[j] != 0xa5) {
1051 dev_err(dev, "data[%d] != 0xa5\n", data[j]);
1052 ret = -EINVAL;
1053 goto out;
1054 }
1055 }
1056 kfree(data);
1057 }
1058
1059out:
1060 if (ret)
1061 ds_printf(dev, s, " -> FAILED\n");
1062 else
1063 ds_printf(dev, s, " -> SUCCEEDED\n");
1064
1065 return ret;
1066}
1067
1068/* Creates a fresh fast mapping and applies @fn to it */
1069static int __apply_to_new_mapping(struct seq_file *s,
1070 int (*fn)(struct device *dev,
1071 struct seq_file *s,
1072 struct iommu_domain *domain,
1073 void *priv),
1074 void *priv)
1075{
1076 struct dma_iommu_mapping *mapping;
1077 struct iommu_debug_device *ddev = s->private;
1078 struct device *dev = ddev->dev;
Shiraz Hashim3c28c962016-07-04 15:05:35 +05301079 int ret = -EINVAL, fast = 1;
Mitchel Humpherys45fc7122015-12-11 15:22:15 -08001080 phys_addr_t pt_phys;
1081
Charan Teja Reddy29f61402017-02-09 20:44:29 +05301082 mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
1083 (SZ_1G * 4ULL));
Mitchel Humpherys45fc7122015-12-11 15:22:15 -08001084 if (!mapping)
1085 goto out;
1086
1087 if (iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_FAST, &fast)) {
1088 seq_puts(s, "iommu_domain_set_attr failed\n");
1089 goto out_release_mapping;
1090 }
1091
1092 if (arm_iommu_attach_device(dev, mapping))
1093 goto out_release_mapping;
1094
1095 if (iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_PT_BASE_ADDR,
1096 &pt_phys)) {
1097 ds_printf(dev, s, "Couldn't get page table base address\n");
1098 goto out_release_mapping;
1099 }
1100
1101 dev_err(dev, "testing with pgtables at %pa\n", &pt_phys);
1102 if (iommu_enable_config_clocks(mapping->domain)) {
1103 ds_printf(dev, s, "Couldn't enable clocks\n");
1104 goto out_release_mapping;
1105 }
1106 ret = fn(dev, s, mapping->domain, priv);
1107 iommu_disable_config_clocks(mapping->domain);
1108
1109 arm_iommu_detach_device(dev);
1110out_release_mapping:
1111 arm_iommu_release_mapping(mapping);
1112out:
1113 seq_printf(s, "%s\n", ret ? "FAIL" : "SUCCESS");
1114 return 0;
1115}
1116
1117static int iommu_debug_functional_fast_dma_api_show(struct seq_file *s,
1118 void *ignored)
1119{
1120 size_t sizes[] = {SZ_4K, SZ_8K, SZ_16K, SZ_64K, 0};
1121 int ret = 0;
1122
1123 ret |= __apply_to_new_mapping(s, __functional_dma_api_alloc_test, NULL);
1124 ret |= __apply_to_new_mapping(s, __functional_dma_api_basic_test, NULL);
1125 ret |= __apply_to_new_mapping(s, __functional_dma_api_va_test, sizes);
1126 return ret;
1127}
1128
/* debugfs open: bind the fast-DMA-API test show() to this file. */
static int iommu_debug_functional_fast_dma_api_open(struct inode *inode,
						    struct file *file)
{
	return single_open(file, iommu_debug_functional_fast_dma_api_show,
			   inode->i_private);
}
1135
/* File ops for the read-only "functional_fast_dma_api" debugfs node. */
static const struct file_operations iommu_debug_functional_fast_dma_api_fops = {
	.open = iommu_debug_functional_fast_dma_api_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
1142
Mitchel Humpherys10215fd2015-12-15 18:45:57 -08001143static int iommu_debug_functional_arm_dma_api_show(struct seq_file *s,
1144 void *ignored)
1145{
1146 struct dma_iommu_mapping *mapping;
1147 struct iommu_debug_device *ddev = s->private;
1148 struct device *dev = ddev->dev;
1149 size_t sizes[] = {SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12, 0};
1150 int ret = -EINVAL;
1151
Charan Teja Reddy29f61402017-02-09 20:44:29 +05301152 /* Make the size equal to MAX_ULONG */
1153 mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
1154 (SZ_1G * 4ULL - 1));
Mitchel Humpherys10215fd2015-12-15 18:45:57 -08001155 if (!mapping)
1156 goto out;
1157
1158 if (arm_iommu_attach_device(dev, mapping))
1159 goto out_release_mapping;
1160
1161 ret = __functional_dma_api_alloc_test(dev, s, mapping->domain, sizes);
1162 ret |= __functional_dma_api_basic_test(dev, s, mapping->domain, sizes);
1163
1164 arm_iommu_detach_device(dev);
1165out_release_mapping:
1166 arm_iommu_release_mapping(mapping);
1167out:
1168 seq_printf(s, "%s\n", ret ? "FAIL" : "SUCCESS");
1169 return 0;
1170}
1171
/* debugfs open: bind the arm-DMA-API test show() to this file. */
static int iommu_debug_functional_arm_dma_api_open(struct inode *inode,
						   struct file *file)
{
	return single_open(file, iommu_debug_functional_arm_dma_api_show,
			   inode->i_private);
}
1178
/* File ops for the read-only "functional_arm_dma_api" debugfs node. */
static const struct file_operations iommu_debug_functional_arm_dma_api_fops = {
	.open = iommu_debug_functional_arm_dma_api_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
1185
Mitchel Humpherysac4c38b2015-07-30 19:40:21 -07001186static int iommu_debug_attach_do_attach(struct iommu_debug_device *ddev,
1187 int val, bool is_secure)
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001188{
Patrick Daly6dd80252017-04-17 20:41:59 -07001189 struct iommu_group *group = ddev->dev->iommu_group;
1190
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001191 ddev->domain = iommu_domain_alloc(&platform_bus_type);
1192 if (!ddev->domain) {
1193 pr_err("Couldn't allocate domain\n");
1194 return -ENOMEM;
1195 }
1196
Mitchel Humpherysac4c38b2015-07-30 19:40:21 -07001197 if (is_secure && iommu_domain_set_attr(ddev->domain,
1198 DOMAIN_ATTR_SECURE_VMID,
1199 &val)) {
1200 pr_err("Couldn't set secure vmid to %d\n", val);
1201 goto out_domain_free;
1202 }
1203
Patrick Daly6dd80252017-04-17 20:41:59 -07001204 if (iommu_attach_group(ddev->domain, group)) {
1205 dev_err(ddev->dev, "Couldn't attach new domain to device\n");
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001206 goto out_domain_free;
1207 }
1208
1209 return 0;
1210
1211out_domain_free:
1212 iommu_domain_free(ddev->domain);
1213 ddev->domain = NULL;
1214 return -EIO;
1215}
1216
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07001217static ssize_t __iommu_debug_dma_attach_write(struct file *file,
1218 const char __user *ubuf,
1219 size_t count, loff_t *offset)
1220{
1221 struct iommu_debug_device *ddev = file->private_data;
1222 struct device *dev = ddev->dev;
1223 struct dma_iommu_mapping *dma_mapping;
1224 ssize_t retval = -EINVAL;
1225 int val;
1226
1227 if (kstrtoint_from_user(ubuf, count, 0, &val)) {
1228 pr_err("Invalid format. Expected a hex or decimal integer");
1229 retval = -EFAULT;
1230 goto out;
1231 }
1232
1233 if (val) {
1234 if (dev->archdata.mapping)
1235 if (dev->archdata.mapping->domain) {
1236 pr_err("Already attached.\n");
1237 retval = -EINVAL;
1238 goto out;
1239 }
1240 if (WARN(dev->archdata.iommu,
1241 "Attachment tracking out of sync with device\n")) {
1242 retval = -EINVAL;
1243 goto out;
1244 }
1245
1246 dma_mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
1247 (SZ_1G * 4ULL));
1248
1249 if (!dma_mapping)
1250 goto out;
1251
1252 if (arm_iommu_attach_device(dev, dma_mapping))
1253 goto out_release_mapping;
1254 pr_err("Attached\n");
1255 } else {
1256 if (!dev->archdata.mapping) {
1257 pr_err("No mapping. Did you already attach?\n");
1258 retval = -EINVAL;
1259 goto out;
1260 }
1261 if (!dev->archdata.mapping->domain) {
1262 pr_err("No domain. Did you already attach?\n");
1263 retval = -EINVAL;
1264 goto out;
1265 }
1266 arm_iommu_detach_device(dev);
1267 arm_iommu_release_mapping(dev->archdata.mapping);
1268 pr_err("Detached\n");
1269 }
1270 retval = count;
1271 return retval;
1272
1273out_release_mapping:
1274 arm_iommu_release_mapping(dma_mapping);
1275out:
1276 return retval;
1277}
1278
Mitchel Humpherysac4c38b2015-07-30 19:40:21 -07001279static ssize_t __iommu_debug_attach_write(struct file *file,
1280 const char __user *ubuf,
1281 size_t count, loff_t *offset,
1282 bool is_secure)
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001283{
1284 struct iommu_debug_device *ddev = file->private_data;
Patrick Daly6dd80252017-04-17 20:41:59 -07001285 struct device *dev = ddev->dev;
1286 struct iommu_domain *domain;
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001287 ssize_t retval;
Mitchel Humpherysac4c38b2015-07-30 19:40:21 -07001288 int val;
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001289
Mitchel Humpherysac4c38b2015-07-30 19:40:21 -07001290 if (kstrtoint_from_user(ubuf, count, 0, &val)) {
1291 pr_err("Invalid format. Expected a hex or decimal integer");
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001292 retval = -EFAULT;
1293 goto out;
1294 }
1295
Mitchel Humpherysac4c38b2015-07-30 19:40:21 -07001296 if (val) {
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001297 if (ddev->domain) {
Patrick Daly6dd80252017-04-17 20:41:59 -07001298 pr_err("Iommu-Debug is already attached?\n");
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001299 retval = -EINVAL;
1300 goto out;
1301 }
Patrick Daly6dd80252017-04-17 20:41:59 -07001302
1303 domain = iommu_get_domain_for_dev(dev);
1304 if (domain) {
1305 pr_err("Another driver is using this device's iommu\n"
1306 "Iommu-Debug cannot be used concurrently\n");
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001307 retval = -EINVAL;
1308 goto out;
1309 }
Mitchel Humpherysac4c38b2015-07-30 19:40:21 -07001310 if (iommu_debug_attach_do_attach(ddev, val, is_secure)) {
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001311 retval = -EIO;
1312 goto out;
1313 }
1314 pr_err("Attached\n");
Mitchel Humpherysac4c38b2015-07-30 19:40:21 -07001315 } else {
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001316 if (!ddev->domain) {
Patrick Daly6dd80252017-04-17 20:41:59 -07001317 pr_err("Iommu-Debug is not attached?\n");
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001318 retval = -EINVAL;
1319 goto out;
1320 }
Patrick Daly6dd80252017-04-17 20:41:59 -07001321 iommu_detach_group(ddev->domain, dev->iommu_group);
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001322 iommu_domain_free(ddev->domain);
1323 ddev->domain = NULL;
1324 pr_err("Detached\n");
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001325 }
1326
1327 retval = count;
1328out:
1329 return retval;
1330}
1331
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07001332static ssize_t iommu_debug_dma_attach_write(struct file *file,
1333 const char __user *ubuf,
1334 size_t count, loff_t *offset)
1335{
1336 return __iommu_debug_dma_attach_write(file, ubuf, count, offset);
1337
1338}
1339
1340static ssize_t iommu_debug_dma_attach_read(struct file *file, char __user *ubuf,
1341 size_t count, loff_t *offset)
1342{
1343 struct iommu_debug_device *ddev = file->private_data;
1344 struct device *dev = ddev->dev;
1345 char c[2];
1346
1347 if (*offset)
1348 return 0;
1349
1350 if (!dev->archdata.mapping)
1351 c[0] = '0';
1352 else
1353 c[0] = dev->archdata.mapping->domain ? '1' : '0';
1354
1355 c[1] = '\n';
1356 if (copy_to_user(ubuf, &c, 2)) {
1357 pr_err("copy_to_user failed\n");
1358 return -EFAULT;
1359 }
1360 *offset = 1; /* non-zero means we're done */
1361
1362 return 2;
1363}
1364
/* File ops for the "dma_attach" debugfs node (read state / write toggle). */
static const struct file_operations iommu_debug_dma_attach_fops = {
	.open = simple_open,
	.write = iommu_debug_dma_attach_write,
	.read = iommu_debug_dma_attach_read,
};
1370
1371static ssize_t iommu_debug_test_virt_addr_read(struct file *file,
1372 char __user *ubuf,
1373 size_t count, loff_t *offset)
1374{
1375 char buf[100];
1376 ssize_t retval;
1377 size_t buflen;
1378 int buf_len = sizeof(buf);
1379
1380 if (*offset)
1381 return 0;
1382
1383 memset(buf, 0, buf_len);
1384
1385 if (!test_virt_addr)
1386 strlcpy(buf, "FAIL\n", buf_len);
1387 else
1388 snprintf(buf, buf_len, "0x%pK\n", test_virt_addr);
1389
1390 buflen = strlen(buf);
1391 if (copy_to_user(ubuf, buf, buflen)) {
1392 pr_err("Couldn't copy_to_user\n");
1393 retval = -EFAULT;
1394 } else {
1395 *offset = 1; /* non-zero means we're done */
1396 retval = buflen;
1397 }
1398
1399 return retval;
1400}
1401
/* File ops for the read-only "test_virt_addr" debugfs node. */
static const struct file_operations iommu_debug_test_virt_addr_fops = {
	.open = simple_open,
	.read = iommu_debug_test_virt_addr_read,
};
1406
Mitchel Humpherysac4c38b2015-07-30 19:40:21 -07001407static ssize_t iommu_debug_attach_write(struct file *file,
1408 const char __user *ubuf,
1409 size_t count, loff_t *offset)
1410{
1411 return __iommu_debug_attach_write(file, ubuf, count, offset,
1412 false);
1413
1414}
1415
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001416static ssize_t iommu_debug_attach_read(struct file *file, char __user *ubuf,
1417 size_t count, loff_t *offset)
1418{
1419 struct iommu_debug_device *ddev = file->private_data;
1420 char c[2];
1421
1422 if (*offset)
1423 return 0;
1424
1425 c[0] = ddev->domain ? '1' : '0';
1426 c[1] = '\n';
1427 if (copy_to_user(ubuf, &c, 2)) {
1428 pr_err("copy_to_user failed\n");
1429 return -EFAULT;
1430 }
1431 *offset = 1; /* non-zero means we're done */
1432
1433 return 2;
1434}
1435
/* File ops for the "attach" debugfs node (read state / write toggle). */
static const struct file_operations iommu_debug_attach_fops = {
	.open = simple_open,
	.write = iommu_debug_attach_write,
	.read = iommu_debug_attach_read,
};
1441
Mitchel Humpherysac4c38b2015-07-30 19:40:21 -07001442static ssize_t iommu_debug_attach_write_secure(struct file *file,
1443 const char __user *ubuf,
1444 size_t count, loff_t *offset)
1445{
1446 return __iommu_debug_attach_write(file, ubuf, count, offset,
1447 true);
Mitchel Humpherysac4c38b2015-07-30 19:40:21 -07001448}
1449
/* File ops for the "secure_attach" debugfs node. */
static const struct file_operations iommu_debug_secure_attach_fops = {
	.open = simple_open,
	.write = iommu_debug_attach_write_secure,
	.read = iommu_debug_attach_read,
};
1455
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07001456static ssize_t iommu_debug_pte_write(struct file *file,
1457 const char __user *ubuf,
1458 size_t count, loff_t *offset)
1459{
1460 struct iommu_debug_device *ddev = file->private_data;
1461 dma_addr_t iova;
1462
1463 if (kstrtox_from_user(ubuf, count, 0, &iova)) {
1464 pr_err("Invalid format for iova\n");
1465 ddev->iova = 0;
1466 return -EINVAL;
1467 }
1468
1469 ddev->iova = iova;
1470 pr_err("Saved iova=%pa for future PTE commands\n", &iova);
1471 return count;
1472}
1473
1474
1475static ssize_t iommu_debug_pte_read(struct file *file, char __user *ubuf,
1476 size_t count, loff_t *offset)
1477{
1478 struct iommu_debug_device *ddev = file->private_data;
1479 struct device *dev = ddev->dev;
1480 uint64_t pte;
1481 char buf[100];
1482 ssize_t retval;
1483 size_t buflen;
1484
1485 if (!dev->archdata.mapping) {
1486 pr_err("No mapping. Did you already attach?\n");
1487 return -EINVAL;
1488 }
1489 if (!dev->archdata.mapping->domain) {
1490 pr_err("No domain. Did you already attach?\n");
1491 return -EINVAL;
1492 }
1493
1494 if (*offset)
1495 return 0;
1496
1497 memset(buf, 0, sizeof(buf));
1498
1499 pte = iommu_iova_to_pte(dev->archdata.mapping->domain,
1500 ddev->iova);
1501
1502 if (!pte)
1503 strlcpy(buf, "FAIL\n", sizeof(buf));
1504 else
1505 snprintf(buf, sizeof(buf), "pte=%016llx\n", pte);
1506
1507 buflen = strlen(buf);
1508 if (copy_to_user(ubuf, buf, buflen)) {
1509 pr_err("Couldn't copy_to_user\n");
1510 retval = -EFAULT;
1511 } else {
1512 *offset = 1; /* non-zero means we're done */
1513 retval = buflen;
1514 }
1515
1516 return retval;
1517}
1518
/* File ops for the "pte" debugfs node (write iova / read its PTE). */
static const struct file_operations iommu_debug_pte_fops = {
	.open = simple_open,
	.write = iommu_debug_pte_write,
	.read = iommu_debug_pte_read,
};
1524
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001525static ssize_t iommu_debug_atos_write(struct file *file,
1526 const char __user *ubuf,
1527 size_t count, loff_t *offset)
1528{
1529 struct iommu_debug_device *ddev = file->private_data;
1530 dma_addr_t iova;
1531
Susheel Khiania4417e72016-07-12 11:28:32 +05301532 if (kstrtox_from_user(ubuf, count, 0, &iova)) {
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001533 pr_err("Invalid format for iova\n");
1534 ddev->iova = 0;
1535 return -EINVAL;
1536 }
1537
1538 ddev->iova = iova;
1539 pr_err("Saved iova=%pa for future ATOS commands\n", &iova);
1540 return count;
1541}
1542
1543static ssize_t iommu_debug_atos_read(struct file *file, char __user *ubuf,
1544 size_t count, loff_t *offset)
1545{
1546 struct iommu_debug_device *ddev = file->private_data;
1547 phys_addr_t phys;
1548 char buf[100];
1549 ssize_t retval;
1550 size_t buflen;
1551
1552 if (!ddev->domain) {
1553 pr_err("No domain. Did you already attach?\n");
1554 return -EINVAL;
1555 }
1556
1557 if (*offset)
1558 return 0;
1559
1560 memset(buf, 0, 100);
1561
1562 phys = iommu_iova_to_phys_hard(ddev->domain, ddev->iova);
Mitchel Humpherysff93b1e2016-04-29 11:41:59 -07001563 if (!phys) {
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001564 strlcpy(buf, "FAIL\n", 100);
Mitchel Humpherysff93b1e2016-04-29 11:41:59 -07001565 phys = iommu_iova_to_phys(ddev->domain, ddev->iova);
1566 dev_err(ddev->dev, "ATOS for %pa failed. Software walk returned: %pa\n",
1567 &ddev->iova, &phys);
1568 } else {
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001569 snprintf(buf, 100, "%pa\n", &phys);
Mitchel Humpherysff93b1e2016-04-29 11:41:59 -07001570 }
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001571
1572 buflen = strlen(buf);
1573 if (copy_to_user(ubuf, buf, buflen)) {
1574 pr_err("Couldn't copy_to_user\n");
1575 retval = -EFAULT;
1576 } else {
1577 *offset = 1; /* non-zero means we're done */
1578 retval = buflen;
1579 }
1580
1581 return retval;
1582}
1583
/* File ops for the "atos" debugfs node (write iova / read translation). */
static const struct file_operations iommu_debug_atos_fops = {
	.open = simple_open,
	.write = iommu_debug_atos_write,
	.read = iommu_debug_atos_read,
};
1589
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07001590static ssize_t iommu_debug_dma_atos_read(struct file *file, char __user *ubuf,
1591 size_t count, loff_t *offset)
1592{
1593 struct iommu_debug_device *ddev = file->private_data;
1594 struct device *dev = ddev->dev;
1595 phys_addr_t phys;
1596 char buf[100];
1597 ssize_t retval;
1598 size_t buflen;
1599
1600 if (!dev->archdata.mapping) {
1601 pr_err("No mapping. Did you already attach?\n");
1602 return -EINVAL;
1603 }
1604 if (!dev->archdata.mapping->domain) {
1605 pr_err("No domain. Did you already attach?\n");
1606 return -EINVAL;
1607 }
1608
1609 if (*offset)
1610 return 0;
1611
1612 memset(buf, 0, sizeof(buf));
1613
1614 phys = iommu_iova_to_phys_hard(dev->archdata.mapping->domain,
1615 ddev->iova);
1616 if (!phys)
1617 strlcpy(buf, "FAIL\n", sizeof(buf));
1618 else
1619 snprintf(buf, sizeof(buf), "%pa\n", &phys);
1620
1621 buflen = strlen(buf);
1622 if (copy_to_user(ubuf, buf, buflen)) {
1623 pr_err("Couldn't copy_to_user\n");
1624 retval = -EFAULT;
1625 } else {
1626 *offset = 1; /* non-zero means we're done */
1627 retval = buflen;
1628 }
1629
1630 return retval;
1631}
1632
/* File ops for the "dma_atos" debugfs node (shares the atos write parser). */
static const struct file_operations iommu_debug_dma_atos_fops = {
	.open = simple_open,
	.write = iommu_debug_atos_write,
	.read = iommu_debug_dma_atos_read,
};
1638
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001639static ssize_t iommu_debug_map_write(struct file *file, const char __user *ubuf,
1640 size_t count, loff_t *offset)
1641{
Shiraz Hashim3c28c962016-07-04 15:05:35 +05301642 ssize_t retval = -EINVAL;
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001643 int ret;
1644 char *comma1, *comma2, *comma3;
1645 char buf[100];
1646 dma_addr_t iova;
1647 phys_addr_t phys;
1648 size_t size;
1649 int prot;
1650 struct iommu_debug_device *ddev = file->private_data;
1651
1652 if (count >= 100) {
1653 pr_err("Value too large\n");
1654 return -EINVAL;
1655 }
1656
1657 if (!ddev->domain) {
1658 pr_err("No domain. Did you already attach?\n");
1659 return -EINVAL;
1660 }
1661
1662 memset(buf, 0, 100);
1663
1664 if (copy_from_user(buf, ubuf, count)) {
1665 pr_err("Couldn't copy from user\n");
1666 retval = -EFAULT;
1667 }
1668
1669 comma1 = strnchr(buf, count, ',');
1670 if (!comma1)
1671 goto invalid_format;
1672
1673 comma2 = strnchr(comma1 + 1, count, ',');
1674 if (!comma2)
1675 goto invalid_format;
1676
1677 comma3 = strnchr(comma2 + 1, count, ',');
1678 if (!comma3)
1679 goto invalid_format;
1680
1681 /* split up the words */
1682 *comma1 = *comma2 = *comma3 = '\0';
1683
Susheel Khiania4417e72016-07-12 11:28:32 +05301684 if (kstrtoux(buf, 0, &iova))
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001685 goto invalid_format;
1686
Susheel Khiania4417e72016-07-12 11:28:32 +05301687 if (kstrtoux(comma1 + 1, 0, &phys))
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001688 goto invalid_format;
1689
Susheel Khiania4417e72016-07-12 11:28:32 +05301690 if (kstrtosize_t(comma2 + 1, 0, &size))
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001691 goto invalid_format;
1692
1693 if (kstrtoint(comma3 + 1, 0, &prot))
1694 goto invalid_format;
1695
1696 ret = iommu_map(ddev->domain, iova, phys, size, prot);
1697 if (ret) {
1698 pr_err("iommu_map failed with %d\n", ret);
1699 retval = -EIO;
1700 goto out;
1701 }
1702
1703 retval = count;
1704 pr_err("Mapped %pa to %pa (len=0x%zx, prot=0x%x)\n",
1705 &iova, &phys, size, prot);
1706out:
1707 return retval;
1708
1709invalid_format:
1710 pr_err("Invalid format. Expected: iova,phys,len,prot where `prot' is the bitwise OR of IOMMU_READ, IOMMU_WRITE, etc.\n");
1711 return -EINVAL;
1712}
1713
/* File ops for the write-only "map" debugfs node. */
static const struct file_operations iommu_debug_map_fops = {
	.open = simple_open,
	.write = iommu_debug_map_write,
};
1718
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07001719/*
1720 * Performs DMA mapping of a given virtual address and size to an iova address.
1721 * User input format: (addr,len,dma attr) where dma attr is:
1722 * 0: normal mapping
1723 * 1: force coherent mapping
1724 * 2: force non-cohernet mapping
Sudarshan Rajagopalan5d6c6f52017-05-19 17:00:31 -07001725 * 3: use system cache
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07001726 */
1727static ssize_t iommu_debug_dma_map_write(struct file *file,
1728 const char __user *ubuf, size_t count, loff_t *offset)
1729{
1730 ssize_t retval = -EINVAL;
1731 int ret;
1732 char *comma1, *comma2;
1733 char buf[100];
1734 unsigned long addr;
1735 void *v_addr;
1736 dma_addr_t iova;
1737 size_t size;
1738 unsigned int attr;
1739 unsigned long dma_attrs;
1740 struct iommu_debug_device *ddev = file->private_data;
1741 struct device *dev = ddev->dev;
1742
1743 if (count >= sizeof(buf)) {
1744 pr_err("Value too large\n");
1745 return -EINVAL;
1746 }
1747
1748 if (!dev->archdata.mapping) {
1749 pr_err("No mapping. Did you already attach?\n");
1750 retval = -EINVAL;
1751 goto out;
1752 }
1753 if (!dev->archdata.mapping->domain) {
1754 pr_err("No domain. Did you already attach?\n");
1755 retval = -EINVAL;
1756 goto out;
1757 }
1758
1759 memset(buf, 0, sizeof(buf));
1760
1761 if (copy_from_user(buf, ubuf, count)) {
1762 pr_err("Couldn't copy from user\n");
1763 retval = -EFAULT;
1764 goto out;
1765 }
1766
1767 comma1 = strnchr(buf, count, ',');
1768 if (!comma1)
1769 goto invalid_format;
1770
1771 comma2 = strnchr(comma1 + 1, count, ',');
1772 if (!comma2)
1773 goto invalid_format;
1774
1775 *comma1 = *comma2 = '\0';
1776
1777 if (kstrtoul(buf, 0, &addr))
1778 goto invalid_format;
1779 v_addr = (void *)addr;
1780
1781 if (kstrtosize_t(comma1 + 1, 0, &size))
1782 goto invalid_format;
1783
1784 if (kstrtouint(comma2 + 1, 0, &attr))
1785 goto invalid_format;
1786
1787 if (v_addr < test_virt_addr || v_addr > (test_virt_addr + SZ_1M - 1))
1788 goto invalid_addr;
1789
1790 if (attr == 0)
1791 dma_attrs = 0;
1792 else if (attr == 1)
1793 dma_attrs = DMA_ATTR_FORCE_COHERENT;
1794 else if (attr == 2)
1795 dma_attrs = DMA_ATTR_FORCE_NON_COHERENT;
Sudarshan Rajagopalan5d6c6f52017-05-19 17:00:31 -07001796 else if (attr == 3)
1797 dma_attrs = DMA_ATTR_IOMMU_USE_UPSTREAM_HINT;
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07001798 else
1799 goto invalid_format;
1800
1801 iova = dma_map_single_attrs(dev, v_addr, size,
1802 DMA_TO_DEVICE, dma_attrs);
1803
1804 if (dma_mapping_error(dev, iova)) {
1805 pr_err("Failed to perform dma_map_single\n");
1806 ret = -EINVAL;
1807 goto out;
1808 }
1809
1810 retval = count;
1811 pr_err("Mapped 0x%p to %pa (len=0x%zx)\n",
1812 v_addr, &iova, size);
1813 ddev->iova = iova;
1814 pr_err("Saved iova=%pa for future PTE commands\n", &iova);
1815out:
1816 return retval;
1817
1818invalid_format:
Sudarshan Rajagopalan5d6c6f52017-05-19 17:00:31 -07001819 pr_err("Invalid format. Expected: addr,len,dma attr where 'dma attr' is\n0: normal mapping\n1: force coherent\n2: force non-cohernet\n3: use system cache\n");
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07001820 return retval;
1821
1822invalid_addr:
1823 pr_err("Invalid addr given! Address should be within 1MB size from start addr returned by doing 'cat test_virt_addr'.\n");
1824 return retval;
1825}
1826
1827static ssize_t iommu_debug_dma_map_read(struct file *file, char __user *ubuf,
1828 size_t count, loff_t *offset)
1829{
1830 struct iommu_debug_device *ddev = file->private_data;
1831 struct device *dev = ddev->dev;
1832 char buf[100];
1833 ssize_t retval;
1834 size_t buflen;
1835 dma_addr_t iova;
1836
1837 if (!dev->archdata.mapping) {
1838 pr_err("No mapping. Did you already attach?\n");
1839 return -EINVAL;
1840 }
1841 if (!dev->archdata.mapping->domain) {
1842 pr_err("No domain. Did you already attach?\n");
1843 return -EINVAL;
1844 }
1845
1846 if (*offset)
1847 return 0;
1848
1849 memset(buf, 0, sizeof(buf));
1850
1851 iova = ddev->iova;
1852 snprintf(buf, sizeof(buf), "%pa\n", &iova);
1853
1854 buflen = strlen(buf);
1855 if (copy_to_user(ubuf, buf, buflen)) {
1856 pr_err("Couldn't copy_to_user\n");
1857 retval = -EFAULT;
1858 } else {
1859 *offset = 1; /* non-zero means we're done */
1860 retval = buflen;
1861 }
1862
1863 return retval;
1864}
1865
/* File ops for the "dma_map" debugfs node (write mapping / read iova). */
static const struct file_operations iommu_debug_dma_map_fops = {
	.open = simple_open,
	.write = iommu_debug_dma_map_write,
	.read = iommu_debug_dma_map_read,
};
1871
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001872static ssize_t iommu_debug_unmap_write(struct file *file,
1873 const char __user *ubuf,
1874 size_t count, loff_t *offset)
1875{
1876 ssize_t retval = 0;
1877 char *comma1;
1878 char buf[100];
1879 dma_addr_t iova;
1880 size_t size;
1881 size_t unmapped;
1882 struct iommu_debug_device *ddev = file->private_data;
1883
1884 if (count >= 100) {
1885 pr_err("Value too large\n");
1886 return -EINVAL;
1887 }
1888
1889 if (!ddev->domain) {
1890 pr_err("No domain. Did you already attach?\n");
1891 return -EINVAL;
1892 }
1893
1894 memset(buf, 0, 100);
1895
1896 if (copy_from_user(buf, ubuf, count)) {
1897 pr_err("Couldn't copy from user\n");
1898 retval = -EFAULT;
1899 goto out;
1900 }
1901
1902 comma1 = strnchr(buf, count, ',');
1903 if (!comma1)
1904 goto invalid_format;
1905
1906 /* split up the words */
1907 *comma1 = '\0';
1908
Susheel Khiania4417e72016-07-12 11:28:32 +05301909 if (kstrtoux(buf, 0, &iova))
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001910 goto invalid_format;
1911
Susheel Khiania4417e72016-07-12 11:28:32 +05301912 if (kstrtosize_t(comma1 + 1, 0, &size))
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001913 goto invalid_format;
1914
1915 unmapped = iommu_unmap(ddev->domain, iova, size);
1916 if (unmapped != size) {
1917 pr_err("iommu_unmap failed. Expected to unmap: 0x%zx, unmapped: 0x%zx",
1918 size, unmapped);
1919 return -EIO;
1920 }
1921
1922 retval = count;
1923 pr_err("Unmapped %pa (len=0x%zx)\n", &iova, size);
1924out:
1925 return retval;
1926
1927invalid_format:
1928 pr_err("Invalid format. Expected: iova,len\n");
Patrick Daly5a5e3ff2016-10-13 19:31:50 -07001929 return -EINVAL;
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001930}
1931
/* File ops for the write-only "unmap" debugfs node. */
static const struct file_operations iommu_debug_unmap_fops = {
	.open = simple_open,
	.write = iommu_debug_unmap_write,
};
1936
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07001937static ssize_t iommu_debug_dma_unmap_write(struct file *file,
1938 const char __user *ubuf,
1939 size_t count, loff_t *offset)
1940{
1941 ssize_t retval = 0;
1942 char *comma1, *comma2;
1943 char buf[100];
1944 size_t size;
1945 unsigned int attr;
1946 dma_addr_t iova;
1947 unsigned long dma_attrs;
1948 struct iommu_debug_device *ddev = file->private_data;
1949 struct device *dev = ddev->dev;
1950
1951 if (count >= sizeof(buf)) {
1952 pr_err("Value too large\n");
1953 return -EINVAL;
1954 }
1955
1956 if (!dev->archdata.mapping) {
1957 pr_err("No mapping. Did you already attach?\n");
1958 retval = -EINVAL;
1959 goto out;
1960 }
1961 if (!dev->archdata.mapping->domain) {
1962 pr_err("No domain. Did you already attach?\n");
1963 retval = -EINVAL;
1964 goto out;
1965 }
1966
1967 memset(buf, 0, sizeof(buf));
1968
1969 if (copy_from_user(buf, ubuf, count)) {
1970 pr_err("Couldn't copy from user\n");
1971 retval = -EFAULT;
1972 goto out;
1973 }
1974
1975 comma1 = strnchr(buf, count, ',');
1976 if (!comma1)
1977 goto invalid_format;
1978
1979 comma2 = strnchr(comma1 + 1, count, ',');
1980 if (!comma2)
1981 goto invalid_format;
1982
1983 *comma1 = *comma2 = '\0';
1984
1985 if (kstrtoux(buf, 0, &iova))
1986 goto invalid_format;
1987
1988 if (kstrtosize_t(comma1 + 1, 0, &size))
1989 goto invalid_format;
1990
1991 if (kstrtouint(comma2 + 1, 0, &attr))
1992 goto invalid_format;
1993
1994 if (attr == 0)
1995 dma_attrs = 0;
1996 else if (attr == 1)
1997 dma_attrs = DMA_ATTR_FORCE_COHERENT;
1998 else if (attr == 2)
1999 dma_attrs = DMA_ATTR_FORCE_NON_COHERENT;
Sudarshan Rajagopalan5d6c6f52017-05-19 17:00:31 -07002000 else if (attr == 3)
2001 dma_attrs = DMA_ATTR_IOMMU_USE_UPSTREAM_HINT;
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07002002 else
2003 goto invalid_format;
2004
2005 dma_unmap_single_attrs(dev, iova, size, DMA_TO_DEVICE, dma_attrs);
2006
2007 retval = count;
2008 pr_err("Unmapped %pa (len=0x%zx)\n", &iova, size);
2009out:
2010 return retval;
2011
2012invalid_format:
2013 pr_err("Invalid format. Expected: iova,len, dma attr\n");
2014 return retval;
2015}
2016
/* debugfs fops for "dma_unmap": write-only, unmaps via the DMA API. */
static const struct file_operations iommu_debug_dma_unmap_fops = {
	.open = simple_open,
	.write = iommu_debug_dma_unmap_write,
};
2021
Mitchel Humpherys0d1b8262016-02-01 16:53:39 -08002022static ssize_t iommu_debug_config_clocks_write(struct file *file,
2023 const char __user *ubuf,
2024 size_t count, loff_t *offset)
2025{
2026 char buf;
2027 struct iommu_debug_device *ddev = file->private_data;
2028 struct device *dev = ddev->dev;
2029
2030 /* we're expecting a single character plus (optionally) a newline */
2031 if (count > 2) {
2032 dev_err(dev, "Invalid value\n");
2033 return -EINVAL;
2034 }
2035
2036 if (!ddev->domain) {
2037 dev_err(dev, "No domain. Did you already attach?\n");
2038 return -EINVAL;
2039 }
2040
2041 if (copy_from_user(&buf, ubuf, 1)) {
2042 dev_err(dev, "Couldn't copy from user\n");
2043 return -EFAULT;
2044 }
2045
2046 switch (buf) {
2047 case '0':
2048 dev_err(dev, "Disabling config clocks\n");
2049 iommu_disable_config_clocks(ddev->domain);
2050 break;
2051 case '1':
2052 dev_err(dev, "Enabling config clocks\n");
2053 if (iommu_enable_config_clocks(ddev->domain))
2054 dev_err(dev, "Failed!\n");
2055 break;
2056 default:
2057 dev_err(dev, "Invalid value. Should be 0 or 1.\n");
2058 return -EINVAL;
2059 }
2060
2061 return count;
2062}
2063
/* debugfs fops for "config_clocks": write-only toggle. */
static const struct file_operations iommu_debug_config_clocks_fops = {
	.open = simple_open,
	.write = iommu_debug_config_clocks_write,
};
2068
Patrick Daly9438f322017-04-05 18:03:19 -07002069static ssize_t iommu_debug_trigger_fault_write(
2070 struct file *file, const char __user *ubuf, size_t count,
2071 loff_t *offset)
2072{
2073 struct iommu_debug_device *ddev = file->private_data;
2074 unsigned long flags;
2075
2076 if (!ddev->domain) {
2077 pr_err("No domain. Did you already attach?\n");
2078 return -EINVAL;
2079 }
2080
2081 if (kstrtoul_from_user(ubuf, count, 0, &flags)) {
2082 pr_err("Invalid flags format\n");
2083 return -EFAULT;
2084 }
2085
2086 iommu_trigger_fault(ddev->domain, flags);
2087
2088 return count;
2089}
2090
/* debugfs fops for "trigger-fault": write-only fault injection. */
static const struct file_operations iommu_debug_trigger_fault_fops = {
	.open = simple_open,
	.write = iommu_debug_trigger_fault_write,
};
2095
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002096/*
2097 * The following will only work for drivers that implement the generic
2098 * device tree bindings described in
2099 * Documentation/devicetree/bindings/iommu/iommu.txt
2100 */
2101static int snarf_iommu_devices(struct device *dev, void *ignored)
2102{
2103 struct iommu_debug_device *ddev;
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07002104 struct dentry *dir;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002105
2106 if (!of_find_property(dev->of_node, "iommus", NULL))
2107 return 0;
2108
Patrick Daly6dd80252017-04-17 20:41:59 -07002109 /* Hold a reference count */
2110 if (!iommu_group_get(dev))
2111 return 0;
2112
Mitchel Humpherys89924fd2015-07-09 14:50:22 -07002113 ddev = kzalloc(sizeof(*ddev), GFP_KERNEL);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002114 if (!ddev)
2115 return -ENODEV;
2116 ddev->dev = dev;
2117 dir = debugfs_create_dir(dev_name(dev), debugfs_tests_dir);
2118 if (!dir) {
2119 pr_err("Couldn't create iommu/devices/%s debugfs dir\n",
2120 dev_name(dev));
2121 goto err;
2122 }
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07002123
Patrick Dalye4e39862015-11-20 20:00:50 -08002124 if (!debugfs_create_file("nr_iters", S_IRUSR, dir, &iters_per_op,
2125 &iommu_debug_nr_iters_ops)) {
2126 pr_err("Couldn't create iommu/devices/%s/nr_iters debugfs file\n",
2127 dev_name(dev));
2128 goto err_rmdir;
2129 }
2130
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07002131 if (!debugfs_create_file("test_virt_addr", 0400, dir, ddev,
2132 &iommu_debug_test_virt_addr_fops)) {
2133 pr_err("Couldn't create iommu/devices/%s/test_virt_addr debugfs file\n",
2134 dev_name(dev));
2135 goto err_rmdir;
2136 }
2137
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07002138 if (!debugfs_create_file("profiling", S_IRUSR, dir, ddev,
2139 &iommu_debug_profiling_fops)) {
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002140 pr_err("Couldn't create iommu/devices/%s/profiling debugfs file\n",
2141 dev_name(dev));
2142 goto err_rmdir;
2143 }
2144
Mitchel Humpherys020f90f2015-10-02 16:02:31 -07002145 if (!debugfs_create_file("secure_profiling", S_IRUSR, dir, ddev,
2146 &iommu_debug_secure_profiling_fops)) {
2147 pr_err("Couldn't create iommu/devices/%s/secure_profiling debugfs file\n",
2148 dev_name(dev));
2149 goto err_rmdir;
2150 }
2151
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -07002152 if (!debugfs_create_file("profiling_fast", S_IRUSR, dir, ddev,
2153 &iommu_debug_profiling_fast_fops)) {
2154 pr_err("Couldn't create iommu/devices/%s/profiling_fast debugfs file\n",
2155 dev_name(dev));
2156 goto err_rmdir;
2157 }
2158
Mitchel Humpherysbe3060c2015-10-08 15:08:01 -07002159 if (!debugfs_create_file("profiling_fast_dma_api", S_IRUSR, dir, ddev,
2160 &iommu_debug_profiling_fast_dma_api_fops)) {
2161 pr_err("Couldn't create iommu/devices/%s/profiling_fast_dma_api debugfs file\n",
2162 dev_name(dev));
2163 goto err_rmdir;
2164 }
2165
Mitchel Humpherys45fc7122015-12-11 15:22:15 -08002166 if (!debugfs_create_file("functional_fast_dma_api", S_IRUSR, dir, ddev,
2167 &iommu_debug_functional_fast_dma_api_fops)) {
2168 pr_err("Couldn't create iommu/devices/%s/functional_fast_dma_api debugfs file\n",
2169 dev_name(dev));
2170 goto err_rmdir;
2171 }
2172
Mitchel Humpherys10215fd2015-12-15 18:45:57 -08002173 if (!debugfs_create_file("functional_arm_dma_api", S_IRUSR, dir, ddev,
2174 &iommu_debug_functional_arm_dma_api_fops)) {
2175 pr_err("Couldn't create iommu/devices/%s/functional_arm_dma_api debugfs file\n",
2176 dev_name(dev));
2177 goto err_rmdir;
2178 }
2179
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07002180 if (!debugfs_create_file("dma_attach", 0600, dir, ddev,
2181 &iommu_debug_dma_attach_fops)) {
2182 pr_err("Couldn't create iommu/devices/%s/dma_attach debugfs file\n",
2183 dev_name(dev));
2184 goto err_rmdir;
2185 }
2186
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07002187 if (!debugfs_create_file("attach", S_IRUSR, dir, ddev,
2188 &iommu_debug_attach_fops)) {
2189 pr_err("Couldn't create iommu/devices/%s/attach debugfs file\n",
2190 dev_name(dev));
2191 goto err_rmdir;
2192 }
2193
Mitchel Humpherysac4c38b2015-07-30 19:40:21 -07002194 if (!debugfs_create_file("secure_attach", S_IRUSR, dir, ddev,
2195 &iommu_debug_secure_attach_fops)) {
2196 pr_err("Couldn't create iommu/devices/%s/secure_attach debugfs file\n",
2197 dev_name(dev));
2198 goto err_rmdir;
2199 }
2200
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07002201 if (!debugfs_create_file("atos", S_IWUSR, dir, ddev,
2202 &iommu_debug_atos_fops)) {
2203 pr_err("Couldn't create iommu/devices/%s/atos debugfs file\n",
2204 dev_name(dev));
2205 goto err_rmdir;
2206 }
2207
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07002208 if (!debugfs_create_file("dma_atos", 0600, dir, ddev,
2209 &iommu_debug_dma_atos_fops)) {
2210 pr_err("Couldn't create iommu/devices/%s/dma_atos debugfs file\n",
2211 dev_name(dev));
2212 goto err_rmdir;
2213 }
2214
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07002215 if (!debugfs_create_file("map", S_IWUSR, dir, ddev,
2216 &iommu_debug_map_fops)) {
2217 pr_err("Couldn't create iommu/devices/%s/map debugfs file\n",
2218 dev_name(dev));
2219 goto err_rmdir;
2220 }
2221
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07002222 if (!debugfs_create_file("dma_map", 0600, dir, ddev,
2223 &iommu_debug_dma_map_fops)) {
2224 pr_err("Couldn't create iommu/devices/%s/dma_map debugfs file\n",
2225 dev_name(dev));
2226 goto err_rmdir;
2227 }
2228
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07002229 if (!debugfs_create_file("unmap", S_IWUSR, dir, ddev,
2230 &iommu_debug_unmap_fops)) {
2231 pr_err("Couldn't create iommu/devices/%s/unmap debugfs file\n",
2232 dev_name(dev));
2233 goto err_rmdir;
2234 }
2235
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07002236 if (!debugfs_create_file("dma_unmap", 0200, dir, ddev,
2237 &iommu_debug_dma_unmap_fops)) {
2238 pr_err("Couldn't create iommu/devices/%s/dma_unmap debugfs file\n",
2239 dev_name(dev));
2240 goto err_rmdir;
2241 }
2242
2243 if (!debugfs_create_file("pte", 0600, dir, ddev,
2244 &iommu_debug_pte_fops)) {
2245 pr_err("Couldn't create iommu/devices/%s/pte debugfs file\n",
2246 dev_name(dev));
2247 goto err_rmdir;
2248 }
2249
Mitchel Humpherys0d1b8262016-02-01 16:53:39 -08002250 if (!debugfs_create_file("config_clocks", S_IWUSR, dir, ddev,
2251 &iommu_debug_config_clocks_fops)) {
2252 pr_err("Couldn't create iommu/devices/%s/config_clocks debugfs file\n",
2253 dev_name(dev));
2254 goto err_rmdir;
2255 }
2256
Patrick Daly9438f322017-04-05 18:03:19 -07002257 if (!debugfs_create_file("trigger-fault", 0200, dir, ddev,
2258 &iommu_debug_trigger_fault_fops)) {
2259 pr_err("Couldn't create iommu/devices/%s/trigger-fault debugfs file\n",
2260 dev_name(dev));
2261 goto err_rmdir;
2262 }
2263
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002264 list_add(&ddev->list, &iommu_debug_devices);
2265 return 0;
2266
2267err_rmdir:
2268 debugfs_remove_recursive(dir);
2269err:
2270 kfree(ddev);
2271 return 0;
2272}
2273
2274static int iommu_debug_init_tests(void)
2275{
2276 debugfs_tests_dir = debugfs_create_dir("tests",
Mitchel Humpherysc75ae492015-07-15 18:27:36 -07002277 iommu_debugfs_top);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002278 if (!debugfs_tests_dir) {
2279 pr_err("Couldn't create iommu/tests debugfs directory\n");
2280 return -ENODEV;
2281 }
2282
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07002283 test_virt_addr = kzalloc(SZ_1M, GFP_KERNEL);
2284
2285 if (!test_virt_addr)
2286 return -ENOMEM;
2287
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002288 return bus_for_each_dev(&platform_bus_type, NULL, NULL,
2289 snarf_iommu_devices);
2290}
Mitchel Humpherysc75ae492015-07-15 18:27:36 -07002291
/* Tear down the entire iommu/tests debugfs tree. */
static void iommu_debug_destroy_tests(void)
{
	debugfs_remove_recursive(debugfs_tests_dir);
}
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002296#else
2297static inline int iommu_debug_init_tests(void) { return 0; }
Mitchel Humpherysc75ae492015-07-15 18:27:36 -07002298static inline void iommu_debug_destroy_tests(void) { }
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002299#endif
2300
/*
 * This isn't really a "driver", we just need something in the device tree
 * so that our tests can run without any client drivers, and our tests rely
 * on parsing the device tree for nodes with the `iommus' property.
 */
/* No-op probe/remove shared by the stub platform driver below. */
static int iommu_debug_pass(struct platform_device *pdev)
{
	return 0;
}
2310
/* Matches the dummy DT node used purely to trigger this stub driver. */
static const struct of_device_id iommu_debug_of_match[] = {
	{ .compatible = "iommu-debug-test" },
	{ },
};
2315
/* Stub platform driver; probe and remove intentionally do nothing. */
static struct platform_driver iommu_debug_driver = {
	.probe = iommu_debug_pass,
	.remove = iommu_debug_pass,
	.driver = {
		.name = "iommu-debug",
		.of_match_table = iommu_debug_of_match,
	},
};
2324
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002325static int iommu_debug_init(void)
2326{
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002327 if (iommu_debug_init_tests())
Mitchel Humpherysc75ae492015-07-15 18:27:36 -07002328 return -ENODEV;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002329
Mitchel Humpherys93f7eef2016-04-13 17:08:49 -07002330 return platform_driver_register(&iommu_debug_driver);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002331}
2332
/* Module exit: unregister the stub driver before tearing down debugfs. */
static void iommu_debug_exit(void)
{
	platform_driver_unregister(&iommu_debug_driver);
	iommu_debug_destroy_tests();
}
2338
/* Standard module entry/exit registration. */
module_init(iommu_debug_init);
module_exit(iommu_debug_exit);