blob: 22a708e2d67e49bdf1d9751b390748184e79ebf2 [file] [log] [blame]
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07001/*
Charan Teja Reddy29f61402017-02-09 20:44:29 +05302 * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14
15#define pr_fmt(fmt) "iommu-debug: %s: " fmt, __func__
16
17#include <linux/debugfs.h>
18#include <linux/device.h>
19#include <linux/iommu.h>
20#include <linux/of.h>
21#include <linux/platform_device.h>
22#include <linux/slab.h>
23#include <linux/module.h>
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -070024#include <linux/uaccess.h>
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -070025#include <linux/dma-contiguous.h>
Mitchel Humpherys5e991f12015-07-30 19:25:54 -070026#include <soc/qcom/secure_buffer.h>
Mitchel Humpherysbe3060c2015-10-08 15:08:01 -070027#include <linux/dma-mapping.h>
28#include <asm/cacheflush.h>
29#include <asm/dma-iommu.h>
Mitchel Humpherys42296fb2015-06-23 16:29:16 -070030
Susheel Khiania4417e72016-07-12 11:28:32 +053031#if defined(CONFIG_IOMMU_DEBUG_TRACKING) || defined(CONFIG_IOMMU_TESTS)
32
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -070033static const char *iommu_debug_attr_to_string(enum iommu_attr attr)
34{
35 switch (attr) {
36 case DOMAIN_ATTR_GEOMETRY:
37 return "DOMAIN_ATTR_GEOMETRY";
38 case DOMAIN_ATTR_PAGING:
39 return "DOMAIN_ATTR_PAGING";
40 case DOMAIN_ATTR_WINDOWS:
41 return "DOMAIN_ATTR_WINDOWS";
42 case DOMAIN_ATTR_FSL_PAMU_STASH:
43 return "DOMAIN_ATTR_FSL_PAMU_STASH";
44 case DOMAIN_ATTR_FSL_PAMU_ENABLE:
45 return "DOMAIN_ATTR_FSL_PAMU_ENABLE";
46 case DOMAIN_ATTR_FSL_PAMUV1:
47 return "DOMAIN_ATTR_FSL_PAMUV1";
48 case DOMAIN_ATTR_NESTING:
49 return "DOMAIN_ATTR_NESTING";
50 case DOMAIN_ATTR_PT_BASE_ADDR:
51 return "DOMAIN_ATTR_PT_BASE_ADDR";
52 case DOMAIN_ATTR_SECURE_VMID:
53 return "DOMAIN_ATTR_SECURE_VMID";
54 case DOMAIN_ATTR_ATOMIC:
55 return "DOMAIN_ATTR_ATOMIC";
56 case DOMAIN_ATTR_CONTEXT_BANK:
57 return "DOMAIN_ATTR_CONTEXT_BANK";
58 case DOMAIN_ATTR_TTBR0:
59 return "DOMAIN_ATTR_TTBR0";
60 case DOMAIN_ATTR_CONTEXTIDR:
61 return "DOMAIN_ATTR_CONTEXTIDR";
62 case DOMAIN_ATTR_PROCID:
63 return "DOMAIN_ATTR_PROCID";
64 case DOMAIN_ATTR_DYNAMIC:
65 return "DOMAIN_ATTR_DYNAMIC";
66 case DOMAIN_ATTR_NON_FATAL_FAULTS:
67 return "DOMAIN_ATTR_NON_FATAL_FAULTS";
68 case DOMAIN_ATTR_S1_BYPASS:
69 return "DOMAIN_ATTR_S1_BYPASS";
70 case DOMAIN_ATTR_FAST:
71 return "DOMAIN_ATTR_FAST";
Patrick Dalyef6c1dc2016-11-16 14:35:23 -080072 case DOMAIN_ATTR_EARLY_MAP:
73 return "DOMAIN_ATTR_EARLY_MAP";
Charan Teja Reddyc682e472017-04-20 19:11:20 +053074 case DOMAIN_ATTR_CB_STALL_DISABLE:
75 return "DOMAIN_ATTR_CB_STALL_DISABLE";
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -070076 default:
77 return "Unknown attr!";
78 }
79}
Susheel Khiania4417e72016-07-12 11:28:32 +053080#endif
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -070081
Mitchel Humpherys42296fb2015-06-23 16:29:16 -070082#ifdef CONFIG_IOMMU_DEBUG_TRACKING
83
84static DEFINE_MUTEX(iommu_debug_attachments_lock);
85static LIST_HEAD(iommu_debug_attachments);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -070086
Patrick Dalyee7a25f2017-04-05 18:05:02 -070087/*
88 * Each group may have more than one domain; but each domain may
89 * only have one group.
90 * Used by debug tools to display the name of the device(s) associated
91 * with a particular domain.
92 */
struct iommu_debug_attachment {
	struct iommu_domain *domain;	/* attached domain */
	struct iommu_group *group;	/* group this domain is attached to */
	struct list_head list;		/* entry in iommu_debug_attachments */
};
98
Susheel Khianie66aa5b2015-08-25 17:25:42 +053099void iommu_debug_attach_device(struct iommu_domain *domain,
100 struct device *dev)
101{
102 struct iommu_debug_attachment *attach;
Patrick Dalyee7a25f2017-04-05 18:05:02 -0700103 struct iommu_group *group;
104
Patrick Daly35af1bb2017-09-29 16:09:05 -0700105 group = dev->iommu_group;
Patrick Dalyee7a25f2017-04-05 18:05:02 -0700106 if (!group)
107 return;
108
Patrick Daly35af1bb2017-09-29 16:09:05 -0700109 mutex_lock(&iommu_debug_attachments_lock);
110 list_for_each_entry(attach, &iommu_debug_attachments, list)
111 if ((attach->domain == domain) && (attach->group == group))
112 goto out;
113
Patrick Dalyee7a25f2017-04-05 18:05:02 -0700114 attach = kzalloc(sizeof(*attach), GFP_KERNEL);
115 if (!attach)
Patrick Daly35af1bb2017-09-29 16:09:05 -0700116 goto out;
Patrick Dalyee7a25f2017-04-05 18:05:02 -0700117
118 attach->domain = domain;
119 attach->group = group;
120 INIT_LIST_HEAD(&attach->list);
Susheel Khianie66aa5b2015-08-25 17:25:42 +0530121
Patrick Dalyee7a25f2017-04-05 18:05:02 -0700122 list_add(&attach->list, &iommu_debug_attachments);
Patrick Daly35af1bb2017-09-29 16:09:05 -0700123out:
Patrick Dalyee7a25f2017-04-05 18:05:02 -0700124 mutex_unlock(&iommu_debug_attachments_lock);
125}
Susheel Khianie66aa5b2015-08-25 17:25:42 +0530126
Patrick Dalyee7a25f2017-04-05 18:05:02 -0700127void iommu_debug_domain_remove(struct iommu_domain *domain)
128{
129 struct iommu_debug_attachment *it, *tmp;
Susheel Khianie66aa5b2015-08-25 17:25:42 +0530130
Patrick Dalyee7a25f2017-04-05 18:05:02 -0700131 mutex_lock(&iommu_debug_attachments_lock);
132 list_for_each_entry_safe(it, tmp, &iommu_debug_attachments, list) {
133 if (it->domain != domain)
134 continue;
135 list_del(&it->list);
Patrick Dalyee7a25f2017-04-05 18:05:02 -0700136 kfree(it);
Susheel Khianie66aa5b2015-08-25 17:25:42 +0530137 }
138
139 mutex_unlock(&iommu_debug_attachments_lock);
140}
141
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700142#endif
143
144#ifdef CONFIG_IOMMU_TESTS
145
Susheel Khiania4417e72016-07-12 11:28:32 +0530146#ifdef CONFIG_64BIT
147
148#define kstrtoux kstrtou64
Patrick Daly9ef01862016-10-13 20:03:50 -0700149#define kstrtox_from_user kstrtoull_from_user
Susheel Khiania4417e72016-07-12 11:28:32 +0530150#define kstrtosize_t kstrtoul
151
152#else
153
154#define kstrtoux kstrtou32
Patrick Daly9ef01862016-10-13 20:03:50 -0700155#define kstrtox_from_user kstrtouint_from_user
Susheel Khiania4417e72016-07-12 11:28:32 +0530156#define kstrtosize_t kstrtouint
157
158#endif
159
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700160static LIST_HEAD(iommu_debug_devices);
161static struct dentry *debugfs_tests_dir;
Patrick Dalye4e39862015-11-20 20:00:50 -0800162static u32 iters_per_op = 1;
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -0700163static void *test_virt_addr;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700164
/* Per-device state backing the debugfs test files. */
struct iommu_debug_device {
	struct device *dev;
	struct iommu_domain *domain;	/* domain under test, when attached */
	struct dma_iommu_mapping *mapping;	/* arm_iommu mapping — presumably for DMA-API tests; set elsewhere in this file */
	u64 iova;	/* NOTE(review): looks like scratch state for iova/phys lookup tests — confirm against the file's other handlers */
	u64 phys;
	size_t len;
	struct list_head list;	/* entry in iommu_debug_devices */
};
174
175static int iommu_debug_build_phoney_sg_table(struct device *dev,
176 struct sg_table *table,
177 unsigned long total_size,
178 unsigned long chunk_size)
179{
180 unsigned long nents = total_size / chunk_size;
181 struct scatterlist *sg;
182 int i;
183 struct page *page;
184
185 if (!IS_ALIGNED(total_size, PAGE_SIZE))
186 return -EINVAL;
187 if (!IS_ALIGNED(total_size, chunk_size))
188 return -EINVAL;
189 if (sg_alloc_table(table, nents, GFP_KERNEL))
190 return -EINVAL;
191 page = alloc_pages(GFP_KERNEL, get_order(chunk_size));
192 if (!page)
193 goto free_table;
194
195 /* all the same page... why not. */
196 for_each_sg(table->sgl, sg, table->nents, i)
197 sg_set_page(sg, page, chunk_size, 0);
198
199 return 0;
200
201free_table:
202 sg_free_table(table);
203 return -ENOMEM;
204}
205
/*
 * Tear down a table built by iommu_debug_build_phoney_sg_table():
 * every nent shares the one page allocation, so free it exactly once
 * via the first entry, then release the table itself.
 */
static void iommu_debug_destroy_phoney_sg_table(struct device *dev,
						struct sg_table *table,
						unsigned long chunk_size)
{
	__free_pages(sg_page(table->sgl), get_order(chunk_size));
	sg_free_table(table);
}
213
214static const char * const _size_to_string(unsigned long size)
215{
216 switch (size) {
217 case SZ_4K:
218 return "4K";
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700219 case SZ_8K:
220 return "8K";
221 case SZ_16K:
222 return "16K";
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700223 case SZ_64K:
224 return "64K";
225 case SZ_2M:
226 return "2M";
227 case SZ_1M * 12:
228 return "12M";
229 case SZ_1M * 20:
230 return "20M";
231 }
232 return "unknown size, please add to _size_to_string";
233}
234
Patrick Dalye4e39862015-11-20 20:00:50 -0800235static int nr_iters_set(void *data, u64 val)
236{
237 if (!val)
238 val = 1;
239 if (val > 10000)
240 val = 10000;
241 *(u32 *)data = val;
242 return 0;
243}
244
/* debugfs getter for iters_per_op. */
static int nr_iters_get(void *data, u64 *val)
{
	*val = *(u32 *)data;
	return 0;
}
250
251DEFINE_SIMPLE_ATTRIBUTE(iommu_debug_nr_iters_ops,
252 nr_iters_get, nr_iters_set, "%llu\n");
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700253
Mitchel Humpherys020f90f2015-10-02 16:02:31 -0700254static void iommu_debug_device_profiling(struct seq_file *s, struct device *dev,
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700255 enum iommu_attr attrs[],
256 void *attr_values[], int nattrs,
Susheel Khiania4417e72016-07-12 11:28:32 +0530257 const size_t sizes[])
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700258{
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700259 int i;
Susheel Khiania4417e72016-07-12 11:28:32 +0530260 const size_t *sz;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700261 struct iommu_domain *domain;
262 unsigned long iova = 0x10000;
263 phys_addr_t paddr = 0xa000;
264
265 domain = iommu_domain_alloc(&platform_bus_type);
266 if (!domain) {
267 seq_puts(s, "Couldn't allocate domain\n");
268 return;
269 }
270
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700271 seq_puts(s, "Domain attributes: [ ");
272 for (i = 0; i < nattrs; ++i) {
273 /* not all attrs are ints, but this will get us by for now */
274 seq_printf(s, "%s=%d%s", iommu_debug_attr_to_string(attrs[i]),
275 *((int *)attr_values[i]),
276 i < nattrs ? " " : "");
Mitchel Humpherys679567c2015-08-28 10:51:24 -0700277 }
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700278 seq_puts(s, "]\n");
279 for (i = 0; i < nattrs; ++i) {
280 if (iommu_domain_set_attr(domain, attrs[i], attr_values[i])) {
281 seq_printf(s, "Couldn't set %d to the value at %p\n",
282 attrs[i], attr_values[i]);
Mitchel Humpherys020f90f2015-10-02 16:02:31 -0700283 goto out_domain_free;
284 }
285 }
286
Patrick Daly6dd80252017-04-17 20:41:59 -0700287 if (iommu_attach_group(domain, dev->iommu_group)) {
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700288 seq_puts(s,
289 "Couldn't attach new domain to device. Is it already attached?\n");
290 goto out_domain_free;
291 }
292
Patrick Dalye4e39862015-11-20 20:00:50 -0800293 seq_printf(s, "(average over %d iterations)\n", iters_per_op);
Patrick Daly3ca31e32015-11-20 20:33:04 -0800294 seq_printf(s, "%8s %19s %16s\n", "size", "iommu_map", "iommu_unmap");
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700295 for (sz = sizes; *sz; ++sz) {
Susheel Khiania4417e72016-07-12 11:28:32 +0530296 size_t size = *sz;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700297 size_t unmapped;
Patrick Daly3ca31e32015-11-20 20:33:04 -0800298 u64 map_elapsed_ns = 0, unmap_elapsed_ns = 0;
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700299 u64 map_elapsed_us = 0, unmap_elapsed_us = 0;
Patrick Daly3ca31e32015-11-20 20:33:04 -0800300 u32 map_elapsed_rem = 0, unmap_elapsed_rem = 0;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700301 struct timespec tbefore, tafter, diff;
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700302 int i;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700303
Patrick Dalye4e39862015-11-20 20:00:50 -0800304 for (i = 0; i < iters_per_op; ++i) {
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700305 getnstimeofday(&tbefore);
306 if (iommu_map(domain, iova, paddr, size,
307 IOMMU_READ | IOMMU_WRITE)) {
308 seq_puts(s, "Failed to map\n");
309 continue;
310 }
311 getnstimeofday(&tafter);
312 diff = timespec_sub(tafter, tbefore);
Patrick Daly3ca31e32015-11-20 20:33:04 -0800313 map_elapsed_ns += timespec_to_ns(&diff);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700314
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700315 getnstimeofday(&tbefore);
316 unmapped = iommu_unmap(domain, iova, size);
317 if (unmapped != size) {
318 seq_printf(s,
319 "Only unmapped %zx instead of %zx\n",
320 unmapped, size);
321 continue;
322 }
323 getnstimeofday(&tafter);
324 diff = timespec_sub(tafter, tbefore);
Patrick Daly3ca31e32015-11-20 20:33:04 -0800325 unmap_elapsed_ns += timespec_to_ns(&diff);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700326 }
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700327
Susheel Khiania4417e72016-07-12 11:28:32 +0530328 map_elapsed_ns = div_u64_rem(map_elapsed_ns, iters_per_op,
329 &map_elapsed_rem);
330 unmap_elapsed_ns = div_u64_rem(unmap_elapsed_ns, iters_per_op,
331 &unmap_elapsed_rem);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700332
Patrick Daly3ca31e32015-11-20 20:33:04 -0800333 map_elapsed_us = div_u64_rem(map_elapsed_ns, 1000,
334 &map_elapsed_rem);
335 unmap_elapsed_us = div_u64_rem(unmap_elapsed_ns, 1000,
336 &unmap_elapsed_rem);
337
338 seq_printf(s, "%8s %12lld.%03d us %9lld.%03d us\n",
339 _size_to_string(size),
340 map_elapsed_us, map_elapsed_rem,
341 unmap_elapsed_us, unmap_elapsed_rem);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700342 }
343
344 seq_putc(s, '\n');
Patrick Daly3ca31e32015-11-20 20:33:04 -0800345 seq_printf(s, "%8s %19s %16s\n", "size", "iommu_map_sg", "iommu_unmap");
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700346 for (sz = sizes; *sz; ++sz) {
Susheel Khiania4417e72016-07-12 11:28:32 +0530347 size_t size = *sz;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700348 size_t unmapped;
Patrick Daly3ca31e32015-11-20 20:33:04 -0800349 u64 map_elapsed_ns = 0, unmap_elapsed_ns = 0;
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700350 u64 map_elapsed_us = 0, unmap_elapsed_us = 0;
Patrick Daly3ca31e32015-11-20 20:33:04 -0800351 u32 map_elapsed_rem = 0, unmap_elapsed_rem = 0;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700352 struct timespec tbefore, tafter, diff;
353 struct sg_table table;
354 unsigned long chunk_size = SZ_4K;
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700355 int i;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700356
357 if (iommu_debug_build_phoney_sg_table(dev, &table, size,
358 chunk_size)) {
359 seq_puts(s,
360 "couldn't build phoney sg table! bailing...\n");
361 goto out_detach;
362 }
363
Patrick Dalye4e39862015-11-20 20:00:50 -0800364 for (i = 0; i < iters_per_op; ++i) {
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700365 getnstimeofday(&tbefore);
366 if (iommu_map_sg(domain, iova, table.sgl, table.nents,
367 IOMMU_READ | IOMMU_WRITE) != size) {
368 seq_puts(s, "Failed to map_sg\n");
369 goto next;
370 }
371 getnstimeofday(&tafter);
372 diff = timespec_sub(tafter, tbefore);
Patrick Daly3ca31e32015-11-20 20:33:04 -0800373 map_elapsed_ns += timespec_to_ns(&diff);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700374
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700375 getnstimeofday(&tbefore);
376 unmapped = iommu_unmap(domain, iova, size);
377 if (unmapped != size) {
378 seq_printf(s,
379 "Only unmapped %zx instead of %zx\n",
380 unmapped, size);
381 goto next;
382 }
383 getnstimeofday(&tafter);
384 diff = timespec_sub(tafter, tbefore);
Patrick Daly3ca31e32015-11-20 20:33:04 -0800385 unmap_elapsed_ns += timespec_to_ns(&diff);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700386 }
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700387
Susheel Khiania4417e72016-07-12 11:28:32 +0530388 map_elapsed_ns = div_u64_rem(map_elapsed_ns, iters_per_op,
389 &map_elapsed_rem);
390 unmap_elapsed_ns = div_u64_rem(unmap_elapsed_ns, iters_per_op,
391 &unmap_elapsed_rem);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700392
Patrick Daly3ca31e32015-11-20 20:33:04 -0800393 map_elapsed_us = div_u64_rem(map_elapsed_ns, 1000,
394 &map_elapsed_rem);
395 unmap_elapsed_us = div_u64_rem(unmap_elapsed_ns, 1000,
396 &unmap_elapsed_rem);
397
398 seq_printf(s, "%8s %12lld.%03d us %9lld.%03d us\n",
399 _size_to_string(size),
400 map_elapsed_us, map_elapsed_rem,
401 unmap_elapsed_us, unmap_elapsed_rem);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700402
403next:
404 iommu_debug_destroy_phoney_sg_table(dev, &table, chunk_size);
405 }
406
407out_detach:
Patrick Daly6dd80252017-04-17 20:41:59 -0700408 iommu_detach_group(domain, dev->iommu_group);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700409out_domain_free:
410 iommu_domain_free(domain);
411}
412
Mitchel Humpherys7cc56e42015-07-06 14:58:23 -0700413static int iommu_debug_profiling_show(struct seq_file *s, void *ignored)
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700414{
415 struct iommu_debug_device *ddev = s->private;
Susheel Khiania4417e72016-07-12 11:28:32 +0530416 const size_t sizes[] = { SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12,
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700417 SZ_1M * 20, 0 };
418 enum iommu_attr attrs[] = {
419 DOMAIN_ATTR_ATOMIC,
420 };
421 int htw_disable = 1, atomic = 1;
422 void *attr_values[] = { &htw_disable, &atomic };
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700423
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700424 iommu_debug_device_profiling(s, ddev->dev, attrs, attr_values,
425 ARRAY_SIZE(attrs), sizes);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700426
427 return 0;
428}
429
/* debugfs open: route reads through iommu_debug_profiling_show(). */
static int iommu_debug_profiling_open(struct inode *inode, struct file *file)
{
	return single_open(file, iommu_debug_profiling_show, inode->i_private);
}
434
/* File operations for the "profiling" debugfs file. */
static const struct file_operations iommu_debug_profiling_fops = {
	.open	 = iommu_debug_profiling_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};
441
Mitchel Humpherys020f90f2015-10-02 16:02:31 -0700442static int iommu_debug_secure_profiling_show(struct seq_file *s, void *ignored)
443{
444 struct iommu_debug_device *ddev = s->private;
Susheel Khiania4417e72016-07-12 11:28:32 +0530445 const size_t sizes[] = { SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12,
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700446 SZ_1M * 20, 0 };
Mitchel Humpherys020f90f2015-10-02 16:02:31 -0700447
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700448 enum iommu_attr attrs[] = {
449 DOMAIN_ATTR_ATOMIC,
450 DOMAIN_ATTR_SECURE_VMID,
451 };
452 int one = 1, secure_vmid = VMID_CP_PIXEL;
453 void *attr_values[] = { &one, &secure_vmid };
454
455 iommu_debug_device_profiling(s, ddev->dev, attrs, attr_values,
456 ARRAY_SIZE(attrs), sizes);
Mitchel Humpherys020f90f2015-10-02 16:02:31 -0700457
458 return 0;
459}
460
/* debugfs open: route reads through iommu_debug_secure_profiling_show(). */
static int iommu_debug_secure_profiling_open(struct inode *inode,
					     struct file *file)
{
	return single_open(file, iommu_debug_secure_profiling_show,
			   inode->i_private);
}
467
/* File operations for the "secure_profiling" debugfs file. */
static const struct file_operations iommu_debug_secure_profiling_fops = {
	.open	 = iommu_debug_secure_profiling_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};
474
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700475static int iommu_debug_profiling_fast_show(struct seq_file *s, void *ignored)
476{
477 struct iommu_debug_device *ddev = s->private;
478 size_t sizes[] = {SZ_4K, SZ_8K, SZ_16K, SZ_64K, 0};
479 enum iommu_attr attrs[] = {
480 DOMAIN_ATTR_FAST,
481 DOMAIN_ATTR_ATOMIC,
482 };
483 int one = 1;
484 void *attr_values[] = { &one, &one };
485
486 iommu_debug_device_profiling(s, ddev->dev, attrs, attr_values,
487 ARRAY_SIZE(attrs), sizes);
488
489 return 0;
490}
491
/* debugfs open: route reads through iommu_debug_profiling_fast_show(). */
static int iommu_debug_profiling_fast_open(struct inode *inode,
					   struct file *file)
{
	return single_open(file, iommu_debug_profiling_fast_show,
			   inode->i_private);
}
498
/* File operations for the "profiling_fast" debugfs file. */
static const struct file_operations iommu_debug_profiling_fast_fops = {
	.open	 = iommu_debug_profiling_fast_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};
505
/*
 * debugfs "profiling_fast_dma_api" file: time dma_map_single_attrs /
 * dma_unmap_single_attrs through a fast-SMMU arm_iommu mapping, 10
 * samples per experiment, printing each raw sample plus the average.
 *
 * Two experiments are run, differing only in the DMA attrs passed:
 * experiment 0 uses no extra attrs, experiment 1 adds
 * DMA_ATTR_SKIP_CPU_SYNC.
 * NOTE(review): the labels "not coherent"/"coherent" describe the
 * *effect* being simulated; the attr actually toggled is SKIP_CPU_SYNC
 * — confirm the labelling is intentional.
 */
static int iommu_debug_profiling_fast_dma_api_show(struct seq_file *s,
						 void *ignored)
{
	int i, experiment;
	struct iommu_debug_device *ddev = s->private;
	struct device *dev = ddev->dev;
	u64 map_elapsed_ns[10], unmap_elapsed_ns[10];
	struct dma_iommu_mapping *mapping;
	dma_addr_t dma_addr;
	void *virt;
	int fast = 1;
	const char * const extra_labels[] = {
		"not coherent",
		"coherent",
	};
	unsigned long extra_attrs[] = {
		0,
		DMA_ATTR_SKIP_CPU_SYNC,
	};

	/* 1518 is presumably a max Ethernet frame — TODO confirm; only
	 * SZ_4K of it is ever mapped below.
	 */
	virt = kmalloc(1518, GFP_KERNEL);
	if (!virt)
		goto out;

	/* a full 4GB iova space, as used by the fast mapper */
	mapping = arm_iommu_create_mapping(&platform_bus_type, 0, SZ_1G * 4ULL);
	if (!mapping) {
		seq_puts(s, "fast_smmu_create_mapping failed\n");
		goto out_kfree;
	}

	if (iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_FAST, &fast)) {
		seq_puts(s, "iommu_domain_set_attr failed\n");
		goto out_release_mapping;
	}

	if (arm_iommu_attach_device(dev, mapping)) {
		seq_puts(s, "fast_smmu_attach_device failed\n");
		goto out_release_mapping;
	}

	/* keep config clocks on so clock ramping doesn't skew the samples */
	if (iommu_enable_config_clocks(mapping->domain)) {
		seq_puts(s, "Couldn't enable clocks\n");
		goto out_detach;
	}
	for (experiment = 0; experiment < 2; ++experiment) {
		size_t map_avg = 0, unmap_avg = 0;

		for (i = 0; i < 10; ++i) {
			struct timespec tbefore, tafter, diff;
			u64 ns;

			getnstimeofday(&tbefore);
			dma_addr = dma_map_single_attrs(
				dev, virt, SZ_4K, DMA_TO_DEVICE,
				extra_attrs[experiment]);
			getnstimeofday(&tafter);
			diff = timespec_sub(tafter, tbefore);
			ns = timespec_to_ns(&diff);
			if (dma_mapping_error(dev, dma_addr)) {
				seq_puts(s, "dma_map_single failed\n");
				goto out_disable_config_clocks;
			}
			map_elapsed_ns[i] = ns;

			getnstimeofday(&tbefore);
			dma_unmap_single_attrs(
				dev, dma_addr, SZ_4K, DMA_TO_DEVICE,
				extra_attrs[experiment]);
			getnstimeofday(&tafter);
			diff = timespec_sub(tafter, tbefore);
			ns = timespec_to_ns(&diff);
			unmap_elapsed_ns[i] = ns;
		}

		/* raw samples plus average, map then unmap */
		seq_printf(s, "%13s %24s (ns): [", extra_labels[experiment],
			   "dma_map_single_attrs");
		for (i = 0; i < 10; ++i) {
			map_avg += map_elapsed_ns[i];
			seq_printf(s, "%5llu%s", map_elapsed_ns[i],
				   i < 9 ? ", " : "");
		}
		map_avg /= 10;
		seq_printf(s, "] (avg: %zu)\n", map_avg);

		seq_printf(s, "%13s %24s (ns): [", extra_labels[experiment],
			   "dma_unmap_single_attrs");
		for (i = 0; i < 10; ++i) {
			unmap_avg += unmap_elapsed_ns[i];
			seq_printf(s, "%5llu%s", unmap_elapsed_ns[i],
				   i < 9 ? ", " : "");
		}
		unmap_avg /= 10;
		seq_printf(s, "] (avg: %zu)\n", unmap_avg);
	}

out_disable_config_clocks:
	iommu_disable_config_clocks(mapping->domain);
out_detach:
	arm_iommu_detach_device(dev);
out_release_mapping:
	arm_iommu_release_mapping(mapping);
out_kfree:
	kfree(virt);
out:
	return 0;
}
612
/* debugfs open: route reads through the fast-DMA-API profiling show fn. */
static int iommu_debug_profiling_fast_dma_api_open(struct inode *inode,
						   struct file *file)
{
	return single_open(file, iommu_debug_profiling_fast_dma_api_show,
			   inode->i_private);
}
619
/* File operations for the "profiling_fast_dma_api" debugfs file. */
static const struct file_operations iommu_debug_profiling_fast_dma_api_fops = {
	.open	 = iommu_debug_profiling_fast_dma_api_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};
626
Mitchel Humpherys45fc7122015-12-11 15:22:15 -0800627static int __tlb_stress_sweep(struct device *dev, struct seq_file *s)
628{
629 int i, ret = 0;
Charan Teja Reddy29f61402017-02-09 20:44:29 +0530630 u64 iova;
631 const u64 max = SZ_1G * 4ULL - 1;
Mitchel Humpherys45fc7122015-12-11 15:22:15 -0800632 void *virt;
633 phys_addr_t phys;
634 dma_addr_t dma_addr;
635
636 /*
637 * we'll be doing 4K and 8K mappings. Need to own an entire 8K
638 * chunk that we can work with.
639 */
640 virt = (void *)__get_free_pages(GFP_KERNEL, get_order(SZ_8K));
641 phys = virt_to_phys(virt);
642
643 /* fill the whole 4GB space */
644 for (iova = 0, i = 0; iova < max; iova += SZ_8K, ++i) {
645 dma_addr = dma_map_single(dev, virt, SZ_8K, DMA_TO_DEVICE);
646 if (dma_addr == DMA_ERROR_CODE) {
647 dev_err(dev, "Failed map on iter %d\n", i);
648 ret = -EINVAL;
649 goto out;
650 }
651 }
652
653 if (dma_map_single(dev, virt, SZ_4K, DMA_TO_DEVICE) != DMA_ERROR_CODE) {
654 dev_err(dev,
655 "dma_map_single unexpectedly (VA should have been exhausted)\n");
656 ret = -EINVAL;
657 goto out;
658 }
659
660 /*
661 * free up 4K at the very beginning, then leave one 4K mapping,
662 * then free up 8K. This will result in the next 8K map to skip
663 * over the 4K hole and take the 8K one.
664 */
665 dma_unmap_single(dev, 0, SZ_4K, DMA_TO_DEVICE);
666 dma_unmap_single(dev, SZ_8K, SZ_4K, DMA_TO_DEVICE);
667 dma_unmap_single(dev, SZ_8K + SZ_4K, SZ_4K, DMA_TO_DEVICE);
668
669 /* remap 8K */
670 dma_addr = dma_map_single(dev, virt, SZ_8K, DMA_TO_DEVICE);
671 if (dma_addr != SZ_8K) {
672 dma_addr_t expected = SZ_8K;
673
674 dev_err(dev, "Unexpected dma_addr. got: %pa expected: %pa\n",
675 &dma_addr, &expected);
676 ret = -EINVAL;
677 goto out;
678 }
679
680 /*
681 * now remap 4K. We should get the first 4K chunk that was skipped
682 * over during the previous 8K map. If we missed a TLB invalidate
683 * at that point this should explode.
684 */
685 dma_addr = dma_map_single(dev, virt, SZ_4K, DMA_TO_DEVICE);
686 if (dma_addr != 0) {
687 dma_addr_t expected = 0;
688
689 dev_err(dev, "Unexpected dma_addr. got: %pa expected: %pa\n",
690 &dma_addr, &expected);
691 ret = -EINVAL;
692 goto out;
693 }
694
695 if (dma_map_single(dev, virt, SZ_4K, DMA_TO_DEVICE) != DMA_ERROR_CODE) {
696 dev_err(dev,
697 "dma_map_single unexpectedly after remaps (VA should have been exhausted)\n");
698 ret = -EINVAL;
699 goto out;
700 }
701
702 /* we're all full again. unmap everything. */
Charan Teja Reddy29f61402017-02-09 20:44:29 +0530703 for (iova = 0; iova < max; iova += SZ_8K)
704 dma_unmap_single(dev, (dma_addr_t)iova, SZ_8K, DMA_TO_DEVICE);
Mitchel Humpherys45fc7122015-12-11 15:22:15 -0800705
706out:
707 free_pages((unsigned long)virt, get_order(SZ_8K));
708 return ret;
709}
710
/*
 * Tiny Fibonacci generator: produces a deterministic, spread-out
 * sequence of multipliers used by __rand_va_sweep() to pick iovas.
 */
struct fib_state {
	unsigned long cur;
	unsigned long prev;
};

static void fib_init(struct fib_state *f)
{
	f->cur = f->prev = 1;
}

static unsigned long get_next_fib(struct fib_state *f)
{
	/*
	 * Sum in unsigned long: the old "int" intermediate overflowed
	 * (signed overflow is UB) and truncated on 64-bit once the
	 * sequence passed INT_MAX.
	 */
	unsigned long next = f->cur + f->prev;

	f->prev = f->cur;
	f->cur = next;
	return next;
}
729
730/*
731 * Not actually random. Just testing the fibs (and max - the fibs).
732 */
733static int __rand_va_sweep(struct device *dev, struct seq_file *s,
734 const size_t size)
735{
736 u64 iova;
Charan Teja Reddy29f61402017-02-09 20:44:29 +0530737 const u64 max = SZ_1G * 4ULL - 1;
Mitchel Humpherys45fc7122015-12-11 15:22:15 -0800738 int i, remapped, unmapped, ret = 0;
739 void *virt;
740 dma_addr_t dma_addr, dma_addr2;
741 struct fib_state fib;
742
743 virt = (void *)__get_free_pages(GFP_KERNEL, get_order(size));
744 if (!virt) {
745 if (size > SZ_8K) {
746 dev_err(dev,
747 "Failed to allocate %s of memory, which is a lot. Skipping test for this size\n",
748 _size_to_string(size));
749 return 0;
750 }
751 return -ENOMEM;
752 }
753
754 /* fill the whole 4GB space */
755 for (iova = 0, i = 0; iova < max; iova += size, ++i) {
756 dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
757 if (dma_addr == DMA_ERROR_CODE) {
758 dev_err(dev, "Failed map on iter %d\n", i);
759 ret = -EINVAL;
760 goto out;
761 }
762 }
763
764 /* now unmap "random" iovas */
765 unmapped = 0;
766 fib_init(&fib);
767 for (iova = get_next_fib(&fib) * size;
768 iova < max - size;
Charan Teja Reddy29f61402017-02-09 20:44:29 +0530769 iova = (u64)get_next_fib(&fib) * size) {
770 dma_addr = (dma_addr_t)(iova);
771 dma_addr2 = (dma_addr_t)((max + 1) - size - iova);
Mitchel Humpherys45fc7122015-12-11 15:22:15 -0800772 if (dma_addr == dma_addr2) {
773 WARN(1,
774 "%s test needs update! The random number sequence is folding in on itself and should be changed.\n",
775 __func__);
776 return -EINVAL;
777 }
778 dma_unmap_single(dev, dma_addr, size, DMA_TO_DEVICE);
779 dma_unmap_single(dev, dma_addr2, size, DMA_TO_DEVICE);
780 unmapped += 2;
781 }
782
783 /* and map until everything fills back up */
784 for (remapped = 0; ; ++remapped) {
785 dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
786 if (dma_addr == DMA_ERROR_CODE)
787 break;
788 }
789
790 if (unmapped != remapped) {
791 dev_err(dev,
792 "Unexpected random remap count! Unmapped %d but remapped %d\n",
793 unmapped, remapped);
794 ret = -EINVAL;
795 }
796
Charan Teja Reddy29f61402017-02-09 20:44:29 +0530797 for (iova = 0; iova < max; iova += size)
798 dma_unmap_single(dev, (dma_addr_t)iova, size, DMA_TO_DEVICE);
Mitchel Humpherys45fc7122015-12-11 15:22:15 -0800799
800out:
801 free_pages((unsigned long)virt, get_order(size));
802 return ret;
803}
804
805static int __check_mapping(struct device *dev, struct iommu_domain *domain,
806 dma_addr_t iova, phys_addr_t expected)
807{
808 phys_addr_t res = iommu_iova_to_phys_hard(domain, iova);
809 phys_addr_t res2 = iommu_iova_to_phys(domain, iova);
810
811 WARN(res != res2, "hard/soft iova_to_phys fns don't agree...");
812
813 if (res != expected) {
814 dev_err_ratelimited(dev,
815 "Bad translation for %pa! Expected: %pa Got: %pa\n",
816 &iova, &expected, &res);
817 return -EINVAL;
818 }
819
820 return 0;
821}
822
/*
 * Map the entire 4GB iova space with @size chunks, checking that each
 * dma_map_single() returns the next sequential iova.  If @domain is
 * given, additionally verify the translations near both ends of the
 * space, then confirm a further map fails (space exhausted).  Unmaps
 * everything and frees the buffer on all paths.
 *
 * Returns 0 on success (or when the allocation was too big to bother
 * with), negative errno on failure.
 */
static int __full_va_sweep(struct device *dev, struct seq_file *s,
			   const size_t size, struct iommu_domain *domain)
{
	u64 iova;
	dma_addr_t dma_addr;
	void *virt;
	phys_addr_t phys;
	const u64 max = SZ_1G * 4ULL - 1;
	int ret = 0, i;

	virt = (void *)__get_free_pages(GFP_KERNEL, get_order(size));
	if (!virt) {
		/* large allocations are allowed to fail without failing the test */
		if (size > SZ_8K) {
			dev_err(dev,
				"Failed to allocate %s of memory, which is a lot. Skipping test for this size\n",
				_size_to_string(size));
			return 0;
		}
		return -ENOMEM;
	}
	phys = virt_to_phys(virt);

	/* every chunk maps the same physical buffer; expect sequential iovas */
	for (iova = 0, i = 0; iova < max; iova += size, ++i) {
		unsigned long expected = iova;

		dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
		if (dma_addr != expected) {
			dev_err_ratelimited(dev,
					    "Unexpected iova on iter %d (expected: 0x%lx got: 0x%lx)\n",
					    i, expected,
					    (unsigned long)dma_addr);
			ret = -EINVAL;
			goto out;
		}
	}

	if (domain) {
		/* check every mapping from 0..6M */
		for (iova = 0, i = 0; iova < SZ_2M * 3; iova += size, ++i) {
			phys_addr_t expected = phys;

			if (__check_mapping(dev, domain, iova, expected)) {
				dev_err(dev, "iter: %d\n", i);
				ret = -EINVAL;
				goto out;
			}
		}
		/* and from 4G..4G-6M */
		for (iova = 0, i = 0; iova < SZ_2M * 3; iova += size, ++i) {
			phys_addr_t expected = phys;
			/* NOTE(review): SZ_1G * 4ULL won't fit an unsigned
			 * long on 32-bit — presumably this test only runs
			 * on 64-bit / LPAE configs; confirm.
			 */
			unsigned long theiova = ((SZ_1G * 4ULL) - size) - iova;

			if (__check_mapping(dev, domain, theiova, expected)) {
				dev_err(dev, "iter: %d\n", i);
				ret = -EINVAL;
				goto out;
			}
		}
	}

	/* at this point, our VA space should be full */
	dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
	if (dma_addr != DMA_ERROR_CODE) {
		dev_err_ratelimited(dev,
				    "dma_map_single succeeded when it should have failed. Got iova: 0x%lx\n",
				    (unsigned long)dma_addr);
		ret = -EINVAL;
	}

out:
	for (iova = 0; iova < max; iova += size)
		dma_unmap_single(dev, (dma_addr_t)iova, size, DMA_TO_DEVICE);

	free_pages((unsigned long)virt, get_order(size));
	return ret;
}
899
/* Print to both the kernel log (dev_err) and the debugfs seq file. */
#define ds_printf(d, s, fmt, ...) ({				\
		dev_err(d, fmt, ##__VA_ARGS__);			\
		seq_printf(s, fmt, ##__VA_ARGS__);		\
	})
904
905static int __functional_dma_api_va_test(struct device *dev, struct seq_file *s,
906 struct iommu_domain *domain, void *priv)
907{
908 int i, j, ret = 0;
909 size_t *sz, *sizes = priv;
910
911 for (j = 0; j < 1; ++j) {
912 for (sz = sizes; *sz; ++sz) {
913 for (i = 0; i < 2; ++i) {
914 ds_printf(dev, s, "Full VA sweep @%s %d",
915 _size_to_string(*sz), i);
916 if (__full_va_sweep(dev, s, *sz, domain)) {
917 ds_printf(dev, s, " -> FAILED\n");
918 ret = -EINVAL;
919 } else {
920 ds_printf(dev, s, " -> SUCCEEDED\n");
921 }
922 }
923 }
924 }
925
926 ds_printf(dev, s, "bonus map:");
927 if (__full_va_sweep(dev, s, SZ_4K, domain)) {
928 ds_printf(dev, s, " -> FAILED\n");
929 ret = -EINVAL;
930 } else {
931 ds_printf(dev, s, " -> SUCCEEDED\n");
932 }
933
934 for (sz = sizes; *sz; ++sz) {
935 for (i = 0; i < 2; ++i) {
936 ds_printf(dev, s, "Rand VA sweep @%s %d",
937 _size_to_string(*sz), i);
938 if (__rand_va_sweep(dev, s, *sz)) {
939 ds_printf(dev, s, " -> FAILED\n");
940 ret = -EINVAL;
941 } else {
942 ds_printf(dev, s, " -> SUCCEEDED\n");
943 }
944 }
945 }
946
947 ds_printf(dev, s, "TLB stress sweep");
948 if (__tlb_stress_sweep(dev, s)) {
949 ds_printf(dev, s, " -> FAILED\n");
950 ret = -EINVAL;
951 } else {
952 ds_printf(dev, s, " -> SUCCEEDED\n");
953 }
954
955 ds_printf(dev, s, "second bonus map:");
956 if (__full_va_sweep(dev, s, SZ_4K, domain)) {
957 ds_printf(dev, s, " -> FAILED\n");
958 ret = -EINVAL;
959 } else {
960 ds_printf(dev, s, " -> SUCCEEDED\n");
961 }
962
963 return ret;
964}
965
/*
 * Allocate a 742K coherent buffer, then repeatedly scribble on it 1K at a
 * time to verify that the CPU can actually read back what it wrote through
 * the coherent mapping.  Returns 0 on success, -EINVAL on any failure.
 */
static int __functional_dma_api_alloc_test(struct device *dev,
					   struct seq_file *s,
					   struct iommu_domain *domain,
					   void *ignored)
{
	size_t size = SZ_1K * 742;	/* deliberately not a power of two */
	int ret = 0;
	u8 *data;
	dma_addr_t iova;

	/* Make sure we can allocate and use a buffer */
	ds_printf(dev, s, "Allocating coherent buffer");
	data = dma_alloc_coherent(dev, size, &iova, GFP_KERNEL);
	if (!data) {
		ds_printf(dev, s, " -> FAILED\n");
		ret = -EINVAL;
	} else {
		int i;

		ds_printf(dev, s, " -> SUCCEEDED\n");
		ds_printf(dev, s, "Using coherent buffer");
		for (i = 0; i < 742; ++i) {
			int ind = SZ_1K * i;
			u8 *p = data + ind;
			u8 val = i % 255;

			/* re-poison the whole buffer each iteration */
			memset(data, 0xa5, size);
			*p = val;
			(*p)++;
			if ((*p) != val + 1) {
				ds_printf(dev, s,
					  " -> FAILED on iter %d since %d != %d\n",
					  i, *p, val + 1);
				ret = -EINVAL;
			}
		}
		if (!ret)
			ds_printf(dev, s, " -> SUCCEEDED\n");
		dma_free_coherent(dev, size, data, iova);
	}

	return ret;
}
1009
1010static int __functional_dma_api_basic_test(struct device *dev,
1011 struct seq_file *s,
1012 struct iommu_domain *domain,
1013 void *ignored)
1014{
1015 size_t size = 1518;
1016 int i, j, ret = 0;
1017 u8 *data;
1018 dma_addr_t iova;
1019 phys_addr_t pa, pa2;
1020
1021 ds_printf(dev, s, "Basic DMA API test");
1022 /* Make sure we can allocate and use a buffer */
1023 for (i = 0; i < 1000; ++i) {
1024 data = kmalloc(size, GFP_KERNEL);
1025 if (!data) {
1026 ds_printf(dev, s, " -> FAILED\n");
1027 ret = -EINVAL;
1028 goto out;
1029 }
1030 memset(data, 0xa5, size);
1031 iova = dma_map_single(dev, data, size, DMA_TO_DEVICE);
1032 pa = iommu_iova_to_phys(domain, iova);
1033 pa2 = iommu_iova_to_phys_hard(domain, iova);
1034 if (pa != pa2) {
1035 dev_err(dev,
1036 "iova_to_phys doesn't match iova_to_phys_hard: %pa != %pa\n",
1037 &pa, &pa2);
1038 ret = -EINVAL;
1039 goto out;
1040 }
1041 pa2 = virt_to_phys(data);
1042 if (pa != pa2) {
1043 dev_err(dev,
1044 "iova_to_phys doesn't match virt_to_phys: %pa != %pa\n",
1045 &pa, &pa2);
1046 ret = -EINVAL;
1047 goto out;
1048 }
1049 dma_unmap_single(dev, iova, size, DMA_TO_DEVICE);
1050 for (j = 0; j < size; ++j) {
1051 if (data[j] != 0xa5) {
1052 dev_err(dev, "data[%d] != 0xa5\n", data[j]);
1053 ret = -EINVAL;
1054 goto out;
1055 }
1056 }
1057 kfree(data);
1058 }
1059
1060out:
1061 if (ret)
1062 ds_printf(dev, s, " -> FAILED\n");
1063 else
1064 ds_printf(dev, s, " -> SUCCEEDED\n");
1065
1066 return ret;
1067}
1068
1069/* Creates a fresh fast mapping and applies @fn to it */
1070static int __apply_to_new_mapping(struct seq_file *s,
1071 int (*fn)(struct device *dev,
1072 struct seq_file *s,
1073 struct iommu_domain *domain,
1074 void *priv),
1075 void *priv)
1076{
1077 struct dma_iommu_mapping *mapping;
1078 struct iommu_debug_device *ddev = s->private;
1079 struct device *dev = ddev->dev;
Shiraz Hashim3c28c962016-07-04 15:05:35 +05301080 int ret = -EINVAL, fast = 1;
Mitchel Humpherys45fc7122015-12-11 15:22:15 -08001081 phys_addr_t pt_phys;
1082
Charan Teja Reddy29f61402017-02-09 20:44:29 +05301083 mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
1084 (SZ_1G * 4ULL));
Mitchel Humpherys45fc7122015-12-11 15:22:15 -08001085 if (!mapping)
1086 goto out;
1087
1088 if (iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_FAST, &fast)) {
1089 seq_puts(s, "iommu_domain_set_attr failed\n");
1090 goto out_release_mapping;
1091 }
1092
1093 if (arm_iommu_attach_device(dev, mapping))
1094 goto out_release_mapping;
1095
1096 if (iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_PT_BASE_ADDR,
1097 &pt_phys)) {
1098 ds_printf(dev, s, "Couldn't get page table base address\n");
1099 goto out_release_mapping;
1100 }
1101
1102 dev_err(dev, "testing with pgtables at %pa\n", &pt_phys);
1103 if (iommu_enable_config_clocks(mapping->domain)) {
1104 ds_printf(dev, s, "Couldn't enable clocks\n");
1105 goto out_release_mapping;
1106 }
1107 ret = fn(dev, s, mapping->domain, priv);
1108 iommu_disable_config_clocks(mapping->domain);
1109
1110 arm_iommu_detach_device(dev);
1111out_release_mapping:
1112 arm_iommu_release_mapping(mapping);
1113out:
1114 seq_printf(s, "%s\n", ret ? "FAIL" : "SUCCESS");
1115 return 0;
1116}
1117
1118static int iommu_debug_functional_fast_dma_api_show(struct seq_file *s,
1119 void *ignored)
1120{
1121 size_t sizes[] = {SZ_4K, SZ_8K, SZ_16K, SZ_64K, 0};
1122 int ret = 0;
1123
1124 ret |= __apply_to_new_mapping(s, __functional_dma_api_alloc_test, NULL);
1125 ret |= __apply_to_new_mapping(s, __functional_dma_api_basic_test, NULL);
1126 ret |= __apply_to_new_mapping(s, __functional_dma_api_va_test, sizes);
1127 return ret;
1128}
1129
/* debugfs open: bind the fast-DMA-API test "show" to this file. */
static int iommu_debug_functional_fast_dma_api_open(struct inode *inode,
						    struct file *file)
{
	return single_open(file, iommu_debug_functional_fast_dma_api_show,
			   inode->i_private);
}
1136
/* Read-only debugfs file: reading it runs the fast DMA API test suite. */
static const struct file_operations iommu_debug_functional_fast_dma_api_fops = {
	.open	 = iommu_debug_functional_fast_dma_api_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};
1143
/*
 * debugfs "show" handler: runs the coherent-alloc and basic DMA API tests
 * against a standard (non-fast) ARM IOMMU mapping.  Always returns 0; the
 * FAIL/SUCCESS verdict is written to the seq file.
 */
static int iommu_debug_functional_arm_dma_api_show(struct seq_file *s,
						   void *ignored)
{
	struct dma_iommu_mapping *mapping;
	struct iommu_debug_device *ddev = s->private;
	struct device *dev = ddev->dev;
	size_t sizes[] = {SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12, 0};
	int ret = -EINVAL;

	/* Mapping covers (almost) the entire 32-bit address space */
	mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
					   (SZ_1G * 4ULL - 1));
	if (!mapping)
		goto out;

	if (arm_iommu_attach_device(dev, mapping))
		goto out_release_mapping;

	ret = __functional_dma_api_alloc_test(dev, s, mapping->domain, sizes);
	ret |= __functional_dma_api_basic_test(dev, s, mapping->domain, sizes);

	arm_iommu_detach_device(dev);
out_release_mapping:
	arm_iommu_release_mapping(mapping);
out:
	seq_printf(s, "%s\n", ret ? "FAIL" : "SUCCESS");
	return 0;
}
1172
/* debugfs open: bind the ARM-DMA-API test "show" to this file. */
static int iommu_debug_functional_arm_dma_api_open(struct inode *inode,
						   struct file *file)
{
	return single_open(file, iommu_debug_functional_arm_dma_api_show,
			   inode->i_private);
}
1179
/* Read-only debugfs file: reading it runs the ARM DMA API test suite. */
static const struct file_operations iommu_debug_functional_arm_dma_api_fops = {
	.open	 = iommu_debug_functional_arm_dma_api_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};
1186
Mitchel Humpherysac4c38b2015-07-30 19:40:21 -07001187static int iommu_debug_attach_do_attach(struct iommu_debug_device *ddev,
1188 int val, bool is_secure)
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001189{
Patrick Daly6dd80252017-04-17 20:41:59 -07001190 struct iommu_group *group = ddev->dev->iommu_group;
1191
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001192 ddev->domain = iommu_domain_alloc(&platform_bus_type);
1193 if (!ddev->domain) {
1194 pr_err("Couldn't allocate domain\n");
1195 return -ENOMEM;
1196 }
1197
Mitchel Humpherysac4c38b2015-07-30 19:40:21 -07001198 if (is_secure && iommu_domain_set_attr(ddev->domain,
1199 DOMAIN_ATTR_SECURE_VMID,
1200 &val)) {
1201 pr_err("Couldn't set secure vmid to %d\n", val);
1202 goto out_domain_free;
1203 }
1204
Patrick Daly6dd80252017-04-17 20:41:59 -07001205 if (iommu_attach_group(ddev->domain, group)) {
1206 dev_err(ddev->dev, "Couldn't attach new domain to device\n");
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001207 goto out_domain_free;
1208 }
1209
1210 return 0;
1211
1212out_domain_free:
1213 iommu_domain_free(ddev->domain);
1214 ddev->domain = NULL;
1215 return -EIO;
1216}
1217
/*
 * Common implementation for the dma_attach debugfs write: a non-zero value
 * creates a 4G ARM IOMMU mapping and attaches it to the device; zero
 * detaches and releases the mapping previously saved in ddev->mapping.
 * Returns @count on success, negative errno on failure.
 */
static ssize_t __iommu_debug_dma_attach_write(struct file *file,
					      const char __user *ubuf,
					      size_t count, loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	struct device *dev = ddev->dev;
	struct dma_iommu_mapping *dma_mapping;
	ssize_t retval = -EINVAL;
	int val;

	if (kstrtoint_from_user(ubuf, count, 0, &val)) {
		pr_err("Invalid format. Expected a hex or decimal integer");
		retval = -EFAULT;
		goto out;
	}

	if (val) {
		/* already attached iff both the mapping and its domain exist */
		if (dev->archdata.mapping)
			if (dev->archdata.mapping->domain) {
				pr_err("Already attached.\n");
				retval = -EINVAL;
				goto out;
			}
		/* archdata.iommu must be clear when nothing is attached */
		if (WARN(dev->archdata.iommu,
			 "Attachment tracking out of sync with device\n")) {
			retval = -EINVAL;
			goto out;
		}

		dma_mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
						       (SZ_1G * 4ULL));

		if (!dma_mapping)
			goto out;

		if (arm_iommu_attach_device(dev, dma_mapping))
			goto out_release_mapping;

		/* remember the mapping so the detach path can release it */
		ddev->mapping = dma_mapping;
		pr_err("Attached\n");
	} else {
		if (!dev->archdata.mapping) {
			pr_err("No mapping. Did you already attach?\n");
			retval = -EINVAL;
			goto out;
		}
		if (!dev->archdata.mapping->domain) {
			pr_err("No domain. Did you already attach?\n");
			retval = -EINVAL;
			goto out;
		}
		arm_iommu_detach_device(dev);
		arm_iommu_release_mapping(ddev->mapping);
		pr_err("Detached\n");
	}
	retval = count;
	return retval;

out_release_mapping:
	arm_iommu_release_mapping(dma_mapping);
out:
	return retval;
}
1281
/*
 * Common implementation for the attach/secure_attach debugfs writes: a
 * non-zero value allocates and attaches a debug iommu domain (secure, with
 * that value as vmid, when @is_secure is set); zero detaches and frees it.
 * Returns @count on success, negative errno on failure.
 */
static ssize_t __iommu_debug_attach_write(struct file *file,
					  const char __user *ubuf,
					  size_t count, loff_t *offset,
					  bool is_secure)
{
	struct iommu_debug_device *ddev = file->private_data;
	struct device *dev = ddev->dev;
	struct iommu_domain *domain;
	ssize_t retval;
	int val;

	if (kstrtoint_from_user(ubuf, count, 0, &val)) {
		pr_err("Invalid format. Expected a hex or decimal integer");
		retval = -EFAULT;
		goto out;
	}

	if (val) {
		if (ddev->domain) {
			pr_err("Iommu-Debug is already attached?\n");
			retval = -EINVAL;
			goto out;
		}

		/* refuse to attach if another driver owns the device's iommu */
		domain = iommu_get_domain_for_dev(dev);
		if (domain) {
			pr_err("Another driver is using this device's iommu\n"
				"Iommu-Debug cannot be used concurrently\n");
			retval = -EINVAL;
			goto out;
		}
		if (iommu_debug_attach_do_attach(ddev, val, is_secure)) {
			retval = -EIO;
			goto out;
		}
		pr_err("Attached\n");
	} else {
		if (!ddev->domain) {
			pr_err("Iommu-Debug is not attached?\n");
			retval = -EINVAL;
			goto out;
		}
		iommu_detach_group(ddev->domain, dev->iommu_group);
		iommu_domain_free(ddev->domain);
		ddev->domain = NULL;
		pr_err("Detached\n");
	}

	retval = count;
out:
	return retval;
}
1334
/* debugfs write: thin wrapper around __iommu_debug_dma_attach_write(). */
static ssize_t iommu_debug_dma_attach_write(struct file *file,
					    const char __user *ubuf,
					    size_t count, loff_t *offset)
{
	return __iommu_debug_dma_attach_write(file, ubuf, count, offset);

}
1342
1343static ssize_t iommu_debug_dma_attach_read(struct file *file, char __user *ubuf,
1344 size_t count, loff_t *offset)
1345{
1346 struct iommu_debug_device *ddev = file->private_data;
1347 struct device *dev = ddev->dev;
1348 char c[2];
1349
1350 if (*offset)
1351 return 0;
1352
1353 if (!dev->archdata.mapping)
1354 c[0] = '0';
1355 else
1356 c[0] = dev->archdata.mapping->domain ? '1' : '0';
1357
1358 c[1] = '\n';
1359 if (copy_to_user(ubuf, &c, 2)) {
1360 pr_err("copy_to_user failed\n");
1361 return -EFAULT;
1362 }
1363 *offset = 1; /* non-zero means we're done */
1364
1365 return 2;
1366}
1367
/* debugfs "dma_attach" file: write 1/0 to attach/detach, read the state. */
static const struct file_operations iommu_debug_dma_attach_fops = {
	.open	= simple_open,
	.write	= iommu_debug_dma_attach_write,
	.read	= iommu_debug_dma_attach_read,
};
1373
1374static ssize_t iommu_debug_test_virt_addr_read(struct file *file,
1375 char __user *ubuf,
1376 size_t count, loff_t *offset)
1377{
1378 char buf[100];
1379 ssize_t retval;
1380 size_t buflen;
1381 int buf_len = sizeof(buf);
1382
1383 if (*offset)
1384 return 0;
1385
1386 memset(buf, 0, buf_len);
1387
1388 if (!test_virt_addr)
1389 strlcpy(buf, "FAIL\n", buf_len);
1390 else
1391 snprintf(buf, buf_len, "0x%pK\n", test_virt_addr);
1392
1393 buflen = strlen(buf);
1394 if (copy_to_user(ubuf, buf, buflen)) {
1395 pr_err("Couldn't copy_to_user\n");
1396 retval = -EFAULT;
1397 } else {
1398 *offset = 1; /* non-zero means we're done */
1399 retval = buflen;
1400 }
1401
1402 return retval;
1403}
1404
/* Read-only debugfs file exposing the test buffer's virtual address. */
static const struct file_operations iommu_debug_test_virt_addr_fops = {
	.open	= simple_open,
	.read	= iommu_debug_test_virt_addr_read,
};
1409
/* debugfs write: non-secure variant of the attach/detach command. */
static ssize_t iommu_debug_attach_write(struct file *file,
					  const char __user *ubuf,
					  size_t count, loff_t *offset)
{
	return __iommu_debug_attach_write(file, ubuf, count, offset,
					  false);

}
1418
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001419static ssize_t iommu_debug_attach_read(struct file *file, char __user *ubuf,
1420 size_t count, loff_t *offset)
1421{
1422 struct iommu_debug_device *ddev = file->private_data;
1423 char c[2];
1424
1425 if (*offset)
1426 return 0;
1427
1428 c[0] = ddev->domain ? '1' : '0';
1429 c[1] = '\n';
1430 if (copy_to_user(ubuf, &c, 2)) {
1431 pr_err("copy_to_user failed\n");
1432 return -EFAULT;
1433 }
1434 *offset = 1; /* non-zero means we're done */
1435
1436 return 2;
1437}
1438
/* debugfs "attach" file: write 1/0 to attach/detach, read the state. */
static const struct file_operations iommu_debug_attach_fops = {
	.open	= simple_open,
	.write	= iommu_debug_attach_write,
	.read	= iommu_debug_attach_read,
};
1444
/* debugfs write: secure variant — the written value is used as the vmid. */
static ssize_t iommu_debug_attach_write_secure(struct file *file,
					       const char __user *ubuf,
					       size_t count, loff_t *offset)
{
	return __iommu_debug_attach_write(file, ubuf, count, offset,
					  true);
}
1452
/* debugfs "secure_attach" file: like "attach" but with a secure vmid. */
static const struct file_operations iommu_debug_secure_attach_fops = {
	.open	= simple_open,
	.write	= iommu_debug_attach_write_secure,
	.read	= iommu_debug_attach_read,
};
1458
/*
 * debugfs write: parse an iova from the user and save it in ddev->iova for
 * subsequent PTE lookups via the pte read handler.  On a parse failure the
 * saved iova is reset to 0 and -EINVAL is returned.
 */
static ssize_t iommu_debug_pte_write(struct file *file,
				      const char __user *ubuf,
				      size_t count, loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	dma_addr_t iova;

	if (kstrtox_from_user(ubuf, count, 0, &iova)) {
		pr_err("Invalid format for iova\n");
		ddev->iova = 0;
		return -EINVAL;
	}

	ddev->iova = iova;
	pr_err("Saved iova=%pa for future PTE commands\n", &iova);
	return count;
}
1476
1477
/*
 * debugfs read: look up the page table entry for the previously-saved
 * ddev->iova in the device's attached DMA mapping and print it, or "FAIL"
 * when no PTE exists.  Requires a prior dma_attach.
 */
static ssize_t iommu_debug_pte_read(struct file *file, char __user *ubuf,
				    size_t count, loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	struct device *dev = ddev->dev;
	uint64_t pte;
	char buf[100];
	ssize_t retval;
	size_t buflen;

	if (!dev->archdata.mapping) {
		pr_err("No mapping. Did you already attach?\n");
		return -EINVAL;
	}
	if (!dev->archdata.mapping->domain) {
		pr_err("No domain. Did you already attach?\n");
		return -EINVAL;
	}

	if (*offset)
		return 0;

	memset(buf, 0, sizeof(buf));

	pte = iommu_iova_to_pte(dev->archdata.mapping->domain,
			ddev->iova);

	if (!pte)
		strlcpy(buf, "FAIL\n", sizeof(buf));
	else
		snprintf(buf, sizeof(buf), "pte=%016llx\n", pte);

	buflen = strlen(buf);
	if (copy_to_user(ubuf, buf, buflen)) {
		pr_err("Couldn't copy_to_user\n");
		retval = -EFAULT;
	} else {
		*offset = 1; /* non-zero means we're done */
		retval = buflen;
	}

	return retval;
}
1521
/* debugfs "pte" file: write an iova, read back its page table entry. */
static const struct file_operations iommu_debug_pte_fops = {
	.open	= simple_open,
	.write	= iommu_debug_pte_write,
	.read	= iommu_debug_pte_read,
};
1527
/*
 * debugfs write: parse an iova from the user and save it in ddev->iova for
 * subsequent ATOS translations.  On a parse failure the saved iova is reset
 * to 0 and -EINVAL is returned.
 */
static ssize_t iommu_debug_atos_write(struct file *file,
				      const char __user *ubuf,
				      size_t count, loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	dma_addr_t iova;

	if (kstrtox_from_user(ubuf, count, 0, &iova)) {
		pr_err("Invalid format for iova\n");
		ddev->iova = 0;
		return -EINVAL;
	}

	ddev->iova = iova;
	pr_err("Saved iova=%pa for future ATOS commands\n", &iova);
	return count;
}
1545
1546static ssize_t iommu_debug_atos_read(struct file *file, char __user *ubuf,
1547 size_t count, loff_t *offset)
1548{
1549 struct iommu_debug_device *ddev = file->private_data;
1550 phys_addr_t phys;
1551 char buf[100];
1552 ssize_t retval;
1553 size_t buflen;
1554
1555 if (!ddev->domain) {
1556 pr_err("No domain. Did you already attach?\n");
1557 return -EINVAL;
1558 }
1559
1560 if (*offset)
1561 return 0;
1562
1563 memset(buf, 0, 100);
1564
1565 phys = iommu_iova_to_phys_hard(ddev->domain, ddev->iova);
Mitchel Humpherysff93b1e2016-04-29 11:41:59 -07001566 if (!phys) {
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001567 strlcpy(buf, "FAIL\n", 100);
Mitchel Humpherysff93b1e2016-04-29 11:41:59 -07001568 phys = iommu_iova_to_phys(ddev->domain, ddev->iova);
1569 dev_err(ddev->dev, "ATOS for %pa failed. Software walk returned: %pa\n",
1570 &ddev->iova, &phys);
1571 } else {
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001572 snprintf(buf, 100, "%pa\n", &phys);
Mitchel Humpherysff93b1e2016-04-29 11:41:59 -07001573 }
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001574
1575 buflen = strlen(buf);
1576 if (copy_to_user(ubuf, buf, buflen)) {
1577 pr_err("Couldn't copy_to_user\n");
1578 retval = -EFAULT;
1579 } else {
1580 *offset = 1; /* non-zero means we're done */
1581 retval = buflen;
1582 }
1583
1584 return retval;
1585}
1586
/* debugfs "atos" file: write an iova, read back its physical address. */
static const struct file_operations iommu_debug_atos_fops = {
	.open	= simple_open,
	.write	= iommu_debug_atos_write,
	.read	= iommu_debug_atos_read,
};
1592
/*
 * debugfs read: hardware-translate (ATOS) the previously-saved ddev->iova
 * through the device's attached DMA mapping's domain and report the
 * physical address, or "FAIL" when the walk comes back empty.  Requires a
 * prior dma_attach.
 */
static ssize_t iommu_debug_dma_atos_read(struct file *file, char __user *ubuf,
				     size_t count, loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	struct device *dev = ddev->dev;
	phys_addr_t phys;
	char buf[100];
	ssize_t retval;
	size_t buflen;

	if (!dev->archdata.mapping) {
		pr_err("No mapping. Did you already attach?\n");
		return -EINVAL;
	}
	if (!dev->archdata.mapping->domain) {
		pr_err("No domain. Did you already attach?\n");
		return -EINVAL;
	}

	if (*offset)
		return 0;

	memset(buf, 0, sizeof(buf));

	phys = iommu_iova_to_phys_hard(dev->archdata.mapping->domain,
			ddev->iova);
	if (!phys)
		strlcpy(buf, "FAIL\n", sizeof(buf));
	else
		snprintf(buf, sizeof(buf), "%pa\n", &phys);

	buflen = strlen(buf);
	if (copy_to_user(ubuf, buf, buflen)) {
		pr_err("Couldn't copy_to_user\n");
		retval = -EFAULT;
	} else {
		*offset = 1; /* non-zero means we're done */
		retval = buflen;
	}

	return retval;
}
1635
/*
 * debugfs "dma_atos" file.  Note: .write deliberately reuses the plain
 * iommu_debug_atos_write handler — both just stash an iova in ddev->iova.
 */
static const struct file_operations iommu_debug_dma_atos_fops = {
	.open	= simple_open,
	.write	= iommu_debug_atos_write,
	.read	= iommu_debug_dma_atos_read,
};
1641
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001642static ssize_t iommu_debug_map_write(struct file *file, const char __user *ubuf,
1643 size_t count, loff_t *offset)
1644{
Shiraz Hashim3c28c962016-07-04 15:05:35 +05301645 ssize_t retval = -EINVAL;
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001646 int ret;
1647 char *comma1, *comma2, *comma3;
1648 char buf[100];
1649 dma_addr_t iova;
1650 phys_addr_t phys;
1651 size_t size;
1652 int prot;
1653 struct iommu_debug_device *ddev = file->private_data;
1654
1655 if (count >= 100) {
1656 pr_err("Value too large\n");
1657 return -EINVAL;
1658 }
1659
1660 if (!ddev->domain) {
1661 pr_err("No domain. Did you already attach?\n");
1662 return -EINVAL;
1663 }
1664
1665 memset(buf, 0, 100);
1666
1667 if (copy_from_user(buf, ubuf, count)) {
1668 pr_err("Couldn't copy from user\n");
1669 retval = -EFAULT;
1670 }
1671
1672 comma1 = strnchr(buf, count, ',');
1673 if (!comma1)
1674 goto invalid_format;
1675
1676 comma2 = strnchr(comma1 + 1, count, ',');
1677 if (!comma2)
1678 goto invalid_format;
1679
1680 comma3 = strnchr(comma2 + 1, count, ',');
1681 if (!comma3)
1682 goto invalid_format;
1683
1684 /* split up the words */
1685 *comma1 = *comma2 = *comma3 = '\0';
1686
Susheel Khiania4417e72016-07-12 11:28:32 +05301687 if (kstrtoux(buf, 0, &iova))
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001688 goto invalid_format;
1689
Susheel Khiania4417e72016-07-12 11:28:32 +05301690 if (kstrtoux(comma1 + 1, 0, &phys))
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001691 goto invalid_format;
1692
Susheel Khiania4417e72016-07-12 11:28:32 +05301693 if (kstrtosize_t(comma2 + 1, 0, &size))
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001694 goto invalid_format;
1695
1696 if (kstrtoint(comma3 + 1, 0, &prot))
1697 goto invalid_format;
1698
1699 ret = iommu_map(ddev->domain, iova, phys, size, prot);
1700 if (ret) {
1701 pr_err("iommu_map failed with %d\n", ret);
1702 retval = -EIO;
1703 goto out;
1704 }
1705
1706 retval = count;
1707 pr_err("Mapped %pa to %pa (len=0x%zx, prot=0x%x)\n",
1708 &iova, &phys, size, prot);
1709out:
1710 return retval;
1711
1712invalid_format:
1713 pr_err("Invalid format. Expected: iova,phys,len,prot where `prot' is the bitwise OR of IOMMU_READ, IOMMU_WRITE, etc.\n");
1714 return -EINVAL;
1715}
1716
/* Write-only debugfs file: "iova,phys,len,prot" creates an iommu mapping. */
static const struct file_operations iommu_debug_map_fops = {
	.open	= simple_open,
	.write	= iommu_debug_map_write,
};
1721
/*
 * Performs DMA mapping of a given virtual address and size to an iova address.
 * User input format: (addr,len,dma attr) where dma attr is:
 * 0: normal mapping
 * 1: force coherent mapping
 * 2: force non-coherent mapping
 * 3: use system cache
 */
1730static ssize_t iommu_debug_dma_map_write(struct file *file,
1731 const char __user *ubuf, size_t count, loff_t *offset)
1732{
1733 ssize_t retval = -EINVAL;
1734 int ret;
1735 char *comma1, *comma2;
1736 char buf[100];
1737 unsigned long addr;
1738 void *v_addr;
1739 dma_addr_t iova;
1740 size_t size;
1741 unsigned int attr;
1742 unsigned long dma_attrs;
1743 struct iommu_debug_device *ddev = file->private_data;
1744 struct device *dev = ddev->dev;
1745
1746 if (count >= sizeof(buf)) {
1747 pr_err("Value too large\n");
1748 return -EINVAL;
1749 }
1750
1751 if (!dev->archdata.mapping) {
1752 pr_err("No mapping. Did you already attach?\n");
1753 retval = -EINVAL;
1754 goto out;
1755 }
1756 if (!dev->archdata.mapping->domain) {
1757 pr_err("No domain. Did you already attach?\n");
1758 retval = -EINVAL;
1759 goto out;
1760 }
1761
1762 memset(buf, 0, sizeof(buf));
1763
1764 if (copy_from_user(buf, ubuf, count)) {
1765 pr_err("Couldn't copy from user\n");
1766 retval = -EFAULT;
1767 goto out;
1768 }
1769
1770 comma1 = strnchr(buf, count, ',');
1771 if (!comma1)
1772 goto invalid_format;
1773
1774 comma2 = strnchr(comma1 + 1, count, ',');
1775 if (!comma2)
1776 goto invalid_format;
1777
1778 *comma1 = *comma2 = '\0';
1779
1780 if (kstrtoul(buf, 0, &addr))
1781 goto invalid_format;
1782 v_addr = (void *)addr;
1783
1784 if (kstrtosize_t(comma1 + 1, 0, &size))
1785 goto invalid_format;
1786
1787 if (kstrtouint(comma2 + 1, 0, &attr))
1788 goto invalid_format;
1789
1790 if (v_addr < test_virt_addr || v_addr > (test_virt_addr + SZ_1M - 1))
1791 goto invalid_addr;
1792
1793 if (attr == 0)
1794 dma_attrs = 0;
1795 else if (attr == 1)
1796 dma_attrs = DMA_ATTR_FORCE_COHERENT;
1797 else if (attr == 2)
1798 dma_attrs = DMA_ATTR_FORCE_NON_COHERENT;
Sudarshan Rajagopalan5d6c6f52017-05-19 17:00:31 -07001799 else if (attr == 3)
1800 dma_attrs = DMA_ATTR_IOMMU_USE_UPSTREAM_HINT;
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07001801 else
1802 goto invalid_format;
1803
1804 iova = dma_map_single_attrs(dev, v_addr, size,
1805 DMA_TO_DEVICE, dma_attrs);
1806
1807 if (dma_mapping_error(dev, iova)) {
1808 pr_err("Failed to perform dma_map_single\n");
1809 ret = -EINVAL;
1810 goto out;
1811 }
1812
1813 retval = count;
1814 pr_err("Mapped 0x%p to %pa (len=0x%zx)\n",
1815 v_addr, &iova, size);
1816 ddev->iova = iova;
1817 pr_err("Saved iova=%pa for future PTE commands\n", &iova);
1818out:
1819 return retval;
1820
1821invalid_format:
Sudarshan Rajagopalan5d6c6f52017-05-19 17:00:31 -07001822 pr_err("Invalid format. Expected: addr,len,dma attr where 'dma attr' is\n0: normal mapping\n1: force coherent\n2: force non-cohernet\n3: use system cache\n");
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07001823 return retval;
1824
1825invalid_addr:
1826 pr_err("Invalid addr given! Address should be within 1MB size from start addr returned by doing 'cat test_virt_addr'.\n");
1827 return retval;
1828}
1829
/*
 * debugfs read: print the iova produced by the most recent dma_map write
 * (saved in ddev->iova).  Requires a prior dma_attach.
 */
static ssize_t iommu_debug_dma_map_read(struct file *file, char __user *ubuf,
	     size_t count, loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	struct device *dev = ddev->dev;
	char buf[100];
	ssize_t retval;
	size_t buflen;
	dma_addr_t iova;

	if (!dev->archdata.mapping) {
		pr_err("No mapping. Did you already attach?\n");
		return -EINVAL;
	}
	if (!dev->archdata.mapping->domain) {
		pr_err("No domain. Did you already attach?\n");
		return -EINVAL;
	}

	if (*offset)
		return 0;

	memset(buf, 0, sizeof(buf));

	iova = ddev->iova;
	snprintf(buf, sizeof(buf), "%pa\n", &iova);

	buflen = strlen(buf);
	if (copy_to_user(ubuf, buf, buflen)) {
		pr_err("Couldn't copy_to_user\n");
		retval = -EFAULT;
	} else {
		*offset = 1; /* non-zero means we're done */
		retval = buflen;
	}

	return retval;
}
1868
/* debugfs "dma_map" file: write "addr,len,attr" to map, read the iova. */
static const struct file_operations iommu_debug_dma_map_fops = {
	.open	= simple_open,
	.write	= iommu_debug_dma_map_write,
	.read	= iommu_debug_dma_map_read,
};
1874
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001875static ssize_t iommu_debug_unmap_write(struct file *file,
1876 const char __user *ubuf,
1877 size_t count, loff_t *offset)
1878{
1879 ssize_t retval = 0;
1880 char *comma1;
1881 char buf[100];
1882 dma_addr_t iova;
1883 size_t size;
1884 size_t unmapped;
1885 struct iommu_debug_device *ddev = file->private_data;
1886
1887 if (count >= 100) {
1888 pr_err("Value too large\n");
1889 return -EINVAL;
1890 }
1891
1892 if (!ddev->domain) {
1893 pr_err("No domain. Did you already attach?\n");
1894 return -EINVAL;
1895 }
1896
1897 memset(buf, 0, 100);
1898
1899 if (copy_from_user(buf, ubuf, count)) {
1900 pr_err("Couldn't copy from user\n");
1901 retval = -EFAULT;
1902 goto out;
1903 }
1904
1905 comma1 = strnchr(buf, count, ',');
1906 if (!comma1)
1907 goto invalid_format;
1908
1909 /* split up the words */
1910 *comma1 = '\0';
1911
Susheel Khiania4417e72016-07-12 11:28:32 +05301912 if (kstrtoux(buf, 0, &iova))
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001913 goto invalid_format;
1914
Susheel Khiania4417e72016-07-12 11:28:32 +05301915 if (kstrtosize_t(comma1 + 1, 0, &size))
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001916 goto invalid_format;
1917
1918 unmapped = iommu_unmap(ddev->domain, iova, size);
1919 if (unmapped != size) {
1920 pr_err("iommu_unmap failed. Expected to unmap: 0x%zx, unmapped: 0x%zx",
1921 size, unmapped);
1922 return -EIO;
1923 }
1924
1925 retval = count;
1926 pr_err("Unmapped %pa (len=0x%zx)\n", &iova, size);
1927out:
1928 return retval;
1929
1930invalid_format:
1931 pr_err("Invalid format. Expected: iova,len\n");
Patrick Daly5a5e3ff2016-10-13 19:31:50 -07001932 return -EINVAL;
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001933}
1934
/* debugfs "unmap" file: write-only; "iova,len" -> iommu_unmap() */
static const struct file_operations iommu_debug_unmap_fops = {
	.open = simple_open,
	.write = iommu_debug_unmap_write,
};
1939
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07001940static ssize_t iommu_debug_dma_unmap_write(struct file *file,
1941 const char __user *ubuf,
1942 size_t count, loff_t *offset)
1943{
1944 ssize_t retval = 0;
1945 char *comma1, *comma2;
1946 char buf[100];
1947 size_t size;
1948 unsigned int attr;
1949 dma_addr_t iova;
1950 unsigned long dma_attrs;
1951 struct iommu_debug_device *ddev = file->private_data;
1952 struct device *dev = ddev->dev;
1953
1954 if (count >= sizeof(buf)) {
1955 pr_err("Value too large\n");
1956 return -EINVAL;
1957 }
1958
1959 if (!dev->archdata.mapping) {
1960 pr_err("No mapping. Did you already attach?\n");
1961 retval = -EINVAL;
1962 goto out;
1963 }
1964 if (!dev->archdata.mapping->domain) {
1965 pr_err("No domain. Did you already attach?\n");
1966 retval = -EINVAL;
1967 goto out;
1968 }
1969
1970 memset(buf, 0, sizeof(buf));
1971
1972 if (copy_from_user(buf, ubuf, count)) {
1973 pr_err("Couldn't copy from user\n");
1974 retval = -EFAULT;
1975 goto out;
1976 }
1977
1978 comma1 = strnchr(buf, count, ',');
1979 if (!comma1)
1980 goto invalid_format;
1981
1982 comma2 = strnchr(comma1 + 1, count, ',');
1983 if (!comma2)
1984 goto invalid_format;
1985
1986 *comma1 = *comma2 = '\0';
1987
1988 if (kstrtoux(buf, 0, &iova))
1989 goto invalid_format;
1990
1991 if (kstrtosize_t(comma1 + 1, 0, &size))
1992 goto invalid_format;
1993
1994 if (kstrtouint(comma2 + 1, 0, &attr))
1995 goto invalid_format;
1996
1997 if (attr == 0)
1998 dma_attrs = 0;
1999 else if (attr == 1)
2000 dma_attrs = DMA_ATTR_FORCE_COHERENT;
2001 else if (attr == 2)
2002 dma_attrs = DMA_ATTR_FORCE_NON_COHERENT;
Sudarshan Rajagopalan5d6c6f52017-05-19 17:00:31 -07002003 else if (attr == 3)
2004 dma_attrs = DMA_ATTR_IOMMU_USE_UPSTREAM_HINT;
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07002005 else
2006 goto invalid_format;
2007
2008 dma_unmap_single_attrs(dev, iova, size, DMA_TO_DEVICE, dma_attrs);
2009
2010 retval = count;
2011 pr_err("Unmapped %pa (len=0x%zx)\n", &iova, size);
2012out:
2013 return retval;
2014
2015invalid_format:
2016 pr_err("Invalid format. Expected: iova,len, dma attr\n");
2017 return retval;
2018}
2019
/* debugfs "dma_unmap" file: write-only; "iova,len,attr" -> dma_unmap_single_attrs() */
static const struct file_operations iommu_debug_dma_unmap_fops = {
	.open = simple_open,
	.write = iommu_debug_dma_unmap_write,
};
2024
Mitchel Humpherys0d1b8262016-02-01 16:53:39 -08002025static ssize_t iommu_debug_config_clocks_write(struct file *file,
2026 const char __user *ubuf,
2027 size_t count, loff_t *offset)
2028{
2029 char buf;
2030 struct iommu_debug_device *ddev = file->private_data;
2031 struct device *dev = ddev->dev;
2032
2033 /* we're expecting a single character plus (optionally) a newline */
2034 if (count > 2) {
2035 dev_err(dev, "Invalid value\n");
2036 return -EINVAL;
2037 }
2038
2039 if (!ddev->domain) {
2040 dev_err(dev, "No domain. Did you already attach?\n");
2041 return -EINVAL;
2042 }
2043
2044 if (copy_from_user(&buf, ubuf, 1)) {
2045 dev_err(dev, "Couldn't copy from user\n");
2046 return -EFAULT;
2047 }
2048
2049 switch (buf) {
2050 case '0':
2051 dev_err(dev, "Disabling config clocks\n");
2052 iommu_disable_config_clocks(ddev->domain);
2053 break;
2054 case '1':
2055 dev_err(dev, "Enabling config clocks\n");
2056 if (iommu_enable_config_clocks(ddev->domain))
2057 dev_err(dev, "Failed!\n");
2058 break;
2059 default:
2060 dev_err(dev, "Invalid value. Should be 0 or 1.\n");
2061 return -EINVAL;
2062 }
2063
2064 return count;
2065}
2066
/* debugfs "config_clocks" file: write-only; '0'/'1' toggles config clocks */
static const struct file_operations iommu_debug_config_clocks_fops = {
	.open = simple_open,
	.write = iommu_debug_config_clocks_write,
};
2071
Patrick Daly9438f322017-04-05 18:03:19 -07002072static ssize_t iommu_debug_trigger_fault_write(
2073 struct file *file, const char __user *ubuf, size_t count,
2074 loff_t *offset)
2075{
2076 struct iommu_debug_device *ddev = file->private_data;
2077 unsigned long flags;
2078
2079 if (!ddev->domain) {
2080 pr_err("No domain. Did you already attach?\n");
2081 return -EINVAL;
2082 }
2083
2084 if (kstrtoul_from_user(ubuf, count, 0, &flags)) {
2085 pr_err("Invalid flags format\n");
2086 return -EFAULT;
2087 }
2088
2089 iommu_trigger_fault(ddev->domain, flags);
2090
2091 return count;
2092}
2093
/* debugfs "trigger-fault" file: write-only; flags value -> iommu_trigger_fault() */
static const struct file_operations iommu_debug_trigger_fault_fops = {
	.open = simple_open,
	.write = iommu_debug_trigger_fault_write,
};
2098
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002099/*
2100 * The following will only work for drivers that implement the generic
2101 * device tree bindings described in
2102 * Documentation/devicetree/bindings/iommu/iommu.txt
2103 */
2104static int snarf_iommu_devices(struct device *dev, void *ignored)
2105{
2106 struct iommu_debug_device *ddev;
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07002107 struct dentry *dir;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002108
2109 if (!of_find_property(dev->of_node, "iommus", NULL))
2110 return 0;
2111
Patrick Daly6dd80252017-04-17 20:41:59 -07002112 /* Hold a reference count */
2113 if (!iommu_group_get(dev))
2114 return 0;
2115
Mitchel Humpherys89924fd2015-07-09 14:50:22 -07002116 ddev = kzalloc(sizeof(*ddev), GFP_KERNEL);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002117 if (!ddev)
2118 return -ENODEV;
2119 ddev->dev = dev;
2120 dir = debugfs_create_dir(dev_name(dev), debugfs_tests_dir);
2121 if (!dir) {
2122 pr_err("Couldn't create iommu/devices/%s debugfs dir\n",
2123 dev_name(dev));
2124 goto err;
2125 }
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07002126
Patrick Dalye4e39862015-11-20 20:00:50 -08002127 if (!debugfs_create_file("nr_iters", S_IRUSR, dir, &iters_per_op,
2128 &iommu_debug_nr_iters_ops)) {
2129 pr_err("Couldn't create iommu/devices/%s/nr_iters debugfs file\n",
2130 dev_name(dev));
2131 goto err_rmdir;
2132 }
2133
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07002134 if (!debugfs_create_file("test_virt_addr", 0400, dir, ddev,
2135 &iommu_debug_test_virt_addr_fops)) {
2136 pr_err("Couldn't create iommu/devices/%s/test_virt_addr debugfs file\n",
2137 dev_name(dev));
2138 goto err_rmdir;
2139 }
2140
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07002141 if (!debugfs_create_file("profiling", S_IRUSR, dir, ddev,
2142 &iommu_debug_profiling_fops)) {
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002143 pr_err("Couldn't create iommu/devices/%s/profiling debugfs file\n",
2144 dev_name(dev));
2145 goto err_rmdir;
2146 }
2147
Mitchel Humpherys020f90f2015-10-02 16:02:31 -07002148 if (!debugfs_create_file("secure_profiling", S_IRUSR, dir, ddev,
2149 &iommu_debug_secure_profiling_fops)) {
2150 pr_err("Couldn't create iommu/devices/%s/secure_profiling debugfs file\n",
2151 dev_name(dev));
2152 goto err_rmdir;
2153 }
2154
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -07002155 if (!debugfs_create_file("profiling_fast", S_IRUSR, dir, ddev,
2156 &iommu_debug_profiling_fast_fops)) {
2157 pr_err("Couldn't create iommu/devices/%s/profiling_fast debugfs file\n",
2158 dev_name(dev));
2159 goto err_rmdir;
2160 }
2161
Mitchel Humpherysbe3060c2015-10-08 15:08:01 -07002162 if (!debugfs_create_file("profiling_fast_dma_api", S_IRUSR, dir, ddev,
2163 &iommu_debug_profiling_fast_dma_api_fops)) {
2164 pr_err("Couldn't create iommu/devices/%s/profiling_fast_dma_api debugfs file\n",
2165 dev_name(dev));
2166 goto err_rmdir;
2167 }
2168
Mitchel Humpherys45fc7122015-12-11 15:22:15 -08002169 if (!debugfs_create_file("functional_fast_dma_api", S_IRUSR, dir, ddev,
2170 &iommu_debug_functional_fast_dma_api_fops)) {
2171 pr_err("Couldn't create iommu/devices/%s/functional_fast_dma_api debugfs file\n",
2172 dev_name(dev));
2173 goto err_rmdir;
2174 }
2175
Mitchel Humpherys10215fd2015-12-15 18:45:57 -08002176 if (!debugfs_create_file("functional_arm_dma_api", S_IRUSR, dir, ddev,
2177 &iommu_debug_functional_arm_dma_api_fops)) {
2178 pr_err("Couldn't create iommu/devices/%s/functional_arm_dma_api debugfs file\n",
2179 dev_name(dev));
2180 goto err_rmdir;
2181 }
2182
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07002183 if (!debugfs_create_file("dma_attach", 0600, dir, ddev,
2184 &iommu_debug_dma_attach_fops)) {
2185 pr_err("Couldn't create iommu/devices/%s/dma_attach debugfs file\n",
2186 dev_name(dev));
2187 goto err_rmdir;
2188 }
2189
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07002190 if (!debugfs_create_file("attach", S_IRUSR, dir, ddev,
2191 &iommu_debug_attach_fops)) {
2192 pr_err("Couldn't create iommu/devices/%s/attach debugfs file\n",
2193 dev_name(dev));
2194 goto err_rmdir;
2195 }
2196
Mitchel Humpherysac4c38b2015-07-30 19:40:21 -07002197 if (!debugfs_create_file("secure_attach", S_IRUSR, dir, ddev,
2198 &iommu_debug_secure_attach_fops)) {
2199 pr_err("Couldn't create iommu/devices/%s/secure_attach debugfs file\n",
2200 dev_name(dev));
2201 goto err_rmdir;
2202 }
2203
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07002204 if (!debugfs_create_file("atos", S_IWUSR, dir, ddev,
2205 &iommu_debug_atos_fops)) {
2206 pr_err("Couldn't create iommu/devices/%s/atos debugfs file\n",
2207 dev_name(dev));
2208 goto err_rmdir;
2209 }
2210
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07002211 if (!debugfs_create_file("dma_atos", 0600, dir, ddev,
2212 &iommu_debug_dma_atos_fops)) {
2213 pr_err("Couldn't create iommu/devices/%s/dma_atos debugfs file\n",
2214 dev_name(dev));
2215 goto err_rmdir;
2216 }
2217
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07002218 if (!debugfs_create_file("map", S_IWUSR, dir, ddev,
2219 &iommu_debug_map_fops)) {
2220 pr_err("Couldn't create iommu/devices/%s/map debugfs file\n",
2221 dev_name(dev));
2222 goto err_rmdir;
2223 }
2224
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07002225 if (!debugfs_create_file("dma_map", 0600, dir, ddev,
2226 &iommu_debug_dma_map_fops)) {
2227 pr_err("Couldn't create iommu/devices/%s/dma_map debugfs file\n",
2228 dev_name(dev));
2229 goto err_rmdir;
2230 }
2231
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07002232 if (!debugfs_create_file("unmap", S_IWUSR, dir, ddev,
2233 &iommu_debug_unmap_fops)) {
2234 pr_err("Couldn't create iommu/devices/%s/unmap debugfs file\n",
2235 dev_name(dev));
2236 goto err_rmdir;
2237 }
2238
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07002239 if (!debugfs_create_file("dma_unmap", 0200, dir, ddev,
2240 &iommu_debug_dma_unmap_fops)) {
2241 pr_err("Couldn't create iommu/devices/%s/dma_unmap debugfs file\n",
2242 dev_name(dev));
2243 goto err_rmdir;
2244 }
2245
2246 if (!debugfs_create_file("pte", 0600, dir, ddev,
2247 &iommu_debug_pte_fops)) {
2248 pr_err("Couldn't create iommu/devices/%s/pte debugfs file\n",
2249 dev_name(dev));
2250 goto err_rmdir;
2251 }
2252
Mitchel Humpherys0d1b8262016-02-01 16:53:39 -08002253 if (!debugfs_create_file("config_clocks", S_IWUSR, dir, ddev,
2254 &iommu_debug_config_clocks_fops)) {
2255 pr_err("Couldn't create iommu/devices/%s/config_clocks debugfs file\n",
2256 dev_name(dev));
2257 goto err_rmdir;
2258 }
2259
Patrick Daly9438f322017-04-05 18:03:19 -07002260 if (!debugfs_create_file("trigger-fault", 0200, dir, ddev,
2261 &iommu_debug_trigger_fault_fops)) {
2262 pr_err("Couldn't create iommu/devices/%s/trigger-fault debugfs file\n",
2263 dev_name(dev));
2264 goto err_rmdir;
2265 }
2266
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002267 list_add(&ddev->list, &iommu_debug_devices);
2268 return 0;
2269
2270err_rmdir:
2271 debugfs_remove_recursive(dir);
2272err:
2273 kfree(ddev);
2274 return 0;
2275}
2276
2277static int iommu_debug_init_tests(void)
2278{
2279 debugfs_tests_dir = debugfs_create_dir("tests",
Mitchel Humpherysc75ae492015-07-15 18:27:36 -07002280 iommu_debugfs_top);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002281 if (!debugfs_tests_dir) {
2282 pr_err("Couldn't create iommu/tests debugfs directory\n");
2283 return -ENODEV;
2284 }
2285
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07002286 test_virt_addr = kzalloc(SZ_1M, GFP_KERNEL);
2287
2288 if (!test_virt_addr)
2289 return -ENOMEM;
2290
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002291 return bus_for_each_dev(&platform_bus_type, NULL, NULL,
2292 snarf_iommu_devices);
2293}
Mitchel Humpherysc75ae492015-07-15 18:27:36 -07002294
2295static void iommu_debug_destroy_tests(void)
2296{
2297 debugfs_remove_recursive(debugfs_tests_dir);
2298}
#else
/* Test support compiled out: provide no-op stubs for init/teardown. */
static inline int iommu_debug_init_tests(void) { return 0; }
static inline void iommu_debug_destroy_tests(void) { }
#endif
2303
Mitchel Humpherys93f7eef2016-04-13 17:08:49 -07002304/*
2305 * This isn't really a "driver", we just need something in the device tree
2306 * so that our tests can run without any client drivers, and our tests rely
2307 * on parsing the device tree for nodes with the `iommus' property.
2308 */
static int iommu_debug_pass(struct platform_device *pdev)
{
	/* Intentionally a no-op: used as both probe and remove callback. */
	return 0;
}
2313
/* Matches dummy "iommu-debug-test" DT nodes so tests can run without clients */
static const struct of_device_id iommu_debug_of_match[] = {
	{ .compatible = "iommu-debug-test" },
	{ },
};
2318
/* Minimal platform driver shell; probe/remove are no-ops (iommu_debug_pass) */
static struct platform_driver iommu_debug_driver = {
	.probe = iommu_debug_pass,
	.remove = iommu_debug_pass,
	.driver = {
		.name = "iommu-debug",
		.of_match_table = iommu_debug_of_match,
	},
};
2327
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002328static int iommu_debug_init(void)
2329{
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002330 if (iommu_debug_init_tests())
Mitchel Humpherysc75ae492015-07-15 18:27:36 -07002331 return -ENODEV;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002332
Mitchel Humpherys93f7eef2016-04-13 17:08:49 -07002333 return platform_driver_register(&iommu_debug_driver);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002334}
2335
static void iommu_debug_exit(void)
{
	/* Unregister the driver first, then tear down the debugfs tree. */
	platform_driver_unregister(&iommu_debug_driver);
	iommu_debug_destroy_tests();
}
2341
2342module_init(iommu_debug_init);
2343module_exit(iommu_debug_exit);