blob: c98d8c29d9a8b611726b700e274665c987841c9f [file] [log] [blame]
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07001/*
Charan Teja Reddy29f61402017-02-09 20:44:29 +05302 * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14
15#define pr_fmt(fmt) "iommu-debug: %s: " fmt, __func__
16
17#include <linux/debugfs.h>
18#include <linux/device.h>
19#include <linux/iommu.h>
20#include <linux/of.h>
21#include <linux/platform_device.h>
22#include <linux/slab.h>
23#include <linux/module.h>
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -070024#include <linux/uaccess.h>
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -070025#include <linux/dma-contiguous.h>
Mitchel Humpherys5e991f12015-07-30 19:25:54 -070026#include <soc/qcom/secure_buffer.h>
Mitchel Humpherysbe3060c2015-10-08 15:08:01 -070027#include <linux/dma-mapping.h>
28#include <asm/cacheflush.h>
29#include <asm/dma-iommu.h>
Mitchel Humpherys42296fb2015-06-23 16:29:16 -070030
Susheel Khiania4417e72016-07-12 11:28:32 +053031#if defined(CONFIG_IOMMU_DEBUG_TRACKING) || defined(CONFIG_IOMMU_TESTS)
32
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -070033static const char *iommu_debug_attr_to_string(enum iommu_attr attr)
34{
35 switch (attr) {
36 case DOMAIN_ATTR_GEOMETRY:
37 return "DOMAIN_ATTR_GEOMETRY";
38 case DOMAIN_ATTR_PAGING:
39 return "DOMAIN_ATTR_PAGING";
40 case DOMAIN_ATTR_WINDOWS:
41 return "DOMAIN_ATTR_WINDOWS";
42 case DOMAIN_ATTR_FSL_PAMU_STASH:
43 return "DOMAIN_ATTR_FSL_PAMU_STASH";
44 case DOMAIN_ATTR_FSL_PAMU_ENABLE:
45 return "DOMAIN_ATTR_FSL_PAMU_ENABLE";
46 case DOMAIN_ATTR_FSL_PAMUV1:
47 return "DOMAIN_ATTR_FSL_PAMUV1";
48 case DOMAIN_ATTR_NESTING:
49 return "DOMAIN_ATTR_NESTING";
50 case DOMAIN_ATTR_PT_BASE_ADDR:
51 return "DOMAIN_ATTR_PT_BASE_ADDR";
52 case DOMAIN_ATTR_SECURE_VMID:
53 return "DOMAIN_ATTR_SECURE_VMID";
54 case DOMAIN_ATTR_ATOMIC:
55 return "DOMAIN_ATTR_ATOMIC";
56 case DOMAIN_ATTR_CONTEXT_BANK:
57 return "DOMAIN_ATTR_CONTEXT_BANK";
58 case DOMAIN_ATTR_TTBR0:
59 return "DOMAIN_ATTR_TTBR0";
60 case DOMAIN_ATTR_CONTEXTIDR:
61 return "DOMAIN_ATTR_CONTEXTIDR";
62 case DOMAIN_ATTR_PROCID:
63 return "DOMAIN_ATTR_PROCID";
64 case DOMAIN_ATTR_DYNAMIC:
65 return "DOMAIN_ATTR_DYNAMIC";
66 case DOMAIN_ATTR_NON_FATAL_FAULTS:
67 return "DOMAIN_ATTR_NON_FATAL_FAULTS";
68 case DOMAIN_ATTR_S1_BYPASS:
69 return "DOMAIN_ATTR_S1_BYPASS";
70 case DOMAIN_ATTR_FAST:
71 return "DOMAIN_ATTR_FAST";
Patrick Dalyef6c1dc2016-11-16 14:35:23 -080072 case DOMAIN_ATTR_EARLY_MAP:
73 return "DOMAIN_ATTR_EARLY_MAP";
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -070074 default:
75 return "Unknown attr!";
76 }
77}
Susheel Khiania4417e72016-07-12 11:28:32 +053078#endif
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -070079
Mitchel Humpherys42296fb2015-06-23 16:29:16 -070080#ifdef CONFIG_IOMMU_DEBUG_TRACKING
81
/* Protects and anchors the list of live (domain, group) attachments below. */
static DEFINE_MUTEX(iommu_debug_attachments_lock);
static LIST_HEAD(iommu_debug_attachments);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -070084
/*
 * Each group may have more than one domain; but each domain may
 * only have one group.
 * Used by debug tools to display the name of the device(s) associated
 * with a particular domain.
 */
struct iommu_debug_attachment {
	struct iommu_domain *domain;	/* attached domain (not owned) */
	struct iommu_group *group;	/* ref taken via iommu_group_get() */
	struct list_head list;		/* entry in iommu_debug_attachments */
};
96
Susheel Khianie66aa5b2015-08-25 17:25:42 +053097void iommu_debug_attach_device(struct iommu_domain *domain,
98 struct device *dev)
99{
100 struct iommu_debug_attachment *attach;
Patrick Dalyee7a25f2017-04-05 18:05:02 -0700101 struct iommu_group *group;
102
103 group = iommu_group_get(dev);
104 if (!group)
105 return;
106
107 attach = kzalloc(sizeof(*attach), GFP_KERNEL);
108 if (!attach)
109 return;
110
111 attach->domain = domain;
112 attach->group = group;
113 INIT_LIST_HEAD(&attach->list);
Susheel Khianie66aa5b2015-08-25 17:25:42 +0530114
115 mutex_lock(&iommu_debug_attachments_lock);
Patrick Dalyee7a25f2017-04-05 18:05:02 -0700116 list_add(&attach->list, &iommu_debug_attachments);
117 mutex_unlock(&iommu_debug_attachments_lock);
118}
Susheel Khianie66aa5b2015-08-25 17:25:42 +0530119
Patrick Dalyee7a25f2017-04-05 18:05:02 -0700120void iommu_debug_domain_remove(struct iommu_domain *domain)
121{
122 struct iommu_debug_attachment *it, *tmp;
Susheel Khianie66aa5b2015-08-25 17:25:42 +0530123
Patrick Dalyee7a25f2017-04-05 18:05:02 -0700124 mutex_lock(&iommu_debug_attachments_lock);
125 list_for_each_entry_safe(it, tmp, &iommu_debug_attachments, list) {
126 if (it->domain != domain)
127 continue;
128 list_del(&it->list);
129 iommu_group_put(it->group);
130 kfree(it);
Susheel Khianie66aa5b2015-08-25 17:25:42 +0530131 }
132
133 mutex_unlock(&iommu_debug_attachments_lock);
134}
135
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700136#endif
137
138#ifdef CONFIG_IOMMU_TESTS
139
/*
 * Width-appropriate kstrto* aliases so the test code can parse
 * user-supplied numbers into pointer-sized values on both 32-bit and
 * 64-bit kernels.
 */
#ifdef CONFIG_64BIT

#define kstrtoux kstrtou64
#define kstrtox_from_user kstrtoull_from_user
#define kstrtosize_t kstrtoul

#else

#define kstrtoux kstrtou32
#define kstrtox_from_user kstrtouint_from_user
#define kstrtosize_t kstrtouint

#endif

/* All iommu_debug_device instances registered with the test interface. */
static LIST_HEAD(iommu_debug_devices);
/* debugfs directory holding the per-device test files. */
static struct dentry *debugfs_tests_dir;
/* Iterations averaged per profiling measurement (settable via debugfs). */
static u32 iters_per_op = 1;
/* Scratch VA shared by test files; its users are not visible in this chunk. */
static void *test_virt_addr;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700158
/* Per-device state for the debugfs IOMMU test interface. */
struct iommu_debug_device {
	struct device *dev;		/* device under test */
	struct iommu_domain *domain;	/* domain attached via the test files */
	u64 iova;			/* last iova — presumably set by map/atos writes; not visible in this chunk */
	u64 phys;			/* last physical address used */
	size_t len;			/* last mapping length */
	struct list_head list;		/* entry in iommu_debug_devices */
};
167
168static int iommu_debug_build_phoney_sg_table(struct device *dev,
169 struct sg_table *table,
170 unsigned long total_size,
171 unsigned long chunk_size)
172{
173 unsigned long nents = total_size / chunk_size;
174 struct scatterlist *sg;
175 int i;
176 struct page *page;
177
178 if (!IS_ALIGNED(total_size, PAGE_SIZE))
179 return -EINVAL;
180 if (!IS_ALIGNED(total_size, chunk_size))
181 return -EINVAL;
182 if (sg_alloc_table(table, nents, GFP_KERNEL))
183 return -EINVAL;
184 page = alloc_pages(GFP_KERNEL, get_order(chunk_size));
185 if (!page)
186 goto free_table;
187
188 /* all the same page... why not. */
189 for_each_sg(table->sgl, sg, table->nents, i)
190 sg_set_page(sg, page, chunk_size, 0);
191
192 return 0;
193
194free_table:
195 sg_free_table(table);
196 return -ENOMEM;
197}
198
199static void iommu_debug_destroy_phoney_sg_table(struct device *dev,
200 struct sg_table *table,
201 unsigned long chunk_size)
202{
203 __free_pages(sg_page(table->sgl), get_order(chunk_size));
204 sg_free_table(table);
205}
206
207static const char * const _size_to_string(unsigned long size)
208{
209 switch (size) {
210 case SZ_4K:
211 return "4K";
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700212 case SZ_8K:
213 return "8K";
214 case SZ_16K:
215 return "16K";
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700216 case SZ_64K:
217 return "64K";
218 case SZ_2M:
219 return "2M";
220 case SZ_1M * 12:
221 return "12M";
222 case SZ_1M * 20:
223 return "20M";
224 }
225 return "unknown size, please add to _size_to_string";
226}
227
Patrick Dalye4e39862015-11-20 20:00:50 -0800228static int nr_iters_set(void *data, u64 val)
229{
230 if (!val)
231 val = 1;
232 if (val > 10000)
233 val = 10000;
234 *(u32 *)data = val;
235 return 0;
236}
237
238static int nr_iters_get(void *data, u64 *val)
239{
240 *val = *(u32 *)data;
241 return 0;
242}
243
/* debugfs file ops for "nr_iters": u64 read/write backed by iters_per_op. */
DEFINE_SIMPLE_ATTRIBUTE(iommu_debug_nr_iters_ops,
			nr_iters_get, nr_iters_set, "%llu\n");
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700246
Mitchel Humpherys020f90f2015-10-02 16:02:31 -0700247static void iommu_debug_device_profiling(struct seq_file *s, struct device *dev,
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700248 enum iommu_attr attrs[],
249 void *attr_values[], int nattrs,
Susheel Khiania4417e72016-07-12 11:28:32 +0530250 const size_t sizes[])
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700251{
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700252 int i;
Susheel Khiania4417e72016-07-12 11:28:32 +0530253 const size_t *sz;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700254 struct iommu_domain *domain;
255 unsigned long iova = 0x10000;
256 phys_addr_t paddr = 0xa000;
257
258 domain = iommu_domain_alloc(&platform_bus_type);
259 if (!domain) {
260 seq_puts(s, "Couldn't allocate domain\n");
261 return;
262 }
263
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700264 seq_puts(s, "Domain attributes: [ ");
265 for (i = 0; i < nattrs; ++i) {
266 /* not all attrs are ints, but this will get us by for now */
267 seq_printf(s, "%s=%d%s", iommu_debug_attr_to_string(attrs[i]),
268 *((int *)attr_values[i]),
269 i < nattrs ? " " : "");
Mitchel Humpherys679567c2015-08-28 10:51:24 -0700270 }
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700271 seq_puts(s, "]\n");
272 for (i = 0; i < nattrs; ++i) {
273 if (iommu_domain_set_attr(domain, attrs[i], attr_values[i])) {
274 seq_printf(s, "Couldn't set %d to the value at %p\n",
275 attrs[i], attr_values[i]);
Mitchel Humpherys020f90f2015-10-02 16:02:31 -0700276 goto out_domain_free;
277 }
278 }
279
Patrick Daly6dd80252017-04-17 20:41:59 -0700280 if (iommu_attach_group(domain, dev->iommu_group)) {
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700281 seq_puts(s,
282 "Couldn't attach new domain to device. Is it already attached?\n");
283 goto out_domain_free;
284 }
285
Patrick Dalye4e39862015-11-20 20:00:50 -0800286 seq_printf(s, "(average over %d iterations)\n", iters_per_op);
Patrick Daly3ca31e32015-11-20 20:33:04 -0800287 seq_printf(s, "%8s %19s %16s\n", "size", "iommu_map", "iommu_unmap");
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700288 for (sz = sizes; *sz; ++sz) {
Susheel Khiania4417e72016-07-12 11:28:32 +0530289 size_t size = *sz;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700290 size_t unmapped;
Patrick Daly3ca31e32015-11-20 20:33:04 -0800291 u64 map_elapsed_ns = 0, unmap_elapsed_ns = 0;
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700292 u64 map_elapsed_us = 0, unmap_elapsed_us = 0;
Patrick Daly3ca31e32015-11-20 20:33:04 -0800293 u32 map_elapsed_rem = 0, unmap_elapsed_rem = 0;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700294 struct timespec tbefore, tafter, diff;
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700295 int i;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700296
Patrick Dalye4e39862015-11-20 20:00:50 -0800297 for (i = 0; i < iters_per_op; ++i) {
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700298 getnstimeofday(&tbefore);
299 if (iommu_map(domain, iova, paddr, size,
300 IOMMU_READ | IOMMU_WRITE)) {
301 seq_puts(s, "Failed to map\n");
302 continue;
303 }
304 getnstimeofday(&tafter);
305 diff = timespec_sub(tafter, tbefore);
Patrick Daly3ca31e32015-11-20 20:33:04 -0800306 map_elapsed_ns += timespec_to_ns(&diff);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700307
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700308 getnstimeofday(&tbefore);
309 unmapped = iommu_unmap(domain, iova, size);
310 if (unmapped != size) {
311 seq_printf(s,
312 "Only unmapped %zx instead of %zx\n",
313 unmapped, size);
314 continue;
315 }
316 getnstimeofday(&tafter);
317 diff = timespec_sub(tafter, tbefore);
Patrick Daly3ca31e32015-11-20 20:33:04 -0800318 unmap_elapsed_ns += timespec_to_ns(&diff);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700319 }
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700320
Susheel Khiania4417e72016-07-12 11:28:32 +0530321 map_elapsed_ns = div_u64_rem(map_elapsed_ns, iters_per_op,
322 &map_elapsed_rem);
323 unmap_elapsed_ns = div_u64_rem(unmap_elapsed_ns, iters_per_op,
324 &unmap_elapsed_rem);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700325
Patrick Daly3ca31e32015-11-20 20:33:04 -0800326 map_elapsed_us = div_u64_rem(map_elapsed_ns, 1000,
327 &map_elapsed_rem);
328 unmap_elapsed_us = div_u64_rem(unmap_elapsed_ns, 1000,
329 &unmap_elapsed_rem);
330
331 seq_printf(s, "%8s %12lld.%03d us %9lld.%03d us\n",
332 _size_to_string(size),
333 map_elapsed_us, map_elapsed_rem,
334 unmap_elapsed_us, unmap_elapsed_rem);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700335 }
336
337 seq_putc(s, '\n');
Patrick Daly3ca31e32015-11-20 20:33:04 -0800338 seq_printf(s, "%8s %19s %16s\n", "size", "iommu_map_sg", "iommu_unmap");
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700339 for (sz = sizes; *sz; ++sz) {
Susheel Khiania4417e72016-07-12 11:28:32 +0530340 size_t size = *sz;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700341 size_t unmapped;
Patrick Daly3ca31e32015-11-20 20:33:04 -0800342 u64 map_elapsed_ns = 0, unmap_elapsed_ns = 0;
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700343 u64 map_elapsed_us = 0, unmap_elapsed_us = 0;
Patrick Daly3ca31e32015-11-20 20:33:04 -0800344 u32 map_elapsed_rem = 0, unmap_elapsed_rem = 0;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700345 struct timespec tbefore, tafter, diff;
346 struct sg_table table;
347 unsigned long chunk_size = SZ_4K;
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700348 int i;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700349
350 if (iommu_debug_build_phoney_sg_table(dev, &table, size,
351 chunk_size)) {
352 seq_puts(s,
353 "couldn't build phoney sg table! bailing...\n");
354 goto out_detach;
355 }
356
Patrick Dalye4e39862015-11-20 20:00:50 -0800357 for (i = 0; i < iters_per_op; ++i) {
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700358 getnstimeofday(&tbefore);
359 if (iommu_map_sg(domain, iova, table.sgl, table.nents,
360 IOMMU_READ | IOMMU_WRITE) != size) {
361 seq_puts(s, "Failed to map_sg\n");
362 goto next;
363 }
364 getnstimeofday(&tafter);
365 diff = timespec_sub(tafter, tbefore);
Patrick Daly3ca31e32015-11-20 20:33:04 -0800366 map_elapsed_ns += timespec_to_ns(&diff);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700367
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700368 getnstimeofday(&tbefore);
369 unmapped = iommu_unmap(domain, iova, size);
370 if (unmapped != size) {
371 seq_printf(s,
372 "Only unmapped %zx instead of %zx\n",
373 unmapped, size);
374 goto next;
375 }
376 getnstimeofday(&tafter);
377 diff = timespec_sub(tafter, tbefore);
Patrick Daly3ca31e32015-11-20 20:33:04 -0800378 unmap_elapsed_ns += timespec_to_ns(&diff);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700379 }
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700380
Susheel Khiania4417e72016-07-12 11:28:32 +0530381 map_elapsed_ns = div_u64_rem(map_elapsed_ns, iters_per_op,
382 &map_elapsed_rem);
383 unmap_elapsed_ns = div_u64_rem(unmap_elapsed_ns, iters_per_op,
384 &unmap_elapsed_rem);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700385
Patrick Daly3ca31e32015-11-20 20:33:04 -0800386 map_elapsed_us = div_u64_rem(map_elapsed_ns, 1000,
387 &map_elapsed_rem);
388 unmap_elapsed_us = div_u64_rem(unmap_elapsed_ns, 1000,
389 &unmap_elapsed_rem);
390
391 seq_printf(s, "%8s %12lld.%03d us %9lld.%03d us\n",
392 _size_to_string(size),
393 map_elapsed_us, map_elapsed_rem,
394 unmap_elapsed_us, unmap_elapsed_rem);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700395
396next:
397 iommu_debug_destroy_phoney_sg_table(dev, &table, chunk_size);
398 }
399
400out_detach:
Patrick Daly6dd80252017-04-17 20:41:59 -0700401 iommu_detach_group(domain, dev->iommu_group);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700402out_domain_free:
403 iommu_domain_free(domain);
404}
405
Mitchel Humpherys7cc56e42015-07-06 14:58:23 -0700406static int iommu_debug_profiling_show(struct seq_file *s, void *ignored)
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700407{
408 struct iommu_debug_device *ddev = s->private;
Susheel Khiania4417e72016-07-12 11:28:32 +0530409 const size_t sizes[] = { SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12,
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700410 SZ_1M * 20, 0 };
411 enum iommu_attr attrs[] = {
412 DOMAIN_ATTR_ATOMIC,
413 };
414 int htw_disable = 1, atomic = 1;
415 void *attr_values[] = { &htw_disable, &atomic };
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700416
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700417 iommu_debug_device_profiling(s, ddev->dev, attrs, attr_values,
418 ARRAY_SIZE(attrs), sizes);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700419
420 return 0;
421}
422
/* debugfs open: route reads of "profiling" through the seq_file show fn. */
static int iommu_debug_profiling_open(struct inode *inode, struct file *file)
{
	return single_open(file, iommu_debug_profiling_show, inode->i_private);
}

static const struct file_operations iommu_debug_profiling_fops = {
	.open = iommu_debug_profiling_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
434
Mitchel Humpherys020f90f2015-10-02 16:02:31 -0700435static int iommu_debug_secure_profiling_show(struct seq_file *s, void *ignored)
436{
437 struct iommu_debug_device *ddev = s->private;
Susheel Khiania4417e72016-07-12 11:28:32 +0530438 const size_t sizes[] = { SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12,
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700439 SZ_1M * 20, 0 };
Mitchel Humpherys020f90f2015-10-02 16:02:31 -0700440
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700441 enum iommu_attr attrs[] = {
442 DOMAIN_ATTR_ATOMIC,
443 DOMAIN_ATTR_SECURE_VMID,
444 };
445 int one = 1, secure_vmid = VMID_CP_PIXEL;
446 void *attr_values[] = { &one, &secure_vmid };
447
448 iommu_debug_device_profiling(s, ddev->dev, attrs, attr_values,
449 ARRAY_SIZE(attrs), sizes);
Mitchel Humpherys020f90f2015-10-02 16:02:31 -0700450
451 return 0;
452}
453
/* debugfs open for "secure_profiling". */
static int iommu_debug_secure_profiling_open(struct inode *inode,
					     struct file *file)
{
	return single_open(file, iommu_debug_secure_profiling_show,
			   inode->i_private);
}

static const struct file_operations iommu_debug_secure_profiling_fops = {
	.open = iommu_debug_secure_profiling_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
467
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700468static int iommu_debug_profiling_fast_show(struct seq_file *s, void *ignored)
469{
470 struct iommu_debug_device *ddev = s->private;
471 size_t sizes[] = {SZ_4K, SZ_8K, SZ_16K, SZ_64K, 0};
472 enum iommu_attr attrs[] = {
473 DOMAIN_ATTR_FAST,
474 DOMAIN_ATTR_ATOMIC,
475 };
476 int one = 1;
477 void *attr_values[] = { &one, &one };
478
479 iommu_debug_device_profiling(s, ddev->dev, attrs, attr_values,
480 ARRAY_SIZE(attrs), sizes);
481
482 return 0;
483}
484
/* debugfs open for "profiling_fast". */
static int iommu_debug_profiling_fast_open(struct inode *inode,
					   struct file *file)
{
	return single_open(file, iommu_debug_profiling_fast_show,
			   inode->i_private);
}

static const struct file_operations iommu_debug_profiling_fast_fops = {
	.open = iommu_debug_profiling_fast_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
498
/*
 * debugfs read: time 10 dma_map_single_attrs/dma_unmap_single_attrs pairs
 * through the fast-SMMU DMA ops, once with default attrs and once with
 * DMA_ATTR_SKIP_CPU_SYNC, printing the raw samples and averages.
 * NOTE(review): extra_labels pairs "coherent" with SKIP_CPU_SYNC — the
 * labels describe the effective CPU-sync behavior; confirm intent.
 * Always returns 0 so the file read succeeds even when a step fails.
 */
static int iommu_debug_profiling_fast_dma_api_show(struct seq_file *s,
						 void *ignored)
{
	int i, experiment;
	struct iommu_debug_device *ddev = s->private;
	struct device *dev = ddev->dev;
	u64 map_elapsed_ns[10], unmap_elapsed_ns[10];
	struct dma_iommu_mapping *mapping;
	dma_addr_t dma_addr;
	void *virt;
	int fast = 1;
	const char * const extra_labels[] = {
		"not coherent",
		"coherent",
	};
	unsigned long extra_attrs[] = {
		0,
		DMA_ATTR_SKIP_CPU_SYNC,
	};

	/* 1518 bytes — presumably a max Ethernet frame; TODO confirm */
	virt = kmalloc(1518, GFP_KERNEL);
	if (!virt)
		goto out;

	/* 4GB IOVA space starting at 0 */
	mapping = arm_iommu_create_mapping(&platform_bus_type, 0, SZ_1G * 4ULL);
	if (!mapping) {
		seq_puts(s, "fast_smmu_create_mapping failed\n");
		goto out_kfree;
	}

	if (iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_FAST, &fast)) {
		seq_puts(s, "iommu_domain_set_attr failed\n");
		goto out_release_mapping;
	}

	if (arm_iommu_attach_device(dev, mapping)) {
		seq_puts(s, "fast_smmu_attach_device failed\n");
		goto out_release_mapping;
	}

	/* keep clocks on so the measurements don't include clock ramp */
	if (iommu_enable_config_clocks(mapping->domain)) {
		seq_puts(s, "Couldn't enable clocks\n");
		goto out_detach;
	}
	for (experiment = 0; experiment < 2; ++experiment) {
		size_t map_avg = 0, unmap_avg = 0;

		for (i = 0; i < 10; ++i) {
			struct timespec tbefore, tafter, diff;
			u64 ns;

			getnstimeofday(&tbefore);
			dma_addr = dma_map_single_attrs(
				dev, virt, SZ_4K, DMA_TO_DEVICE,
				extra_attrs[experiment]);
			getnstimeofday(&tafter);
			diff = timespec_sub(tafter, tbefore);
			ns = timespec_to_ns(&diff);
			if (dma_mapping_error(dev, dma_addr)) {
				seq_puts(s, "dma_map_single failed\n");
				goto out_disable_config_clocks;
			}
			map_elapsed_ns[i] = ns;

			getnstimeofday(&tbefore);
			dma_unmap_single_attrs(
				dev, dma_addr, SZ_4K, DMA_TO_DEVICE,
				extra_attrs[experiment]);
			getnstimeofday(&tafter);
			diff = timespec_sub(tafter, tbefore);
			ns = timespec_to_ns(&diff);
			unmap_elapsed_ns[i] = ns;
		}

		seq_printf(s, "%13s %24s (ns): [", extra_labels[experiment],
			   "dma_map_single_attrs");
		for (i = 0; i < 10; ++i) {
			map_avg += map_elapsed_ns[i];
			seq_printf(s, "%5llu%s", map_elapsed_ns[i],
				   i < 9 ? ", " : "");
		}
		map_avg /= 10;
		seq_printf(s, "] (avg: %zu)\n", map_avg);

		seq_printf(s, "%13s %24s (ns): [", extra_labels[experiment],
			   "dma_unmap_single_attrs");
		for (i = 0; i < 10; ++i) {
			unmap_avg += unmap_elapsed_ns[i];
			seq_printf(s, "%5llu%s", unmap_elapsed_ns[i],
				   i < 9 ? ", " : "");
		}
		unmap_avg /= 10;
		seq_printf(s, "] (avg: %zu)\n", unmap_avg);
	}

out_disable_config_clocks:
	iommu_disable_config_clocks(mapping->domain);
out_detach:
	arm_iommu_detach_device(dev);
out_release_mapping:
	arm_iommu_release_mapping(mapping);
out_kfree:
	kfree(virt);
out:
	return 0;
}
605
/* debugfs open for "profiling_fast_dma_api". */
static int iommu_debug_profiling_fast_dma_api_open(struct inode *inode,
						   struct file *file)
{
	return single_open(file, iommu_debug_profiling_fast_dma_api_show,
			   inode->i_private);
}

static const struct file_operations iommu_debug_profiling_fast_dma_api_fops = {
	.open = iommu_debug_profiling_fast_dma_api_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
619
Mitchel Humpherys45fc7122015-12-11 15:22:15 -0800620static int __tlb_stress_sweep(struct device *dev, struct seq_file *s)
621{
622 int i, ret = 0;
Charan Teja Reddy29f61402017-02-09 20:44:29 +0530623 u64 iova;
624 const u64 max = SZ_1G * 4ULL - 1;
Mitchel Humpherys45fc7122015-12-11 15:22:15 -0800625 void *virt;
626 phys_addr_t phys;
627 dma_addr_t dma_addr;
628
629 /*
630 * we'll be doing 4K and 8K mappings. Need to own an entire 8K
631 * chunk that we can work with.
632 */
633 virt = (void *)__get_free_pages(GFP_KERNEL, get_order(SZ_8K));
634 phys = virt_to_phys(virt);
635
636 /* fill the whole 4GB space */
637 for (iova = 0, i = 0; iova < max; iova += SZ_8K, ++i) {
638 dma_addr = dma_map_single(dev, virt, SZ_8K, DMA_TO_DEVICE);
639 if (dma_addr == DMA_ERROR_CODE) {
640 dev_err(dev, "Failed map on iter %d\n", i);
641 ret = -EINVAL;
642 goto out;
643 }
644 }
645
646 if (dma_map_single(dev, virt, SZ_4K, DMA_TO_DEVICE) != DMA_ERROR_CODE) {
647 dev_err(dev,
648 "dma_map_single unexpectedly (VA should have been exhausted)\n");
649 ret = -EINVAL;
650 goto out;
651 }
652
653 /*
654 * free up 4K at the very beginning, then leave one 4K mapping,
655 * then free up 8K. This will result in the next 8K map to skip
656 * over the 4K hole and take the 8K one.
657 */
658 dma_unmap_single(dev, 0, SZ_4K, DMA_TO_DEVICE);
659 dma_unmap_single(dev, SZ_8K, SZ_4K, DMA_TO_DEVICE);
660 dma_unmap_single(dev, SZ_8K + SZ_4K, SZ_4K, DMA_TO_DEVICE);
661
662 /* remap 8K */
663 dma_addr = dma_map_single(dev, virt, SZ_8K, DMA_TO_DEVICE);
664 if (dma_addr != SZ_8K) {
665 dma_addr_t expected = SZ_8K;
666
667 dev_err(dev, "Unexpected dma_addr. got: %pa expected: %pa\n",
668 &dma_addr, &expected);
669 ret = -EINVAL;
670 goto out;
671 }
672
673 /*
674 * now remap 4K. We should get the first 4K chunk that was skipped
675 * over during the previous 8K map. If we missed a TLB invalidate
676 * at that point this should explode.
677 */
678 dma_addr = dma_map_single(dev, virt, SZ_4K, DMA_TO_DEVICE);
679 if (dma_addr != 0) {
680 dma_addr_t expected = 0;
681
682 dev_err(dev, "Unexpected dma_addr. got: %pa expected: %pa\n",
683 &dma_addr, &expected);
684 ret = -EINVAL;
685 goto out;
686 }
687
688 if (dma_map_single(dev, virt, SZ_4K, DMA_TO_DEVICE) != DMA_ERROR_CODE) {
689 dev_err(dev,
690 "dma_map_single unexpectedly after remaps (VA should have been exhausted)\n");
691 ret = -EINVAL;
692 goto out;
693 }
694
695 /* we're all full again. unmap everything. */
Charan Teja Reddy29f61402017-02-09 20:44:29 +0530696 for (iova = 0; iova < max; iova += SZ_8K)
697 dma_unmap_single(dev, (dma_addr_t)iova, SZ_8K, DMA_TO_DEVICE);
Mitchel Humpherys45fc7122015-12-11 15:22:15 -0800698
699out:
700 free_pages((unsigned long)virt, get_order(SZ_8K));
701 return ret;
702}
703
/* State for a simple Fibonacci sequence generator used by the VA tests. */
struct fib_state {
	unsigned long cur;
	unsigned long prev;
};

/* Start the sequence at 1, 1. */
static void fib_init(struct fib_state *f)
{
	f->cur = f->prev = 1;
}

/*
 * Advance and return the next Fibonacci number.
 *
 * Fix: the intermediate sum was declared 'int', truncating the
 * 'unsigned long' fields and overflowing (UB) once the sequence passes
 * INT_MAX on 64-bit builds; use the full unsigned long width.
 */
static unsigned long get_next_fib(struct fib_state *f)
{
	unsigned long next = f->cur + f->prev;

	f->prev = f->cur;
	f->cur = next;
	return next;
}
722
723/*
724 * Not actually random. Just testing the fibs (and max - the fibs).
725 */
726static int __rand_va_sweep(struct device *dev, struct seq_file *s,
727 const size_t size)
728{
729 u64 iova;
Charan Teja Reddy29f61402017-02-09 20:44:29 +0530730 const u64 max = SZ_1G * 4ULL - 1;
Mitchel Humpherys45fc7122015-12-11 15:22:15 -0800731 int i, remapped, unmapped, ret = 0;
732 void *virt;
733 dma_addr_t dma_addr, dma_addr2;
734 struct fib_state fib;
735
736 virt = (void *)__get_free_pages(GFP_KERNEL, get_order(size));
737 if (!virt) {
738 if (size > SZ_8K) {
739 dev_err(dev,
740 "Failed to allocate %s of memory, which is a lot. Skipping test for this size\n",
741 _size_to_string(size));
742 return 0;
743 }
744 return -ENOMEM;
745 }
746
747 /* fill the whole 4GB space */
748 for (iova = 0, i = 0; iova < max; iova += size, ++i) {
749 dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
750 if (dma_addr == DMA_ERROR_CODE) {
751 dev_err(dev, "Failed map on iter %d\n", i);
752 ret = -EINVAL;
753 goto out;
754 }
755 }
756
757 /* now unmap "random" iovas */
758 unmapped = 0;
759 fib_init(&fib);
760 for (iova = get_next_fib(&fib) * size;
761 iova < max - size;
Charan Teja Reddy29f61402017-02-09 20:44:29 +0530762 iova = (u64)get_next_fib(&fib) * size) {
763 dma_addr = (dma_addr_t)(iova);
764 dma_addr2 = (dma_addr_t)((max + 1) - size - iova);
Mitchel Humpherys45fc7122015-12-11 15:22:15 -0800765 if (dma_addr == dma_addr2) {
766 WARN(1,
767 "%s test needs update! The random number sequence is folding in on itself and should be changed.\n",
768 __func__);
769 return -EINVAL;
770 }
771 dma_unmap_single(dev, dma_addr, size, DMA_TO_DEVICE);
772 dma_unmap_single(dev, dma_addr2, size, DMA_TO_DEVICE);
773 unmapped += 2;
774 }
775
776 /* and map until everything fills back up */
777 for (remapped = 0; ; ++remapped) {
778 dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
779 if (dma_addr == DMA_ERROR_CODE)
780 break;
781 }
782
783 if (unmapped != remapped) {
784 dev_err(dev,
785 "Unexpected random remap count! Unmapped %d but remapped %d\n",
786 unmapped, remapped);
787 ret = -EINVAL;
788 }
789
Charan Teja Reddy29f61402017-02-09 20:44:29 +0530790 for (iova = 0; iova < max; iova += size)
791 dma_unmap_single(dev, (dma_addr_t)iova, size, DMA_TO_DEVICE);
Mitchel Humpherys45fc7122015-12-11 15:22:15 -0800792
793out:
794 free_pages((unsigned long)virt, get_order(size));
795 return ret;
796}
797
798static int __check_mapping(struct device *dev, struct iommu_domain *domain,
799 dma_addr_t iova, phys_addr_t expected)
800{
801 phys_addr_t res = iommu_iova_to_phys_hard(domain, iova);
802 phys_addr_t res2 = iommu_iova_to_phys(domain, iova);
803
804 WARN(res != res2, "hard/soft iova_to_phys fns don't agree...");
805
806 if (res != expected) {
807 dev_err_ratelimited(dev,
808 "Bad translation for %pa! Expected: %pa Got: %pa\n",
809 &iova, &expected, &res);
810 return -EINVAL;
811 }
812
813 return 0;
814}
815
/*
 * Map @size-byte chunks until the 4GB IOVA space is exhausted, checking
 * that iovas come back sequentially from 0; optionally (when @domain is
 * non-NULL) spot-check translations at both ends of the space; then
 * verify one more map fails, and unmap everything.
 * Returns 0 on success, negative errno on failure.
 */
static int __full_va_sweep(struct device *dev, struct seq_file *s,
			   const size_t size, struct iommu_domain *domain)
{
	u64 iova;
	dma_addr_t dma_addr;
	void *virt;
	phys_addr_t phys;
	const u64 max = SZ_1G * 4ULL - 1;
	int ret = 0, i;

	virt = (void *)__get_free_pages(GFP_KERNEL, get_order(size));
	if (!virt) {
		/* large orders routinely fail; treat that as a skip */
		if (size > SZ_8K) {
			dev_err(dev,
				"Failed to allocate %s of memory, which is a lot. Skipping test for this size\n",
				_size_to_string(size));
			return 0;
		}
		return -ENOMEM;
	}
	phys = virt_to_phys(virt);

	/* expect iovas handed out sequentially from 0 */
	for (iova = 0, i = 0; iova < max; iova += size, ++i) {
		unsigned long expected = iova;

		dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
		if (dma_addr != expected) {
			dev_err_ratelimited(dev,
					    "Unexpected iova on iter %d (expected: 0x%lx got: 0x%lx)\n",
					    i, expected,
					    (unsigned long)dma_addr);
			ret = -EINVAL;
			goto out;
		}
	}

	if (domain) {
		/* check every mapping from 0..6M */
		for (iova = 0, i = 0; iova < SZ_2M * 3; iova += size, ++i) {
			phys_addr_t expected = phys;

			if (__check_mapping(dev, domain, iova, expected)) {
				dev_err(dev, "iter: %d\n", i);
				ret = -EINVAL;
				goto out;
			}
		}
		/* and from 4G..4G-6M */
		for (iova = 0, i = 0; iova < SZ_2M * 3; iova += size, ++i) {
			phys_addr_t expected = phys;
			/* NOTE(review): unsigned long truncates 4G-based iovas on 32-bit — confirm */
			unsigned long theiova = ((SZ_1G * 4ULL) - size) - iova;

			if (__check_mapping(dev, domain, theiova, expected)) {
				dev_err(dev, "iter: %d\n", i);
				ret = -EINVAL;
				goto out;
			}
		}
	}

	/* at this point, our VA space should be full */
	dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
	if (dma_addr != DMA_ERROR_CODE) {
		dev_err_ratelimited(dev,
				    "dma_map_single succeeded when it should have failed. Got iova: 0x%lx\n",
				    (unsigned long)dma_addr);
		ret = -EINVAL;
	}

out:
	for (iova = 0; iova < max; iova += size)
		dma_unmap_single(dev, (dma_addr_t)iova, size, DMA_TO_DEVICE);

	free_pages((unsigned long)virt, get_order(size));
	return ret;
}
892
/*
 * Log a test-progress message both to the kernel log (dev_err) and to
 * the debugfs seq_file, so results show up in dmesg and in the file read.
 */
#define ds_printf(d, s, fmt, ...) ({				\
			dev_err(d, fmt, ##__VA_ARGS__);		\
			seq_printf(s, fmt, ##__VA_ARGS__);	\
		})
897
/*
 * Exercise the fast DMA-API allocator across the device's IOVA space.
 *
 * @priv is a zero-terminated array of mapping sizes (size_t).  For each
 * size, the full VA range is swept twice (the second pass verifies the
 * space can be completely re-filled after being emptied), followed by
 * random-order sweeps and a TLB stress sweep.  Each sub-test's verdict
 * is reported via ds_printf().
 *
 * Returns 0 if every sub-test passed, -EINVAL otherwise (all sub-tests
 * are still run so the full report is produced).
 */
static int __functional_dma_api_va_test(struct device *dev, struct seq_file *s,
				     struct iommu_domain *domain, void *priv)
{
	int i, j, ret = 0;
	size_t *sz, *sizes = priv;

	/* outer loop runs once; kept so the repeat count is easy to bump */
	for (j = 0; j < 1; ++j) {
		for (sz = sizes; *sz; ++sz) {
			for (i = 0; i < 2; ++i) {
				ds_printf(dev, s, "Full VA sweep @%s %d",
				       _size_to_string(*sz), i);
				if (__full_va_sweep(dev, s, *sz, domain)) {
					ds_printf(dev, s, " -> FAILED\n");
					ret = -EINVAL;
				} else {
					ds_printf(dev, s, " -> SUCCEEDED\n");
				}
			}
		}
	}

	/* one extra 4K sweep after the sized sweeps above */
	ds_printf(dev, s, "bonus map:");
	if (__full_va_sweep(dev, s, SZ_4K, domain)) {
		ds_printf(dev, s, " -> FAILED\n");
		ret = -EINVAL;
	} else {
		ds_printf(dev, s, " -> SUCCEEDED\n");
	}

	for (sz = sizes; *sz; ++sz) {
		for (i = 0; i < 2; ++i) {
			ds_printf(dev, s, "Rand VA sweep @%s %d",
				   _size_to_string(*sz), i);
			if (__rand_va_sweep(dev, s, *sz)) {
				ds_printf(dev, s, " -> FAILED\n");
				ret = -EINVAL;
			} else {
				ds_printf(dev, s, " -> SUCCEEDED\n");
			}
		}
	}

	ds_printf(dev, s, "TLB stress sweep");
	if (__tlb_stress_sweep(dev, s)) {
		ds_printf(dev, s, " -> FAILED\n");
		ret = -EINVAL;
	} else {
		ds_printf(dev, s, " -> SUCCEEDED\n");
	}

	/* final 4K sweep to confirm the space is usable after the stress */
	ds_printf(dev, s, "second bonus map:");
	if (__full_va_sweep(dev, s, SZ_4K, domain)) {
		ds_printf(dev, s, " -> FAILED\n");
		ret = -EINVAL;
	} else {
		ds_printf(dev, s, " -> SUCCEEDED\n");
	}

	return ret;
}
958
/*
 * Allocate a coherent DMA buffer and verify CPU reads see CPU writes
 * through the coherent mapping: for each 1K stride, reset the buffer
 * to a known pattern, write a value, increment it in place and read it
 * back.
 *
 * Returns 0 on success, -EINVAL on allocation failure or a read-back
 * mismatch.  @domain and @ignored are unused (signature shared with the
 * other __functional_dma_api_* tests via __apply_to_new_mapping()).
 */
static int __functional_dma_api_alloc_test(struct device *dev,
					   struct seq_file *s,
					   struct iommu_domain *domain,
					   void *ignored)
{
	size_t size = SZ_1K * 742;	/* deliberately not a power of two */
	int ret = 0;
	u8 *data;
	dma_addr_t iova;

	/* Make sure we can allocate and use a buffer */
	ds_printf(dev, s, "Allocating coherent buffer");
	data = dma_alloc_coherent(dev, size, &iova, GFP_KERNEL);
	if (!data) {
		ds_printf(dev, s, " -> FAILED\n");
		ret = -EINVAL;
	} else {
		int i;

		ds_printf(dev, s, " -> SUCCEEDED\n");
		ds_printf(dev, s, "Using coherent buffer");
		for (i = 0; i < 742; ++i) {
			int ind = SZ_1K * i;
			u8 *p = data + ind;
			u8 val = i % 255;

			/*
			 * NOTE(review): the whole buffer is re-memset on every
			 * iteration (~500 MB of writes in total); presumably a
			 * deliberate stress of the coherent mapping — confirm
			 * before "optimizing" it out of the loop.
			 */
			memset(data, 0xa5, size);
			*p = val;
			(*p)++;
			if ((*p) != val + 1) {
				ds_printf(dev, s,
					  " -> FAILED on iter %d since %d != %d\n",
					  i, *p, val + 1);
				ret = -EINVAL;
			}
		}
		if (!ret)
			ds_printf(dev, s, " -> SUCCEEDED\n");
		dma_free_coherent(dev, size, data, iova);
	}

	return ret;
}
1002
1003static int __functional_dma_api_basic_test(struct device *dev,
1004 struct seq_file *s,
1005 struct iommu_domain *domain,
1006 void *ignored)
1007{
1008 size_t size = 1518;
1009 int i, j, ret = 0;
1010 u8 *data;
1011 dma_addr_t iova;
1012 phys_addr_t pa, pa2;
1013
1014 ds_printf(dev, s, "Basic DMA API test");
1015 /* Make sure we can allocate and use a buffer */
1016 for (i = 0; i < 1000; ++i) {
1017 data = kmalloc(size, GFP_KERNEL);
1018 if (!data) {
1019 ds_printf(dev, s, " -> FAILED\n");
1020 ret = -EINVAL;
1021 goto out;
1022 }
1023 memset(data, 0xa5, size);
1024 iova = dma_map_single(dev, data, size, DMA_TO_DEVICE);
1025 pa = iommu_iova_to_phys(domain, iova);
1026 pa2 = iommu_iova_to_phys_hard(domain, iova);
1027 if (pa != pa2) {
1028 dev_err(dev,
1029 "iova_to_phys doesn't match iova_to_phys_hard: %pa != %pa\n",
1030 &pa, &pa2);
1031 ret = -EINVAL;
1032 goto out;
1033 }
1034 pa2 = virt_to_phys(data);
1035 if (pa != pa2) {
1036 dev_err(dev,
1037 "iova_to_phys doesn't match virt_to_phys: %pa != %pa\n",
1038 &pa, &pa2);
1039 ret = -EINVAL;
1040 goto out;
1041 }
1042 dma_unmap_single(dev, iova, size, DMA_TO_DEVICE);
1043 for (j = 0; j < size; ++j) {
1044 if (data[j] != 0xa5) {
1045 dev_err(dev, "data[%d] != 0xa5\n", data[j]);
1046 ret = -EINVAL;
1047 goto out;
1048 }
1049 }
1050 kfree(data);
1051 }
1052
1053out:
1054 if (ret)
1055 ds_printf(dev, s, " -> FAILED\n");
1056 else
1057 ds_printf(dev, s, " -> SUCCEEDED\n");
1058
1059 return ret;
1060}
1061
1062/* Creates a fresh fast mapping and applies @fn to it */
/*
 * Creates a fresh fast mapping and applies @fn to it.
 *
 * Builds a 4GB arm_iommu mapping with DOMAIN_ATTR_FAST set, attaches
 * the debug device to it, enables the IOMMU config clocks, and runs
 * @fn(dev, s, domain, priv).  Cleanup is performed in reverse order via
 * the goto labels.  The PASS/FAIL verdict is written to @s; the
 * function itself always returns 0 so the seq_file read succeeds and
 * the result is reported in-band.
 */
static int __apply_to_new_mapping(struct seq_file *s,
				    int (*fn)(struct device *dev,
					      struct seq_file *s,
					      struct iommu_domain *domain,
					      void *priv),
				    void *priv)
{
	struct dma_iommu_mapping *mapping;
	struct iommu_debug_device *ddev = s->private;
	struct device *dev = ddev->dev;
	int ret = -EINVAL, fast = 1;
	phys_addr_t pt_phys;

	mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
						(SZ_1G * 4ULL));
	if (!mapping)
		goto out;

	/* must be set before attach so the fast allocator is used */
	if (iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_FAST, &fast)) {
		seq_puts(s, "iommu_domain_set_attr failed\n");
		goto out_release_mapping;
	}

	if (arm_iommu_attach_device(dev, mapping))
		goto out_release_mapping;

	if (iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_PT_BASE_ADDR,
				  &pt_phys)) {
		ds_printf(dev, s, "Couldn't get page table base address\n");
		goto out_release_mapping;
	}

	dev_err(dev, "testing with pgtables at %pa\n", &pt_phys);
	if (iommu_enable_config_clocks(mapping->domain)) {
		ds_printf(dev, s, "Couldn't enable clocks\n");
		goto out_release_mapping;
	}
	ret = fn(dev, s, mapping->domain, priv);
	iommu_disable_config_clocks(mapping->domain);

	arm_iommu_detach_device(dev);
out_release_mapping:
	arm_iommu_release_mapping(mapping);
out:
	seq_printf(s, "%s\n", ret ? "FAIL" : "SUCCESS");
	return 0;
}
1110
1111static int iommu_debug_functional_fast_dma_api_show(struct seq_file *s,
1112 void *ignored)
1113{
1114 size_t sizes[] = {SZ_4K, SZ_8K, SZ_16K, SZ_64K, 0};
1115 int ret = 0;
1116
1117 ret |= __apply_to_new_mapping(s, __functional_dma_api_alloc_test, NULL);
1118 ret |= __apply_to_new_mapping(s, __functional_dma_api_basic_test, NULL);
1119 ret |= __apply_to_new_mapping(s, __functional_dma_api_va_test, sizes);
1120 return ret;
1121}
1122
/* debugfs open: bind the show handler to this file via single_open() */
static int iommu_debug_functional_fast_dma_api_open(struct inode *inode,
						    struct file *file)
{
	return single_open(file, iommu_debug_functional_fast_dma_api_show,
			   inode->i_private);
}
1129
/* read-only debugfs file: running it executes the fast DMA API tests */
static const struct file_operations iommu_debug_functional_fast_dma_api_fops = {
	.open	 = iommu_debug_functional_fast_dma_api_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};
1136
/*
 * seq_file show handler: run the coherent-alloc and basic DMA API
 * tests against a plain (non-fast) arm_iommu mapping spanning almost
 * the whole 32-bit space.  Note @sizes is passed as the tests' priv
 * argument, which both tests ignore.  Verdict is written to @s;
 * returns 0 so the read itself succeeds.
 */
static int iommu_debug_functional_arm_dma_api_show(struct seq_file *s,
						   void *ignored)
{
	struct dma_iommu_mapping *mapping;
	struct iommu_debug_device *ddev = s->private;
	struct device *dev = ddev->dev;
	size_t sizes[] = {SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12, 0};
	int ret = -EINVAL;

	/* Make the size equal to MAX_ULONG */
	mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
					   (SZ_1G * 4ULL - 1));
	if (!mapping)
		goto out;

	if (arm_iommu_attach_device(dev, mapping))
		goto out_release_mapping;

	ret = __functional_dma_api_alloc_test(dev, s, mapping->domain, sizes);
	ret |= __functional_dma_api_basic_test(dev, s, mapping->domain, sizes);

	arm_iommu_detach_device(dev);
out_release_mapping:
	arm_iommu_release_mapping(mapping);
out:
	seq_printf(s, "%s\n", ret ? "FAIL" : "SUCCESS");
	return 0;
}
1165
/* debugfs open: bind the show handler to this file via single_open() */
static int iommu_debug_functional_arm_dma_api_open(struct inode *inode,
						   struct file *file)
{
	return single_open(file, iommu_debug_functional_arm_dma_api_show,
			   inode->i_private);
}
1172
/* read-only debugfs file: running it executes the arm DMA API tests */
static const struct file_operations iommu_debug_functional_arm_dma_api_fops = {
	.open	 = iommu_debug_functional_arm_dma_api_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};
1179
/*
 * Allocate a fresh debug iommu domain and attach the device's iommu
 * group to it.  When @is_secure, the domain's secure VMID is set to
 * @val before attaching.
 *
 * On success ddev->domain holds the new domain and 0 is returned;
 * on failure the domain is freed, ddev->domain is reset to NULL and
 * -ENOMEM (alloc failure) or -EIO is returned.
 */
static int iommu_debug_attach_do_attach(struct iommu_debug_device *ddev,
					int val, bool is_secure)
{
	struct iommu_group *group = ddev->dev->iommu_group;

	ddev->domain = iommu_domain_alloc(&platform_bus_type);
	if (!ddev->domain) {
		pr_err("Couldn't allocate domain\n");
		return -ENOMEM;
	}

	/* secure VMID must be configured before the group is attached */
	if (is_secure && iommu_domain_set_attr(ddev->domain,
					       DOMAIN_ATTR_SECURE_VMID,
					       &val)) {
		pr_err("Couldn't set secure vmid to %d\n", val);
		goto out_domain_free;
	}

	if (iommu_attach_group(ddev->domain, group)) {
		dev_err(ddev->dev, "Couldn't attach new domain to device\n");
		goto out_domain_free;
	}

	return 0;

out_domain_free:
	iommu_domain_free(ddev->domain);
	ddev->domain = NULL;
	return -EIO;
}
1210
/*
 * Backend for the "dma_attach" debugfs write.
 *
 * Writing a nonzero integer creates a 4GB arm_iommu DMA mapping and
 * attaches the device to it; writing 0 detaches the device and releases
 * the mapping.  Attach state is tracked through dev->archdata.mapping
 * (set by arm_iommu_attach_device), not through ddev->domain.
 *
 * Returns @count on success, -EFAULT on a bad user buffer, -EINVAL on
 * state errors or mapping-creation failure.
 */
static ssize_t __iommu_debug_dma_attach_write(struct file *file,
					      const char __user *ubuf,
					      size_t count, loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	struct device *dev = ddev->dev;
	struct dma_iommu_mapping *dma_mapping;
	ssize_t retval = -EINVAL;
	int val;

	if (kstrtoint_from_user(ubuf, count, 0, &val)) {
		pr_err("Invalid format. Expected a hex or decimal integer");
		retval = -EFAULT;
		goto out;
	}

	if (val) {
		/* already attached if a mapping with a live domain exists */
		if (dev->archdata.mapping)
			if (dev->archdata.mapping->domain) {
				pr_err("Already attached.\n");
				retval = -EINVAL;
				goto out;
			}
		if (WARN(dev->archdata.iommu,
			 "Attachment tracking out of sync with device\n")) {
			retval = -EINVAL;
			goto out;
		}

		dma_mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
				(SZ_1G * 4ULL));

		if (!dma_mapping)
			goto out;

		if (arm_iommu_attach_device(dev, dma_mapping))
			goto out_release_mapping;
		pr_err("Attached\n");
	} else {
		if (!dev->archdata.mapping) {
			pr_err("No mapping. Did you already attach?\n");
			retval = -EINVAL;
			goto out;
		}
		if (!dev->archdata.mapping->domain) {
			pr_err("No domain. Did you already attach?\n");
			retval = -EINVAL;
			goto out;
		}
		arm_iommu_detach_device(dev);
		arm_iommu_release_mapping(dev->archdata.mapping);
		pr_err("Detached\n");
	}
	retval = count;
	return retval;

out_release_mapping:
	arm_iommu_release_mapping(dma_mapping);
out:
	return retval;
}
1272
/*
 * Backend for the "attach" / "secure_attach" debugfs writes.
 *
 * Writing a nonzero integer allocates a debug domain (with secure VMID
 * @val when @is_secure) and attaches the device's group to it; writing
 * 0 detaches and frees the domain.  Refuses to attach while another
 * driver already owns the device's iommu domain.
 *
 * Returns @count on success, -EFAULT on a bad user buffer, -EINVAL on
 * state errors, -EIO if the attach itself fails.
 */
static ssize_t __iommu_debug_attach_write(struct file *file,
					  const char __user *ubuf,
					  size_t count, loff_t *offset,
					  bool is_secure)
{
	struct iommu_debug_device *ddev = file->private_data;
	struct device *dev = ddev->dev;
	struct iommu_domain *domain;
	ssize_t retval;
	int val;

	if (kstrtoint_from_user(ubuf, count, 0, &val)) {
		pr_err("Invalid format. Expected a hex or decimal integer");
		retval = -EFAULT;
		goto out;
	}

	if (val) {
		if (ddev->domain) {
			pr_err("Iommu-Debug is already attached?\n");
			retval = -EINVAL;
			goto out;
		}

		/* refuse to fight another driver for the device's iommu */
		domain = iommu_get_domain_for_dev(dev);
		if (domain) {
			pr_err("Another driver is using this device's iommu\n"
				"Iommu-Debug cannot be used concurrently\n");
			retval = -EINVAL;
			goto out;
		}
		if (iommu_debug_attach_do_attach(ddev, val, is_secure)) {
			retval = -EIO;
			goto out;
		}
		pr_err("Attached\n");
	} else {
		if (!ddev->domain) {
			pr_err("Iommu-Debug is not attached?\n");
			retval = -EINVAL;
			goto out;
		}
		iommu_detach_group(ddev->domain, dev->iommu_group);
		iommu_domain_free(ddev->domain);
		ddev->domain = NULL;
		pr_err("Detached\n");
	}

	retval = count;
out:
	return retval;
}
1325
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07001326static ssize_t iommu_debug_dma_attach_write(struct file *file,
1327 const char __user *ubuf,
1328 size_t count, loff_t *offset)
1329{
1330 return __iommu_debug_dma_attach_write(file, ubuf, count, offset);
1331
1332}
1333
1334static ssize_t iommu_debug_dma_attach_read(struct file *file, char __user *ubuf,
1335 size_t count, loff_t *offset)
1336{
1337 struct iommu_debug_device *ddev = file->private_data;
1338 struct device *dev = ddev->dev;
1339 char c[2];
1340
1341 if (*offset)
1342 return 0;
1343
1344 if (!dev->archdata.mapping)
1345 c[0] = '0';
1346 else
1347 c[0] = dev->archdata.mapping->domain ? '1' : '0';
1348
1349 c[1] = '\n';
1350 if (copy_to_user(ubuf, &c, 2)) {
1351 pr_err("copy_to_user failed\n");
1352 return -EFAULT;
1353 }
1354 *offset = 1; /* non-zero means we're done */
1355
1356 return 2;
1357}
1358
/* debugfs "dma_attach" file: write 1/0 to attach/detach, read for state */
static const struct file_operations iommu_debug_dma_attach_fops = {
	.open	= simple_open,
	.write	= iommu_debug_dma_attach_write,
	.read	= iommu_debug_dma_attach_read,
};
1364
1365static ssize_t iommu_debug_test_virt_addr_read(struct file *file,
1366 char __user *ubuf,
1367 size_t count, loff_t *offset)
1368{
1369 char buf[100];
1370 ssize_t retval;
1371 size_t buflen;
1372 int buf_len = sizeof(buf);
1373
1374 if (*offset)
1375 return 0;
1376
1377 memset(buf, 0, buf_len);
1378
1379 if (!test_virt_addr)
1380 strlcpy(buf, "FAIL\n", buf_len);
1381 else
1382 snprintf(buf, buf_len, "0x%pK\n", test_virt_addr);
1383
1384 buflen = strlen(buf);
1385 if (copy_to_user(ubuf, buf, buflen)) {
1386 pr_err("Couldn't copy_to_user\n");
1387 retval = -EFAULT;
1388 } else {
1389 *offset = 1; /* non-zero means we're done */
1390 retval = buflen;
1391 }
1392
1393 return retval;
1394}
1395
/* debugfs "test_virt_addr" file: read-only, reports the test buffer VA */
static const struct file_operations iommu_debug_test_virt_addr_fops = {
	.open	= simple_open,
	.read	= iommu_debug_test_virt_addr_read,
};
1400
Mitchel Humpherysac4c38b2015-07-30 19:40:21 -07001401static ssize_t iommu_debug_attach_write(struct file *file,
1402 const char __user *ubuf,
1403 size_t count, loff_t *offset)
1404{
1405 return __iommu_debug_attach_write(file, ubuf, count, offset,
1406 false);
1407
1408}
1409
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001410static ssize_t iommu_debug_attach_read(struct file *file, char __user *ubuf,
1411 size_t count, loff_t *offset)
1412{
1413 struct iommu_debug_device *ddev = file->private_data;
1414 char c[2];
1415
1416 if (*offset)
1417 return 0;
1418
1419 c[0] = ddev->domain ? '1' : '0';
1420 c[1] = '\n';
1421 if (copy_to_user(ubuf, &c, 2)) {
1422 pr_err("copy_to_user failed\n");
1423 return -EFAULT;
1424 }
1425 *offset = 1; /* non-zero means we're done */
1426
1427 return 2;
1428}
1429
/* debugfs "attach" file: write 1/0 to attach/detach, read for state */
static const struct file_operations iommu_debug_attach_fops = {
	.open	= simple_open,
	.write	= iommu_debug_attach_write,
	.read	= iommu_debug_attach_read,
};
1435
Mitchel Humpherysac4c38b2015-07-30 19:40:21 -07001436static ssize_t iommu_debug_attach_write_secure(struct file *file,
1437 const char __user *ubuf,
1438 size_t count, loff_t *offset)
1439{
1440 return __iommu_debug_attach_write(file, ubuf, count, offset,
1441 true);
Mitchel Humpherysac4c38b2015-07-30 19:40:21 -07001442}
1443
/* debugfs "secure_attach" file: like "attach" but sets a secure VMID */
static const struct file_operations iommu_debug_secure_attach_fops = {
	.open	= simple_open,
	.write	= iommu_debug_attach_write_secure,
	.read	= iommu_debug_attach_read,
};
1449
/*
 * debugfs write handler: parse an iova from user input and save it in
 * ddev->iova for a subsequent PTE lookup via the read handler.
 * Returns @count on success, -EINVAL (and clears the saved iova) on a
 * parse error.
 */
static ssize_t iommu_debug_pte_write(struct file *file,
				      const char __user *ubuf,
				      size_t count, loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	dma_addr_t iova;

	if (kstrtox_from_user(ubuf, count, 0, &iova)) {
		pr_err("Invalid format for iova\n");
		ddev->iova = 0;
		return -EINVAL;
	}

	ddev->iova = iova;
	pr_err("Saved iova=%pa for future PTE commands\n", &iova);
	return count;
}
1467
1468
/*
 * debugfs read handler: look up the page table entry for the iova
 * previously saved by the write handler (or by a dma_map) in the
 * device's DMA domain, and report it as "pte=%016llx\n" or "FAIL\n".
 * Requires a prior DMA attach (dev->archdata.mapping populated).
 */
static ssize_t iommu_debug_pte_read(struct file *file, char __user *ubuf,
				     size_t count, loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	struct device *dev = ddev->dev;
	uint64_t pte;
	char buf[100];
	ssize_t retval;
	size_t buflen;

	if (!dev->archdata.mapping) {
		pr_err("No mapping. Did you already attach?\n");
		return -EINVAL;
	}
	if (!dev->archdata.mapping->domain) {
		pr_err("No domain. Did you already attach?\n");
		return -EINVAL;
	}

	if (*offset)
		return 0;

	memset(buf, 0, sizeof(buf));

	pte = iommu_iova_to_pte(dev->archdata.mapping->domain,
			ddev->iova);

	if (!pte)
		strlcpy(buf, "FAIL\n", sizeof(buf));
	else
		snprintf(buf, sizeof(buf), "pte=%016llx\n", pte);

	buflen = strlen(buf);
	if (copy_to_user(ubuf, buf, buflen)) {
		pr_err("Couldn't copy_to_user\n");
		retval = -EFAULT;
	} else {
		*offset = 1; /* non-zero means we're done */
		retval = buflen;
	}

	return retval;
}
1512
/* debugfs "pte" file: write an iova, then read back its page table entry */
static const struct file_operations iommu_debug_pte_fops = {
	.open	= simple_open,
	.write	= iommu_debug_pte_write,
	.read	= iommu_debug_pte_read,
};
1518
/*
 * debugfs write handler: parse an iova from user input and save it in
 * ddev->iova for a subsequent address-translation (ATOS) lookup via the
 * read handler.  Returns @count on success, -EINVAL (and clears the
 * saved iova) on a parse error.
 */
static ssize_t iommu_debug_atos_write(struct file *file,
				      const char __user *ubuf,
				      size_t count, loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	dma_addr_t iova;

	if (kstrtox_from_user(ubuf, count, 0, &iova)) {
		pr_err("Invalid format for iova\n");
		ddev->iova = 0;
		return -EINVAL;
	}

	ddev->iova = iova;
	pr_err("Saved iova=%pa for future ATOS commands\n", &iova);
	return count;
}
1536
/*
 * debugfs read handler: translate the saved iova using the hardware
 * walk (iommu_iova_to_phys_hard) on the debug domain and report the
 * physical address, or "FAIL\n".  On failure the software walk result
 * is logged for comparison.  Requires a prior attach (ddev->domain).
 */
static ssize_t iommu_debug_atos_read(struct file *file, char __user *ubuf,
				     size_t count, loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	phys_addr_t phys;
	char buf[100];
	ssize_t retval;
	size_t buflen;

	if (!ddev->domain) {
		pr_err("No domain. Did you already attach?\n");
		return -EINVAL;
	}

	if (*offset)
		return 0;

	memset(buf, 0, 100);

	phys = iommu_iova_to_phys_hard(ddev->domain, ddev->iova);
	if (!phys) {
		strlcpy(buf, "FAIL\n", 100);
		/* log what the software page-table walk thinks for contrast */
		phys = iommu_iova_to_phys(ddev->domain, ddev->iova);
		dev_err(ddev->dev, "ATOS for %pa failed. Software walk returned: %pa\n",
			&ddev->iova, &phys);
	} else {
		snprintf(buf, 100, "%pa\n", &phys);
	}

	buflen = strlen(buf);
	if (copy_to_user(ubuf, buf, buflen)) {
		pr_err("Couldn't copy_to_user\n");
		retval = -EFAULT;
	} else {
		*offset = 1; /* non-zero means we're done */
		retval = buflen;
	}

	return retval;
}
1577
/* debugfs "atos" file: write an iova, read back its hardware translation */
static const struct file_operations iommu_debug_atos_fops = {
	.open	= simple_open,
	.write	= iommu_debug_atos_write,
	.read	= iommu_debug_atos_read,
};
1583
/*
 * debugfs read handler: like iommu_debug_atos_read() but translates the
 * saved iova in the device's DMA-API domain (dev->archdata.mapping)
 * rather than the debug domain.  Reports the physical address or
 * "FAIL\n".  Requires a prior DMA attach.
 */
static ssize_t iommu_debug_dma_atos_read(struct file *file, char __user *ubuf,
				     size_t count, loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	struct device *dev = ddev->dev;
	phys_addr_t phys;
	char buf[100];
	ssize_t retval;
	size_t buflen;

	if (!dev->archdata.mapping) {
		pr_err("No mapping. Did you already attach?\n");
		return -EINVAL;
	}
	if (!dev->archdata.mapping->domain) {
		pr_err("No domain. Did you already attach?\n");
		return -EINVAL;
	}

	if (*offset)
		return 0;

	memset(buf, 0, sizeof(buf));

	phys = iommu_iova_to_phys_hard(dev->archdata.mapping->domain,
			ddev->iova);
	if (!phys)
		strlcpy(buf, "FAIL\n", sizeof(buf));
	else
		snprintf(buf, sizeof(buf), "%pa\n", &phys);

	buflen = strlen(buf);
	if (copy_to_user(ubuf, buf, buflen)) {
		pr_err("Couldn't copy_to_user\n");
		retval = -EFAULT;
	} else {
		*offset = 1; /* non-zero means we're done */
		retval = buflen;
	}

	return retval;
}
1626
/*
 * debugfs "dma_atos" file.  Note the write handler is deliberately
 * shared with the "atos" file: both just save an iova in ddev->iova.
 */
static const struct file_operations iommu_debug_dma_atos_fops = {
	.open	= simple_open,
	.write	= iommu_debug_atos_write,
	.read	= iommu_debug_dma_atos_read,
};
1632
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001633static ssize_t iommu_debug_map_write(struct file *file, const char __user *ubuf,
1634 size_t count, loff_t *offset)
1635{
Shiraz Hashim3c28c962016-07-04 15:05:35 +05301636 ssize_t retval = -EINVAL;
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001637 int ret;
1638 char *comma1, *comma2, *comma3;
1639 char buf[100];
1640 dma_addr_t iova;
1641 phys_addr_t phys;
1642 size_t size;
1643 int prot;
1644 struct iommu_debug_device *ddev = file->private_data;
1645
1646 if (count >= 100) {
1647 pr_err("Value too large\n");
1648 return -EINVAL;
1649 }
1650
1651 if (!ddev->domain) {
1652 pr_err("No domain. Did you already attach?\n");
1653 return -EINVAL;
1654 }
1655
1656 memset(buf, 0, 100);
1657
1658 if (copy_from_user(buf, ubuf, count)) {
1659 pr_err("Couldn't copy from user\n");
1660 retval = -EFAULT;
1661 }
1662
1663 comma1 = strnchr(buf, count, ',');
1664 if (!comma1)
1665 goto invalid_format;
1666
1667 comma2 = strnchr(comma1 + 1, count, ',');
1668 if (!comma2)
1669 goto invalid_format;
1670
1671 comma3 = strnchr(comma2 + 1, count, ',');
1672 if (!comma3)
1673 goto invalid_format;
1674
1675 /* split up the words */
1676 *comma1 = *comma2 = *comma3 = '\0';
1677
Susheel Khiania4417e72016-07-12 11:28:32 +05301678 if (kstrtoux(buf, 0, &iova))
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001679 goto invalid_format;
1680
Susheel Khiania4417e72016-07-12 11:28:32 +05301681 if (kstrtoux(comma1 + 1, 0, &phys))
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001682 goto invalid_format;
1683
Susheel Khiania4417e72016-07-12 11:28:32 +05301684 if (kstrtosize_t(comma2 + 1, 0, &size))
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001685 goto invalid_format;
1686
1687 if (kstrtoint(comma3 + 1, 0, &prot))
1688 goto invalid_format;
1689
1690 ret = iommu_map(ddev->domain, iova, phys, size, prot);
1691 if (ret) {
1692 pr_err("iommu_map failed with %d\n", ret);
1693 retval = -EIO;
1694 goto out;
1695 }
1696
1697 retval = count;
1698 pr_err("Mapped %pa to %pa (len=0x%zx, prot=0x%x)\n",
1699 &iova, &phys, size, prot);
1700out:
1701 return retval;
1702
1703invalid_format:
1704 pr_err("Invalid format. Expected: iova,phys,len,prot where `prot' is the bitwise OR of IOMMU_READ, IOMMU_WRITE, etc.\n");
1705 return -EINVAL;
1706}
1707
/* debugfs "map" file: write "iova,phys,len,prot" to map into the domain */
static const struct file_operations iommu_debug_map_fops = {
	.open	= simple_open,
	.write	= iommu_debug_map_write,
};
1712
/*
 * Performs DMA mapping of a given virtual address and size to an iova address.
 * User input format: (addr,len,dma attr) where dma attr is:
 * 0: normal mapping
 * 1: force coherent mapping
 * 2: force non-coherent mapping
 * 3: use system cache
 */
1721static ssize_t iommu_debug_dma_map_write(struct file *file,
1722 const char __user *ubuf, size_t count, loff_t *offset)
1723{
1724 ssize_t retval = -EINVAL;
1725 int ret;
1726 char *comma1, *comma2;
1727 char buf[100];
1728 unsigned long addr;
1729 void *v_addr;
1730 dma_addr_t iova;
1731 size_t size;
1732 unsigned int attr;
1733 unsigned long dma_attrs;
1734 struct iommu_debug_device *ddev = file->private_data;
1735 struct device *dev = ddev->dev;
1736
1737 if (count >= sizeof(buf)) {
1738 pr_err("Value too large\n");
1739 return -EINVAL;
1740 }
1741
1742 if (!dev->archdata.mapping) {
1743 pr_err("No mapping. Did you already attach?\n");
1744 retval = -EINVAL;
1745 goto out;
1746 }
1747 if (!dev->archdata.mapping->domain) {
1748 pr_err("No domain. Did you already attach?\n");
1749 retval = -EINVAL;
1750 goto out;
1751 }
1752
1753 memset(buf, 0, sizeof(buf));
1754
1755 if (copy_from_user(buf, ubuf, count)) {
1756 pr_err("Couldn't copy from user\n");
1757 retval = -EFAULT;
1758 goto out;
1759 }
1760
1761 comma1 = strnchr(buf, count, ',');
1762 if (!comma1)
1763 goto invalid_format;
1764
1765 comma2 = strnchr(comma1 + 1, count, ',');
1766 if (!comma2)
1767 goto invalid_format;
1768
1769 *comma1 = *comma2 = '\0';
1770
1771 if (kstrtoul(buf, 0, &addr))
1772 goto invalid_format;
1773 v_addr = (void *)addr;
1774
1775 if (kstrtosize_t(comma1 + 1, 0, &size))
1776 goto invalid_format;
1777
1778 if (kstrtouint(comma2 + 1, 0, &attr))
1779 goto invalid_format;
1780
1781 if (v_addr < test_virt_addr || v_addr > (test_virt_addr + SZ_1M - 1))
1782 goto invalid_addr;
1783
1784 if (attr == 0)
1785 dma_attrs = 0;
1786 else if (attr == 1)
1787 dma_attrs = DMA_ATTR_FORCE_COHERENT;
1788 else if (attr == 2)
1789 dma_attrs = DMA_ATTR_FORCE_NON_COHERENT;
Sudarshan Rajagopalan5d6c6f52017-05-19 17:00:31 -07001790 else if (attr == 3)
1791 dma_attrs = DMA_ATTR_IOMMU_USE_UPSTREAM_HINT;
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07001792 else
1793 goto invalid_format;
1794
1795 iova = dma_map_single_attrs(dev, v_addr, size,
1796 DMA_TO_DEVICE, dma_attrs);
1797
1798 if (dma_mapping_error(dev, iova)) {
1799 pr_err("Failed to perform dma_map_single\n");
1800 ret = -EINVAL;
1801 goto out;
1802 }
1803
1804 retval = count;
1805 pr_err("Mapped 0x%p to %pa (len=0x%zx)\n",
1806 v_addr, &iova, size);
1807 ddev->iova = iova;
1808 pr_err("Saved iova=%pa for future PTE commands\n", &iova);
1809out:
1810 return retval;
1811
1812invalid_format:
Sudarshan Rajagopalan5d6c6f52017-05-19 17:00:31 -07001813 pr_err("Invalid format. Expected: addr,len,dma attr where 'dma attr' is\n0: normal mapping\n1: force coherent\n2: force non-cohernet\n3: use system cache\n");
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07001814 return retval;
1815
1816invalid_addr:
1817 pr_err("Invalid addr given! Address should be within 1MB size from start addr returned by doing 'cat test_virt_addr'.\n");
1818 return retval;
1819}
1820
/*
 * debugfs read handler: report the iova most recently produced by the
 * dma_map write handler (saved in ddev->iova).  Requires a prior DMA
 * attach.
 */
static ssize_t iommu_debug_dma_map_read(struct file *file, char __user *ubuf,
	     size_t count, loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	struct device *dev = ddev->dev;
	char buf[100];
	ssize_t retval;
	size_t buflen;
	dma_addr_t iova;

	if (!dev->archdata.mapping) {
		pr_err("No mapping. Did you already attach?\n");
		return -EINVAL;
	}
	if (!dev->archdata.mapping->domain) {
		pr_err("No domain. Did you already attach?\n");
		return -EINVAL;
	}

	if (*offset)
		return 0;

	memset(buf, 0, sizeof(buf));

	iova = ddev->iova;
	snprintf(buf, sizeof(buf), "%pa\n", &iova);

	buflen = strlen(buf);
	if (copy_to_user(ubuf, buf, buflen)) {
		pr_err("Couldn't copy_to_user\n");
		retval = -EFAULT;
	} else {
		*offset = 1; /* non-zero means we're done */
		retval = buflen;
	}

	return retval;
}
1859
/* debugfs "dma_map" file: write "addr,len,attr" to map; read saved iova */
static const struct file_operations iommu_debug_dma_map_fops = {
	.open	= simple_open,
	.write	= iommu_debug_dma_map_write,
	.read	= iommu_debug_dma_map_read,
};
1865
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001866static ssize_t iommu_debug_unmap_write(struct file *file,
1867 const char __user *ubuf,
1868 size_t count, loff_t *offset)
1869{
1870 ssize_t retval = 0;
1871 char *comma1;
1872 char buf[100];
1873 dma_addr_t iova;
1874 size_t size;
1875 size_t unmapped;
1876 struct iommu_debug_device *ddev = file->private_data;
1877
1878 if (count >= 100) {
1879 pr_err("Value too large\n");
1880 return -EINVAL;
1881 }
1882
1883 if (!ddev->domain) {
1884 pr_err("No domain. Did you already attach?\n");
1885 return -EINVAL;
1886 }
1887
1888 memset(buf, 0, 100);
1889
1890 if (copy_from_user(buf, ubuf, count)) {
1891 pr_err("Couldn't copy from user\n");
1892 retval = -EFAULT;
1893 goto out;
1894 }
1895
1896 comma1 = strnchr(buf, count, ',');
1897 if (!comma1)
1898 goto invalid_format;
1899
1900 /* split up the words */
1901 *comma1 = '\0';
1902
Susheel Khiania4417e72016-07-12 11:28:32 +05301903 if (kstrtoux(buf, 0, &iova))
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001904 goto invalid_format;
1905
Susheel Khiania4417e72016-07-12 11:28:32 +05301906 if (kstrtosize_t(comma1 + 1, 0, &size))
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001907 goto invalid_format;
1908
1909 unmapped = iommu_unmap(ddev->domain, iova, size);
1910 if (unmapped != size) {
1911 pr_err("iommu_unmap failed. Expected to unmap: 0x%zx, unmapped: 0x%zx",
1912 size, unmapped);
1913 return -EIO;
1914 }
1915
1916 retval = count;
1917 pr_err("Unmapped %pa (len=0x%zx)\n", &iova, size);
1918out:
1919 return retval;
1920
1921invalid_format:
1922 pr_err("Invalid format. Expected: iova,len\n");
Patrick Daly5a5e3ff2016-10-13 19:31:50 -07001923 return -EINVAL;
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001924}
1925
/* debugfs "unmap" file: write "iova,len" to unmap from the debug domain */
static const struct file_operations iommu_debug_unmap_fops = {
	.open	= simple_open,
	.write	= iommu_debug_unmap_write,
};
1930
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07001931static ssize_t iommu_debug_dma_unmap_write(struct file *file,
1932 const char __user *ubuf,
1933 size_t count, loff_t *offset)
1934{
1935 ssize_t retval = 0;
1936 char *comma1, *comma2;
1937 char buf[100];
1938 size_t size;
1939 unsigned int attr;
1940 dma_addr_t iova;
1941 unsigned long dma_attrs;
1942 struct iommu_debug_device *ddev = file->private_data;
1943 struct device *dev = ddev->dev;
1944
1945 if (count >= sizeof(buf)) {
1946 pr_err("Value too large\n");
1947 return -EINVAL;
1948 }
1949
1950 if (!dev->archdata.mapping) {
1951 pr_err("No mapping. Did you already attach?\n");
1952 retval = -EINVAL;
1953 goto out;
1954 }
1955 if (!dev->archdata.mapping->domain) {
1956 pr_err("No domain. Did you already attach?\n");
1957 retval = -EINVAL;
1958 goto out;
1959 }
1960
1961 memset(buf, 0, sizeof(buf));
1962
1963 if (copy_from_user(buf, ubuf, count)) {
1964 pr_err("Couldn't copy from user\n");
1965 retval = -EFAULT;
1966 goto out;
1967 }
1968
1969 comma1 = strnchr(buf, count, ',');
1970 if (!comma1)
1971 goto invalid_format;
1972
1973 comma2 = strnchr(comma1 + 1, count, ',');
1974 if (!comma2)
1975 goto invalid_format;
1976
1977 *comma1 = *comma2 = '\0';
1978
1979 if (kstrtoux(buf, 0, &iova))
1980 goto invalid_format;
1981
1982 if (kstrtosize_t(comma1 + 1, 0, &size))
1983 goto invalid_format;
1984
1985 if (kstrtouint(comma2 + 1, 0, &attr))
1986 goto invalid_format;
1987
1988 if (attr == 0)
1989 dma_attrs = 0;
1990 else if (attr == 1)
1991 dma_attrs = DMA_ATTR_FORCE_COHERENT;
1992 else if (attr == 2)
1993 dma_attrs = DMA_ATTR_FORCE_NON_COHERENT;
Sudarshan Rajagopalan5d6c6f52017-05-19 17:00:31 -07001994 else if (attr == 3)
1995 dma_attrs = DMA_ATTR_IOMMU_USE_UPSTREAM_HINT;
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07001996 else
1997 goto invalid_format;
1998
1999 dma_unmap_single_attrs(dev, iova, size, DMA_TO_DEVICE, dma_attrs);
2000
2001 retval = count;
2002 pr_err("Unmapped %pa (len=0x%zx)\n", &iova, size);
2003out:
2004 return retval;
2005
2006invalid_format:
2007 pr_err("Invalid format. Expected: iova,len, dma attr\n");
2008 return retval;
2009}
2010
/* debugfs file ops for "dma_unmap": write-only "iova,len,attr" interface */
static const struct file_operations iommu_debug_dma_unmap_fops = {
	.open = simple_open,
	.write = iommu_debug_dma_unmap_write,
};
2015
Mitchel Humpherys0d1b8262016-02-01 16:53:39 -08002016static ssize_t iommu_debug_config_clocks_write(struct file *file,
2017 const char __user *ubuf,
2018 size_t count, loff_t *offset)
2019{
2020 char buf;
2021 struct iommu_debug_device *ddev = file->private_data;
2022 struct device *dev = ddev->dev;
2023
2024 /* we're expecting a single character plus (optionally) a newline */
2025 if (count > 2) {
2026 dev_err(dev, "Invalid value\n");
2027 return -EINVAL;
2028 }
2029
2030 if (!ddev->domain) {
2031 dev_err(dev, "No domain. Did you already attach?\n");
2032 return -EINVAL;
2033 }
2034
2035 if (copy_from_user(&buf, ubuf, 1)) {
2036 dev_err(dev, "Couldn't copy from user\n");
2037 return -EFAULT;
2038 }
2039
2040 switch (buf) {
2041 case '0':
2042 dev_err(dev, "Disabling config clocks\n");
2043 iommu_disable_config_clocks(ddev->domain);
2044 break;
2045 case '1':
2046 dev_err(dev, "Enabling config clocks\n");
2047 if (iommu_enable_config_clocks(ddev->domain))
2048 dev_err(dev, "Failed!\n");
2049 break;
2050 default:
2051 dev_err(dev, "Invalid value. Should be 0 or 1.\n");
2052 return -EINVAL;
2053 }
2054
2055 return count;
2056}
2057
/* debugfs file ops for "config_clocks": write '0'/'1' to toggle clocks */
static const struct file_operations iommu_debug_config_clocks_fops = {
	.open = simple_open,
	.write = iommu_debug_config_clocks_write,
};
2062
Patrick Daly9438f322017-04-05 18:03:19 -07002063static ssize_t iommu_debug_trigger_fault_write(
2064 struct file *file, const char __user *ubuf, size_t count,
2065 loff_t *offset)
2066{
2067 struct iommu_debug_device *ddev = file->private_data;
2068 unsigned long flags;
2069
2070 if (!ddev->domain) {
2071 pr_err("No domain. Did you already attach?\n");
2072 return -EINVAL;
2073 }
2074
2075 if (kstrtoul_from_user(ubuf, count, 0, &flags)) {
2076 pr_err("Invalid flags format\n");
2077 return -EFAULT;
2078 }
2079
2080 iommu_trigger_fault(ddev->domain, flags);
2081
2082 return count;
2083}
2084
/* debugfs file ops for "trigger-fault": write flags to force an IOMMU fault */
static const struct file_operations iommu_debug_trigger_fault_fops = {
	.open = simple_open,
	.write = iommu_debug_trigger_fault_write,
};
2089
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002090/*
2091 * The following will only work for drivers that implement the generic
2092 * device tree bindings described in
2093 * Documentation/devicetree/bindings/iommu/iommu.txt
2094 */
2095static int snarf_iommu_devices(struct device *dev, void *ignored)
2096{
2097 struct iommu_debug_device *ddev;
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07002098 struct dentry *dir;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002099
2100 if (!of_find_property(dev->of_node, "iommus", NULL))
2101 return 0;
2102
Patrick Daly6dd80252017-04-17 20:41:59 -07002103 /* Hold a reference count */
2104 if (!iommu_group_get(dev))
2105 return 0;
2106
Mitchel Humpherys89924fd2015-07-09 14:50:22 -07002107 ddev = kzalloc(sizeof(*ddev), GFP_KERNEL);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002108 if (!ddev)
2109 return -ENODEV;
2110 ddev->dev = dev;
2111 dir = debugfs_create_dir(dev_name(dev), debugfs_tests_dir);
2112 if (!dir) {
2113 pr_err("Couldn't create iommu/devices/%s debugfs dir\n",
2114 dev_name(dev));
2115 goto err;
2116 }
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07002117
Patrick Dalye4e39862015-11-20 20:00:50 -08002118 if (!debugfs_create_file("nr_iters", S_IRUSR, dir, &iters_per_op,
2119 &iommu_debug_nr_iters_ops)) {
2120 pr_err("Couldn't create iommu/devices/%s/nr_iters debugfs file\n",
2121 dev_name(dev));
2122 goto err_rmdir;
2123 }
2124
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07002125 if (!debugfs_create_file("test_virt_addr", 0400, dir, ddev,
2126 &iommu_debug_test_virt_addr_fops)) {
2127 pr_err("Couldn't create iommu/devices/%s/test_virt_addr debugfs file\n",
2128 dev_name(dev));
2129 goto err_rmdir;
2130 }
2131
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07002132 if (!debugfs_create_file("profiling", S_IRUSR, dir, ddev,
2133 &iommu_debug_profiling_fops)) {
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002134 pr_err("Couldn't create iommu/devices/%s/profiling debugfs file\n",
2135 dev_name(dev));
2136 goto err_rmdir;
2137 }
2138
Mitchel Humpherys020f90f2015-10-02 16:02:31 -07002139 if (!debugfs_create_file("secure_profiling", S_IRUSR, dir, ddev,
2140 &iommu_debug_secure_profiling_fops)) {
2141 pr_err("Couldn't create iommu/devices/%s/secure_profiling debugfs file\n",
2142 dev_name(dev));
2143 goto err_rmdir;
2144 }
2145
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -07002146 if (!debugfs_create_file("profiling_fast", S_IRUSR, dir, ddev,
2147 &iommu_debug_profiling_fast_fops)) {
2148 pr_err("Couldn't create iommu/devices/%s/profiling_fast debugfs file\n",
2149 dev_name(dev));
2150 goto err_rmdir;
2151 }
2152
Mitchel Humpherysbe3060c2015-10-08 15:08:01 -07002153 if (!debugfs_create_file("profiling_fast_dma_api", S_IRUSR, dir, ddev,
2154 &iommu_debug_profiling_fast_dma_api_fops)) {
2155 pr_err("Couldn't create iommu/devices/%s/profiling_fast_dma_api debugfs file\n",
2156 dev_name(dev));
2157 goto err_rmdir;
2158 }
2159
Mitchel Humpherys45fc7122015-12-11 15:22:15 -08002160 if (!debugfs_create_file("functional_fast_dma_api", S_IRUSR, dir, ddev,
2161 &iommu_debug_functional_fast_dma_api_fops)) {
2162 pr_err("Couldn't create iommu/devices/%s/functional_fast_dma_api debugfs file\n",
2163 dev_name(dev));
2164 goto err_rmdir;
2165 }
2166
Mitchel Humpherys10215fd2015-12-15 18:45:57 -08002167 if (!debugfs_create_file("functional_arm_dma_api", S_IRUSR, dir, ddev,
2168 &iommu_debug_functional_arm_dma_api_fops)) {
2169 pr_err("Couldn't create iommu/devices/%s/functional_arm_dma_api debugfs file\n",
2170 dev_name(dev));
2171 goto err_rmdir;
2172 }
2173
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07002174 if (!debugfs_create_file("dma_attach", 0600, dir, ddev,
2175 &iommu_debug_dma_attach_fops)) {
2176 pr_err("Couldn't create iommu/devices/%s/dma_attach debugfs file\n",
2177 dev_name(dev));
2178 goto err_rmdir;
2179 }
2180
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07002181 if (!debugfs_create_file("attach", S_IRUSR, dir, ddev,
2182 &iommu_debug_attach_fops)) {
2183 pr_err("Couldn't create iommu/devices/%s/attach debugfs file\n",
2184 dev_name(dev));
2185 goto err_rmdir;
2186 }
2187
Mitchel Humpherysac4c38b2015-07-30 19:40:21 -07002188 if (!debugfs_create_file("secure_attach", S_IRUSR, dir, ddev,
2189 &iommu_debug_secure_attach_fops)) {
2190 pr_err("Couldn't create iommu/devices/%s/secure_attach debugfs file\n",
2191 dev_name(dev));
2192 goto err_rmdir;
2193 }
2194
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07002195 if (!debugfs_create_file("atos", S_IWUSR, dir, ddev,
2196 &iommu_debug_atos_fops)) {
2197 pr_err("Couldn't create iommu/devices/%s/atos debugfs file\n",
2198 dev_name(dev));
2199 goto err_rmdir;
2200 }
2201
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07002202 if (!debugfs_create_file("dma_atos", 0600, dir, ddev,
2203 &iommu_debug_dma_atos_fops)) {
2204 pr_err("Couldn't create iommu/devices/%s/dma_atos debugfs file\n",
2205 dev_name(dev));
2206 goto err_rmdir;
2207 }
2208
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07002209 if (!debugfs_create_file("map", S_IWUSR, dir, ddev,
2210 &iommu_debug_map_fops)) {
2211 pr_err("Couldn't create iommu/devices/%s/map debugfs file\n",
2212 dev_name(dev));
2213 goto err_rmdir;
2214 }
2215
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07002216 if (!debugfs_create_file("dma_map", 0600, dir, ddev,
2217 &iommu_debug_dma_map_fops)) {
2218 pr_err("Couldn't create iommu/devices/%s/dma_map debugfs file\n",
2219 dev_name(dev));
2220 goto err_rmdir;
2221 }
2222
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07002223 if (!debugfs_create_file("unmap", S_IWUSR, dir, ddev,
2224 &iommu_debug_unmap_fops)) {
2225 pr_err("Couldn't create iommu/devices/%s/unmap debugfs file\n",
2226 dev_name(dev));
2227 goto err_rmdir;
2228 }
2229
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07002230 if (!debugfs_create_file("dma_unmap", 0200, dir, ddev,
2231 &iommu_debug_dma_unmap_fops)) {
2232 pr_err("Couldn't create iommu/devices/%s/dma_unmap debugfs file\n",
2233 dev_name(dev));
2234 goto err_rmdir;
2235 }
2236
2237 if (!debugfs_create_file("pte", 0600, dir, ddev,
2238 &iommu_debug_pte_fops)) {
2239 pr_err("Couldn't create iommu/devices/%s/pte debugfs file\n",
2240 dev_name(dev));
2241 goto err_rmdir;
2242 }
2243
Mitchel Humpherys0d1b8262016-02-01 16:53:39 -08002244 if (!debugfs_create_file("config_clocks", S_IWUSR, dir, ddev,
2245 &iommu_debug_config_clocks_fops)) {
2246 pr_err("Couldn't create iommu/devices/%s/config_clocks debugfs file\n",
2247 dev_name(dev));
2248 goto err_rmdir;
2249 }
2250
Patrick Daly9438f322017-04-05 18:03:19 -07002251 if (!debugfs_create_file("trigger-fault", 0200, dir, ddev,
2252 &iommu_debug_trigger_fault_fops)) {
2253 pr_err("Couldn't create iommu/devices/%s/trigger-fault debugfs file\n",
2254 dev_name(dev));
2255 goto err_rmdir;
2256 }
2257
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002258 list_add(&ddev->list, &iommu_debug_devices);
2259 return 0;
2260
2261err_rmdir:
2262 debugfs_remove_recursive(dir);
2263err:
2264 kfree(ddev);
2265 return 0;
2266}
2267
2268static int iommu_debug_init_tests(void)
2269{
2270 debugfs_tests_dir = debugfs_create_dir("tests",
Mitchel Humpherysc75ae492015-07-15 18:27:36 -07002271 iommu_debugfs_top);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002272 if (!debugfs_tests_dir) {
2273 pr_err("Couldn't create iommu/tests debugfs directory\n");
2274 return -ENODEV;
2275 }
2276
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07002277 test_virt_addr = kzalloc(SZ_1M, GFP_KERNEL);
2278
2279 if (!test_virt_addr)
2280 return -ENOMEM;
2281
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002282 return bus_for_each_dev(&platform_bus_type, NULL, NULL,
2283 snarf_iommu_devices);
2284}
Mitchel Humpherysc75ae492015-07-15 18:27:36 -07002285
2286static void iommu_debug_destroy_tests(void)
2287{
2288 debugfs_remove_recursive(debugfs_tests_dir);
2289}
#else
/* Test infrastructure disabled: no-op stubs so callers need no #ifdefs. */
static inline int iommu_debug_init_tests(void) { return 0; }
static inline void iommu_debug_destroy_tests(void) { }
#endif
2294
/*
 * This isn't really a "driver", we just need something in the device tree
 * so that our tests can run without any client drivers, and our tests rely
 * on parsing the device tree for nodes with the `iommus' property.
 */
/* Shared no-op probe/remove callback for the stub platform driver. */
static int iommu_debug_pass(struct platform_device *pdev)
{
	return 0;
}
2304
/* Matches the "iommu-debug-test" stub node placed in the DT for testing. */
static const struct of_device_id iommu_debug_of_match[] = {
	{ .compatible = "iommu-debug-test" },
	{ },
};
2309
/* Stub platform driver; probe/remove intentionally do nothing. */
static struct platform_driver iommu_debug_driver = {
	.probe = iommu_debug_pass,
	.remove = iommu_debug_pass,
	.driver = {
		.name = "iommu-debug",
		.of_match_table = iommu_debug_of_match,
	},
};
2318
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002319static int iommu_debug_init(void)
2320{
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002321 if (iommu_debug_init_tests())
Mitchel Humpherysc75ae492015-07-15 18:27:36 -07002322 return -ENODEV;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002323
Mitchel Humpherys93f7eef2016-04-13 17:08:49 -07002324 return platform_driver_register(&iommu_debug_driver);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002325}
2326
/* Module exit: unregister the stub driver, then tear down the test tree. */
static void iommu_debug_exit(void)
{
	platform_driver_unregister(&iommu_debug_driver);
	iommu_debug_destroy_tests();
}
2332
/* Standard module entry/exit registration. */
module_init(iommu_debug_init);
module_exit(iommu_debug_exit);