/*
 * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) "iommu-debug: %s: " fmt, __func__

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/dma-contiguous.h>
#include <soc/qcom/secure_buffer.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
#include <asm/dma-iommu.h>
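
/*
 * Debug and test support for the IOMMU and (fast) DMA mapping layers.
 * This section defines attachment tracking plus profiling, functional,
 * attach/map/atos/pte debugfs file operations; the debugfs files
 * themselves are created by setup code outside this section.
 */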

#if defined(CONFIG_IOMMU_DEBUG_TRACKING) || defined(CONFIG_IOMMU_TESTS)

static const char *iommu_debug_attr_to_string(enum iommu_attr attr)
{
	switch (attr) {
	case DOMAIN_ATTR_GEOMETRY:
		return "DOMAIN_ATTR_GEOMETRY";
	case DOMAIN_ATTR_PAGING:
		return "DOMAIN_ATTR_PAGING";
	case DOMAIN_ATTR_WINDOWS:
		return "DOMAIN_ATTR_WINDOWS";
	case DOMAIN_ATTR_FSL_PAMU_STASH:
		return "DOMAIN_ATTR_FSL_PAMU_STASH";
	case DOMAIN_ATTR_FSL_PAMU_ENABLE:
		return "DOMAIN_ATTR_FSL_PAMU_ENABLE";
	case DOMAIN_ATTR_FSL_PAMUV1:
		return "DOMAIN_ATTR_FSL_PAMUV1";
	case DOMAIN_ATTR_NESTING:
		return "DOMAIN_ATTR_NESTING";
	case DOMAIN_ATTR_PT_BASE_ADDR:
		return "DOMAIN_ATTR_PT_BASE_ADDR";
	case DOMAIN_ATTR_SECURE_VMID:
		return "DOMAIN_ATTR_SECURE_VMID";
	case DOMAIN_ATTR_ATOMIC:
		return "DOMAIN_ATTR_ATOMIC";
	case DOMAIN_ATTR_CONTEXT_BANK:
		return "DOMAIN_ATTR_CONTEXT_BANK";
	case DOMAIN_ATTR_TTBR0:
		return "DOMAIN_ATTR_TTBR0";
	case DOMAIN_ATTR_CONTEXTIDR:
		return "DOMAIN_ATTR_CONTEXTIDR";
	case DOMAIN_ATTR_PROCID:
		return "DOMAIN_ATTR_PROCID";
	case DOMAIN_ATTR_DYNAMIC:
		return "DOMAIN_ATTR_DYNAMIC";
	case DOMAIN_ATTR_NON_FATAL_FAULTS:
		return "DOMAIN_ATTR_NON_FATAL_FAULTS";
	case DOMAIN_ATTR_S1_BYPASS:
		return "DOMAIN_ATTR_S1_BYPASS";
	case DOMAIN_ATTR_FAST:
		return "DOMAIN_ATTR_FAST";
	case DOMAIN_ATTR_EARLY_MAP:
		return "DOMAIN_ATTR_EARLY_MAP";
	case DOMAIN_ATTR_CB_STALL_DISABLE:
		return "DOMAIN_ATTR_CB_STALL_DISABLE";
	default:
		return "Unknown attr!";
	}
}
#endif

#ifdef CONFIG_IOMMU_DEBUG_TRACKING

static DEFINE_MUTEX(iommu_debug_attachments_lock);
static LIST_HEAD(iommu_debug_attachments);

/*
 * Each group may have more than one domain; but each domain may
 * only have one group.
 * Used by debug tools to display the name of the device(s) associated
 * with a particular domain.
 */
struct iommu_debug_attachment {
	struct iommu_domain *domain;
	struct iommu_group *group;
	struct list_head list;
};
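
/*
 * Record a (domain, group) pairing at attach time so the debug tools
 * can later name the device(s) behind a domain. Holds a reference on
 * the group until iommu_debug_domain_remove().
 */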
void iommu_debug_attach_device(struct iommu_domain *domain,
			       struct device *dev)
{
	struct iommu_debug_attachment *attach;
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		return;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach) {
		/* drop the reference taken by iommu_group_get() */
		iommu_group_put(group);
		return;
	}

	attach->domain = domain;
	attach->group = group;
	INIT_LIST_HEAD(&attach->list);

	mutex_lock(&iommu_debug_attachments_lock);
	list_add(&attach->list, &iommu_debug_attachments);
	mutex_unlock(&iommu_debug_attachments_lock);
}
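
/*
 * Forget every attachment recorded for @domain, dropping the group
 * references taken in iommu_debug_attach_device().
 */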
void iommu_debug_domain_remove(struct iommu_domain *domain)
{
	struct iommu_debug_attachment *it, *tmp;

	mutex_lock(&iommu_debug_attachments_lock);
	list_for_each_entry_safe(it, tmp, &iommu_debug_attachments, list) {
		if (it->domain != domain)
			continue;
		list_del(&it->list);
		iommu_group_put(it->group);
		kfree(it);
	}

	mutex_unlock(&iommu_debug_attachments_lock);
}

#endif

#ifdef CONFIG_IOMMU_TESTS
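
/*
 * Parse user-supplied numbers at the native word width: 64-bit kernels
 * accept full u64 IOVAs/addresses, 32-bit kernels are limited to u32.
 */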
#ifdef CONFIG_64BIT

#define kstrtoux kstrtou64
#define kstrtox_from_user kstrtoull_from_user
#define kstrtosize_t kstrtoul

#else

#define kstrtoux kstrtou32
#define kstrtox_from_user kstrtouint_from_user
#define kstrtosize_t kstrtouint

#endif

static LIST_HEAD(iommu_debug_devices);
static struct dentry *debugfs_tests_dir;
static u32 iters_per_op = 1;
static void *test_virt_addr;

struct iommu_debug_device {
	struct device *dev;
	struct iommu_domain *domain;
	u64 iova;
	u64 phys;
	size_t len;
	struct list_head list;
};

static int iommu_debug_build_phoney_sg_table(struct device *dev,
					     struct sg_table *table,
					     unsigned long total_size,
					     unsigned long chunk_size)
{
	unsigned long nents = total_size / chunk_size;
	struct scatterlist *sg;
	int i;
	struct page *page;

	if (!IS_ALIGNED(total_size, PAGE_SIZE))
		return -EINVAL;
	if (!IS_ALIGNED(total_size, chunk_size))
		return -EINVAL;
	if (sg_alloc_table(table, nents, GFP_KERNEL))
		return -EINVAL;
	page = alloc_pages(GFP_KERNEL, get_order(chunk_size));
	if (!page)
		goto free_table;

	/* all the same page... why not. */
	for_each_sg(table->sgl, sg, table->nents, i)
		sg_set_page(sg, page, chunk_size, 0);

	return 0;

free_table:
	sg_free_table(table);
	return -ENOMEM;
}

static void iommu_debug_destroy_phoney_sg_table(struct device *dev,
						struct sg_table *table,
						unsigned long chunk_size)
{
	__free_pages(sg_page(table->sgl), get_order(chunk_size));
	sg_free_table(table);
}

static const char *_size_to_string(unsigned long size)
{
	switch (size) {
	case SZ_4K:
		return "4K";
	case SZ_8K:
		return "8K";
	case SZ_16K:
		return "16K";
	case SZ_64K:
		return "64K";
	case SZ_2M:
		return "2M";
	case SZ_1M * 12:
		return "12M";
	case SZ_1M * 20:
		return "20M";
	}
	return "unknown size, please add to _size_to_string";
}
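
/*
 * Number of map/unmap iterations averaged by the profiling tests,
 * clamped to [1, 10000]. Exposed through a debugfs file created
 * outside this section (the file name is assumed from the variable
 * name); e.g., with debugfs mounted at the conventional location:
 *
 *	echo 100 > /sys/kernel/debug/<tests dir>/<device>/iters_per_op
 */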
static int nr_iters_set(void *data, u64 val)
{
	if (!val)
		val = 1;
	if (val > 10000)
		val = 10000;
	*(u32 *)data = val;
	return 0;
}

static int nr_iters_get(void *data, u64 *val)
{
	*val = *(u32 *)data;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(iommu_debug_nr_iters_ops,
			nr_iters_get, nr_iters_set, "%llu\n");
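
/*
 * Allocate a scratch domain with the given attributes, attach it to
 * @dev's group, then time iommu_map()/iommu_unmap() (and iommu_map_sg())
 * for each size in @sizes, averaged over iters_per_op runs.
 */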
static void iommu_debug_device_profiling(struct seq_file *s, struct device *dev,
					 enum iommu_attr attrs[],
					 void *attr_values[], int nattrs,
					 const size_t sizes[])
{
	int i;
	const size_t *sz;
	struct iommu_domain *domain;
	unsigned long iova = 0x10000;
	phys_addr_t paddr = 0xa000;

	domain = iommu_domain_alloc(&platform_bus_type);
	if (!domain) {
		seq_puts(s, "Couldn't allocate domain\n");
		return;
	}

	seq_puts(s, "Domain attributes: [ ");
	for (i = 0; i < nattrs; ++i) {
		/* not all attrs are ints, but this will get us by for now */
		seq_printf(s, "%s=%d ", iommu_debug_attr_to_string(attrs[i]),
			   *((int *)attr_values[i]));
	}
	seq_puts(s, "]\n");
	for (i = 0; i < nattrs; ++i) {
		if (iommu_domain_set_attr(domain, attrs[i], attr_values[i])) {
			seq_printf(s, "Couldn't set %d to the value at %p\n",
				   attrs[i], attr_values[i]);
			goto out_domain_free;
		}
	}

	if (iommu_attach_group(domain, dev->iommu_group)) {
		seq_puts(s,
			 "Couldn't attach new domain to device. Is it already attached?\n");
		goto out_domain_free;
	}

	seq_printf(s, "(average over %d iterations)\n", iters_per_op);
	seq_printf(s, "%8s %19s %16s\n", "size", "iommu_map", "iommu_unmap");
	for (sz = sizes; *sz; ++sz) {
		size_t size = *sz;
		size_t unmapped;
		u64 map_elapsed_ns = 0, unmap_elapsed_ns = 0;
		u64 map_elapsed_us = 0, unmap_elapsed_us = 0;
		u32 map_elapsed_rem = 0, unmap_elapsed_rem = 0;
		struct timespec tbefore, tafter, diff;
		int i;

		for (i = 0; i < iters_per_op; ++i) {
			getnstimeofday(&tbefore);
			if (iommu_map(domain, iova, paddr, size,
				      IOMMU_READ | IOMMU_WRITE)) {
				seq_puts(s, "Failed to map\n");
				continue;
			}
			getnstimeofday(&tafter);
			diff = timespec_sub(tafter, tbefore);
			map_elapsed_ns += timespec_to_ns(&diff);

			getnstimeofday(&tbefore);
			unmapped = iommu_unmap(domain, iova, size);
			if (unmapped != size) {
				seq_printf(s,
					   "Only unmapped %zx instead of %zx\n",
					   unmapped, size);
				continue;
			}
			getnstimeofday(&tafter);
			diff = timespec_sub(tafter, tbefore);
			unmap_elapsed_ns += timespec_to_ns(&diff);
		}

		map_elapsed_ns = div_u64_rem(map_elapsed_ns, iters_per_op,
					     &map_elapsed_rem);
		unmap_elapsed_ns = div_u64_rem(unmap_elapsed_ns, iters_per_op,
					       &unmap_elapsed_rem);

		map_elapsed_us = div_u64_rem(map_elapsed_ns, 1000,
					     &map_elapsed_rem);
		unmap_elapsed_us = div_u64_rem(unmap_elapsed_ns, 1000,
					       &unmap_elapsed_rem);

		seq_printf(s, "%8s %12lld.%03d us %9lld.%03d us\n",
			   _size_to_string(size),
			   map_elapsed_us, map_elapsed_rem,
			   unmap_elapsed_us, unmap_elapsed_rem);
	}

	seq_putc(s, '\n');
	seq_printf(s, "%8s %19s %16s\n", "size", "iommu_map_sg", "iommu_unmap");
	for (sz = sizes; *sz; ++sz) {
		size_t size = *sz;
		size_t unmapped;
		u64 map_elapsed_ns = 0, unmap_elapsed_ns = 0;
		u64 map_elapsed_us = 0, unmap_elapsed_us = 0;
		u32 map_elapsed_rem = 0, unmap_elapsed_rem = 0;
		struct timespec tbefore, tafter, diff;
		struct sg_table table;
		unsigned long chunk_size = SZ_4K;
		int i;

		if (iommu_debug_build_phoney_sg_table(dev, &table, size,
						      chunk_size)) {
			seq_puts(s,
				 "couldn't build phoney sg table! bailing...\n");
			goto out_detach;
		}

		for (i = 0; i < iters_per_op; ++i) {
			getnstimeofday(&tbefore);
			if (iommu_map_sg(domain, iova, table.sgl, table.nents,
					 IOMMU_READ | IOMMU_WRITE) != size) {
				seq_puts(s, "Failed to map_sg\n");
				goto next;
			}
			getnstimeofday(&tafter);
			diff = timespec_sub(tafter, tbefore);
			map_elapsed_ns += timespec_to_ns(&diff);

			getnstimeofday(&tbefore);
			unmapped = iommu_unmap(domain, iova, size);
			if (unmapped != size) {
				seq_printf(s,
					   "Only unmapped %zx instead of %zx\n",
					   unmapped, size);
				goto next;
			}
			getnstimeofday(&tafter);
			diff = timespec_sub(tafter, tbefore);
			unmap_elapsed_ns += timespec_to_ns(&diff);
		}

		map_elapsed_ns = div_u64_rem(map_elapsed_ns, iters_per_op,
					     &map_elapsed_rem);
		unmap_elapsed_ns = div_u64_rem(unmap_elapsed_ns, iters_per_op,
					       &unmap_elapsed_rem);

		map_elapsed_us = div_u64_rem(map_elapsed_ns, 1000,
					     &map_elapsed_rem);
		unmap_elapsed_us = div_u64_rem(unmap_elapsed_ns, 1000,
					       &unmap_elapsed_rem);

		seq_printf(s, "%8s %12lld.%03d us %9lld.%03d us\n",
			   _size_to_string(size),
			   map_elapsed_us, map_elapsed_rem,
			   unmap_elapsed_us, unmap_elapsed_rem);

next:
		iommu_debug_destroy_phoney_sg_table(dev, &table, chunk_size);
	}

out_detach:
	iommu_detach_group(domain, dev->iommu_group);
out_domain_free:
	iommu_domain_free(domain);
}

static int iommu_debug_profiling_show(struct seq_file *s, void *ignored)
{
	struct iommu_debug_device *ddev = s->private;
	const size_t sizes[] = { SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12,
				 SZ_1M * 20, 0 };
	enum iommu_attr attrs[] = {
		DOMAIN_ATTR_ATOMIC,
	};
	int atomic = 1;
	void *attr_values[] = { &atomic };

	iommu_debug_device_profiling(s, ddev->dev, attrs, attr_values,
				     ARRAY_SIZE(attrs), sizes);

	return 0;
}

static int iommu_debug_profiling_open(struct inode *inode, struct file *file)
{
	return single_open(file, iommu_debug_profiling_show, inode->i_private);
}

static const struct file_operations iommu_debug_profiling_fops = {
	.open = iommu_debug_profiling_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
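
/*
 * Example (assumed path; the per-device debugfs directory is created by
 * setup code outside this section):
 *
 *	cat /sys/kernel/debug/iommu-tests/<device>/profiling
 */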
static int iommu_debug_secure_profiling_show(struct seq_file *s, void *ignored)
{
	struct iommu_debug_device *ddev = s->private;
	const size_t sizes[] = { SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12,
				 SZ_1M * 20, 0 };

	enum iommu_attr attrs[] = {
		DOMAIN_ATTR_ATOMIC,
		DOMAIN_ATTR_SECURE_VMID,
	};
	int one = 1, secure_vmid = VMID_CP_PIXEL;
	void *attr_values[] = { &one, &secure_vmid };

	iommu_debug_device_profiling(s, ddev->dev, attrs, attr_values,
				     ARRAY_SIZE(attrs), sizes);

	return 0;
}

static int iommu_debug_secure_profiling_open(struct inode *inode,
					     struct file *file)
{
	return single_open(file, iommu_debug_secure_profiling_show,
			   inode->i_private);
}

static const struct file_operations iommu_debug_secure_profiling_fops = {
	.open = iommu_debug_secure_profiling_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int iommu_debug_profiling_fast_show(struct seq_file *s, void *ignored)
{
	struct iommu_debug_device *ddev = s->private;
	size_t sizes[] = {SZ_4K, SZ_8K, SZ_16K, SZ_64K, 0};
	enum iommu_attr attrs[] = {
		DOMAIN_ATTR_FAST,
		DOMAIN_ATTR_ATOMIC,
	};
	int one = 1;
	void *attr_values[] = { &one, &one };

	iommu_debug_device_profiling(s, ddev->dev, attrs, attr_values,
				     ARRAY_SIZE(attrs), sizes);

	return 0;
}

static int iommu_debug_profiling_fast_open(struct inode *inode,
					   struct file *file)
{
	return single_open(file, iommu_debug_profiling_fast_show,
			   inode->i_private);
}

static const struct file_operations iommu_debug_profiling_fast_fops = {
	.open = iommu_debug_profiling_fast_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int iommu_debug_profiling_fast_dma_api_show(struct seq_file *s,
						   void *ignored)
{
	int i, experiment;
	struct iommu_debug_device *ddev = s->private;
	struct device *dev = ddev->dev;
	u64 map_elapsed_ns[10], unmap_elapsed_ns[10];
	struct dma_iommu_mapping *mapping;
	dma_addr_t dma_addr;
	void *virt;
	int fast = 1;
	const char * const extra_labels[] = {
		"not coherent",
		"coherent",
	};
	unsigned long extra_attrs[] = {
		0,
		DMA_ATTR_SKIP_CPU_SYNC,
	};

	virt = kmalloc(1518, GFP_KERNEL);
	if (!virt)
		goto out;

	mapping = arm_iommu_create_mapping(&platform_bus_type, 0, SZ_1G * 4ULL);
	if (!mapping) {
		seq_puts(s, "arm_iommu_create_mapping failed\n");
		goto out_kfree;
	}

	if (iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_FAST, &fast)) {
		seq_puts(s, "iommu_domain_set_attr failed\n");
		goto out_release_mapping;
	}

	if (arm_iommu_attach_device(dev, mapping)) {
		seq_puts(s, "arm_iommu_attach_device failed\n");
		goto out_release_mapping;
	}

	if (iommu_enable_config_clocks(mapping->domain)) {
		seq_puts(s, "Couldn't enable clocks\n");
		goto out_detach;
	}
	for (experiment = 0; experiment < 2; ++experiment) {
		size_t map_avg = 0, unmap_avg = 0;

		for (i = 0; i < 10; ++i) {
			struct timespec tbefore, tafter, diff;
			u64 ns;

			getnstimeofday(&tbefore);
			dma_addr = dma_map_single_attrs(
				dev, virt, SZ_4K, DMA_TO_DEVICE,
				extra_attrs[experiment]);
			getnstimeofday(&tafter);
			diff = timespec_sub(tafter, tbefore);
			ns = timespec_to_ns(&diff);
			if (dma_mapping_error(dev, dma_addr)) {
				seq_puts(s, "dma_map_single failed\n");
				goto out_disable_config_clocks;
			}
			map_elapsed_ns[i] = ns;

			getnstimeofday(&tbefore);
			dma_unmap_single_attrs(
				dev, dma_addr, SZ_4K, DMA_TO_DEVICE,
				extra_attrs[experiment]);
			getnstimeofday(&tafter);
			diff = timespec_sub(tafter, tbefore);
			ns = timespec_to_ns(&diff);
			unmap_elapsed_ns[i] = ns;
		}

		seq_printf(s, "%13s %24s (ns): [", extra_labels[experiment],
			   "dma_map_single_attrs");
		for (i = 0; i < 10; ++i) {
			map_avg += map_elapsed_ns[i];
			seq_printf(s, "%5llu%s", map_elapsed_ns[i],
				   i < 9 ? ", " : "");
		}
		map_avg /= 10;
		seq_printf(s, "] (avg: %zu)\n", map_avg);

		seq_printf(s, "%13s %24s (ns): [", extra_labels[experiment],
			   "dma_unmap_single_attrs");
		for (i = 0; i < 10; ++i) {
			unmap_avg += unmap_elapsed_ns[i];
			seq_printf(s, "%5llu%s", unmap_elapsed_ns[i],
				   i < 9 ? ", " : "");
		}
		unmap_avg /= 10;
		seq_printf(s, "] (avg: %zu)\n", unmap_avg);
	}

out_disable_config_clocks:
	iommu_disable_config_clocks(mapping->domain);
out_detach:
	arm_iommu_detach_device(dev);
out_release_mapping:
	arm_iommu_release_mapping(mapping);
out_kfree:
	kfree(virt);
out:
	return 0;
}

static int iommu_debug_profiling_fast_dma_api_open(struct inode *inode,
						   struct file *file)
{
	return single_open(file, iommu_debug_profiling_fast_dma_api_show,
			   inode->i_private);
}

static const struct file_operations iommu_debug_profiling_fast_dma_api_fops = {
	.open = iommu_debug_profiling_fast_dma_api_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
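
/*
 * Fill the whole 4GB IOVA space with 8K mappings, punch 4K/8K holes,
 * then remap into them. If a TLB invalidate was missed when the holes
 * were unmapped, the remapped regions should trip on stale entries.
 */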
static int __tlb_stress_sweep(struct device *dev, struct seq_file *s)
{
	int i, ret = 0;
	u64 iova;
	const u64 max = SZ_1G * 4ULL - 1;
	void *virt;
	dma_addr_t dma_addr;

	/*
	 * we'll be doing 4K and 8K mappings.  Need to own an entire 8K
	 * chunk that we can work with.
	 */
	virt = (void *)__get_free_pages(GFP_KERNEL, get_order(SZ_8K));
	if (!virt)
		return -ENOMEM;

	/* fill the whole 4GB space */
	for (iova = 0, i = 0; iova < max; iova += SZ_8K, ++i) {
		dma_addr = dma_map_single(dev, virt, SZ_8K, DMA_TO_DEVICE);
		if (dma_addr == DMA_ERROR_CODE) {
			dev_err(dev, "Failed map on iter %d\n", i);
			ret = -EINVAL;
			goto out;
		}
	}

	if (dma_map_single(dev, virt, SZ_4K, DMA_TO_DEVICE) != DMA_ERROR_CODE) {
		dev_err(dev,
			"dma_map_single unexpectedly succeeded (VA should have been exhausted)\n");
		ret = -EINVAL;
		goto out;
	}

	/*
	 * free up 4K at the very beginning, then leave one 4K mapping,
	 * then free up 8K.  This makes the next 8K map skip over the 4K
	 * hole and take the 8K one.
	 */
	dma_unmap_single(dev, 0, SZ_4K, DMA_TO_DEVICE);
	dma_unmap_single(dev, SZ_8K, SZ_4K, DMA_TO_DEVICE);
	dma_unmap_single(dev, SZ_8K + SZ_4K, SZ_4K, DMA_TO_DEVICE);

	/* remap 8K */
	dma_addr = dma_map_single(dev, virt, SZ_8K, DMA_TO_DEVICE);
	if (dma_addr != SZ_8K) {
		dma_addr_t expected = SZ_8K;

		dev_err(dev, "Unexpected dma_addr. got: %pa expected: %pa\n",
			&dma_addr, &expected);
		ret = -EINVAL;
		goto out;
	}

	/*
	 * now remap 4K.  We should get the first 4K chunk that was skipped
	 * over during the previous 8K map.  If we missed a TLB invalidate
	 * at that point this should explode.
	 */
	dma_addr = dma_map_single(dev, virt, SZ_4K, DMA_TO_DEVICE);
	if (dma_addr != 0) {
		dma_addr_t expected = 0;

		dev_err(dev, "Unexpected dma_addr. got: %pa expected: %pa\n",
			&dma_addr, &expected);
		ret = -EINVAL;
		goto out;
	}

	if (dma_map_single(dev, virt, SZ_4K, DMA_TO_DEVICE) != DMA_ERROR_CODE) {
		dev_err(dev,
			"dma_map_single unexpectedly succeeded after remaps (VA should have been exhausted)\n");
		ret = -EINVAL;
		goto out;
	}

	/* we're all full again. unmap everything. */
	for (iova = 0; iova < max; iova += SZ_8K)
		dma_unmap_single(dev, (dma_addr_t)iova, SZ_8K, DMA_TO_DEVICE);

out:
	free_pages((unsigned long)virt, get_order(SZ_8K));
	return ret;
}
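
/*
 * Tiny Fibonacci generator. Used below to pick a deterministic but
 * scattered sequence of IOVAs to unmap (2, 3, 5, 8, ... times the
 * mapping size).
 */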
struct fib_state {
	unsigned long cur;
	unsigned long prev;
};

static void fib_init(struct fib_state *f)
{
	f->cur = f->prev = 1;
}

static unsigned long get_next_fib(struct fib_state *f)
{
	unsigned long next = f->cur + f->prev;

	f->prev = f->cur;
	f->cur = next;
	return next;
}

/*
 * Not actually random.  Just testing the fibs (and max - the fibs).
 */
static int __rand_va_sweep(struct device *dev, struct seq_file *s,
			   const size_t size)
{
	u64 iova;
	const u64 max = SZ_1G * 4ULL - 1;
	int i, remapped, unmapped, ret = 0;
	void *virt;
	dma_addr_t dma_addr, dma_addr2;
	struct fib_state fib;

	virt = (void *)__get_free_pages(GFP_KERNEL, get_order(size));
	if (!virt) {
		if (size > SZ_8K) {
			dev_err(dev,
				"Failed to allocate %s of memory, which is a lot. Skipping test for this size\n",
				_size_to_string(size));
			return 0;
		}
		return -ENOMEM;
	}

	/* fill the whole 4GB space */
	for (iova = 0, i = 0; iova < max; iova += size, ++i) {
		dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
		if (dma_addr == DMA_ERROR_CODE) {
			dev_err(dev, "Failed map on iter %d\n", i);
			ret = -EINVAL;
			goto out;
		}
	}

	/* now unmap "random" iovas */
	unmapped = 0;
	fib_init(&fib);
	for (iova = get_next_fib(&fib) * size;
	     iova < max - size;
	     iova = (u64)get_next_fib(&fib) * size) {
		dma_addr = (dma_addr_t)(iova);
		dma_addr2 = (dma_addr_t)((max + 1) - size - iova);
		if (dma_addr == dma_addr2) {
			WARN(1,
			     "%s test needs update! The random number sequence is folding in on itself and should be changed.\n",
			     __func__);
			ret = -EINVAL;
			goto out;
		}
		dma_unmap_single(dev, dma_addr, size, DMA_TO_DEVICE);
		dma_unmap_single(dev, dma_addr2, size, DMA_TO_DEVICE);
		unmapped += 2;
	}

	/* and map until everything fills back up */
	for (remapped = 0; ; ++remapped) {
		dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
		if (dma_addr == DMA_ERROR_CODE)
			break;
	}

	if (unmapped != remapped) {
		dev_err(dev,
			"Unexpected random remap count! Unmapped %d but remapped %d\n",
			unmapped, remapped);
		ret = -EINVAL;
	}

	for (iova = 0; iova < max; iova += size)
		dma_unmap_single(dev, (dma_addr_t)iova, size, DMA_TO_DEVICE);

out:
	free_pages((unsigned long)virt, get_order(size));
	return ret;
}

static int __check_mapping(struct device *dev, struct iommu_domain *domain,
			   dma_addr_t iova, phys_addr_t expected)
{
	phys_addr_t res = iommu_iova_to_phys_hard(domain, iova);
	phys_addr_t res2 = iommu_iova_to_phys(domain, iova);

	WARN(res != res2, "hard/soft iova_to_phys fns don't agree...");

	if (res != expected) {
		dev_err_ratelimited(dev,
				    "Bad translation for %pa! Expected: %pa Got: %pa\n",
				    &iova, &expected, &res);
		return -EINVAL;
	}

	return 0;
}

static int __full_va_sweep(struct device *dev, struct seq_file *s,
			   const size_t size, struct iommu_domain *domain)
{
	u64 iova;
	dma_addr_t dma_addr;
	void *virt;
	phys_addr_t phys;
	const u64 max = SZ_1G * 4ULL - 1;
	int ret = 0, i;

	virt = (void *)__get_free_pages(GFP_KERNEL, get_order(size));
	if (!virt) {
		if (size > SZ_8K) {
			dev_err(dev,
				"Failed to allocate %s of memory, which is a lot. Skipping test for this size\n",
				_size_to_string(size));
			return 0;
		}
		return -ENOMEM;
	}
	phys = virt_to_phys(virt);

	for (iova = 0, i = 0; iova < max; iova += size, ++i) {
		unsigned long expected = iova;

		dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
		if (dma_addr != expected) {
			dev_err_ratelimited(dev,
					    "Unexpected iova on iter %d (expected: 0x%lx got: 0x%lx)\n",
					    i, expected,
					    (unsigned long)dma_addr);
			ret = -EINVAL;
			goto out;
		}
	}

	if (domain) {
		/* check every mapping from 0..6M */
		for (iova = 0, i = 0; iova < SZ_2M * 3; iova += size, ++i) {
			phys_addr_t expected = phys;

			if (__check_mapping(dev, domain, iova, expected)) {
				dev_err(dev, "iter: %d\n", i);
				ret = -EINVAL;
				goto out;
			}
		}
		/* and from 4G..4G-6M */
		for (iova = 0, i = 0; iova < SZ_2M * 3; iova += size, ++i) {
			phys_addr_t expected = phys;
			unsigned long theiova = ((SZ_1G * 4ULL) - size) - iova;

			if (__check_mapping(dev, domain, theiova, expected)) {
				dev_err(dev, "iter: %d\n", i);
				ret = -EINVAL;
				goto out;
			}
		}
	}

	/* at this point, our VA space should be full */
	dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
	if (dma_addr != DMA_ERROR_CODE) {
		dev_err_ratelimited(dev,
				    "dma_map_single succeeded when it should have failed. Got iova: 0x%lx\n",
				    (unsigned long)dma_addr);
		ret = -EINVAL;
	}

out:
	for (iova = 0; iova < max; iova += size)
		dma_unmap_single(dev, (dma_addr_t)iova, size, DMA_TO_DEVICE);

	free_pages((unsigned long)virt, get_order(size));
	return ret;
}
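
/* Emit test output to both the kernel log and the seq_file. */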
#define ds_printf(d, s, fmt, ...) ({			\
		dev_err(d, fmt, ##__VA_ARGS__);		\
		seq_printf(s, fmt, ##__VA_ARGS__);	\
	})

static int __functional_dma_api_va_test(struct device *dev, struct seq_file *s,
					struct iommu_domain *domain, void *priv)
{
	int i, j, ret = 0;
	size_t *sz, *sizes = priv;

	for (j = 0; j < 1; ++j) {
		for (sz = sizes; *sz; ++sz) {
			for (i = 0; i < 2; ++i) {
				ds_printf(dev, s, "Full VA sweep @%s %d",
					  _size_to_string(*sz), i);
				if (__full_va_sweep(dev, s, *sz, domain)) {
					ds_printf(dev, s, " -> FAILED\n");
					ret = -EINVAL;
				} else {
					ds_printf(dev, s, " -> SUCCEEDED\n");
				}
			}
		}
	}

	ds_printf(dev, s, "bonus map:");
	if (__full_va_sweep(dev, s, SZ_4K, domain)) {
		ds_printf(dev, s, " -> FAILED\n");
		ret = -EINVAL;
	} else {
		ds_printf(dev, s, " -> SUCCEEDED\n");
	}

	for (sz = sizes; *sz; ++sz) {
		for (i = 0; i < 2; ++i) {
			ds_printf(dev, s, "Rand VA sweep @%s %d",
				  _size_to_string(*sz), i);
			if (__rand_va_sweep(dev, s, *sz)) {
				ds_printf(dev, s, " -> FAILED\n");
				ret = -EINVAL;
			} else {
				ds_printf(dev, s, " -> SUCCEEDED\n");
			}
		}
	}

	ds_printf(dev, s, "TLB stress sweep");
	if (__tlb_stress_sweep(dev, s)) {
		ds_printf(dev, s, " -> FAILED\n");
		ret = -EINVAL;
	} else {
		ds_printf(dev, s, " -> SUCCEEDED\n");
	}

	ds_printf(dev, s, "second bonus map:");
	if (__full_va_sweep(dev, s, SZ_4K, domain)) {
		ds_printf(dev, s, " -> FAILED\n");
		ret = -EINVAL;
	} else {
		ds_printf(dev, s, " -> SUCCEEDED\n");
	}

	return ret;
}

static int __functional_dma_api_alloc_test(struct device *dev,
					   struct seq_file *s,
					   struct iommu_domain *domain,
					   void *ignored)
{
	size_t size = SZ_1K * 742;
	int ret = 0;
	u8 *data;
	dma_addr_t iova;

	/* Make sure we can allocate and use a buffer */
	ds_printf(dev, s, "Allocating coherent buffer");
	data = dma_alloc_coherent(dev, size, &iova, GFP_KERNEL);
	if (!data) {
		ds_printf(dev, s, " -> FAILED\n");
		ret = -EINVAL;
	} else {
		int i;

		ds_printf(dev, s, " -> SUCCEEDED\n");
		ds_printf(dev, s, "Using coherent buffer");
		for (i = 0; i < 742; ++i) {
			int ind = SZ_1K * i;
			u8 *p = data + ind;
			u8 val = i % 255;

			memset(data, 0xa5, size);
			*p = val;
			(*p)++;
			if ((*p) != val + 1) {
				ds_printf(dev, s,
					  " -> FAILED on iter %d since %d != %d\n",
					  i, *p, val + 1);
				ret = -EINVAL;
			}
		}
		if (!ret)
			ds_printf(dev, s, " -> SUCCEEDED\n");
		dma_free_coherent(dev, size, data, iova);
	}

	return ret;
}

static int __functional_dma_api_basic_test(struct device *dev,
					   struct seq_file *s,
					   struct iommu_domain *domain,
					   void *ignored)
{
	size_t size = 1518;
	int i, j, ret = 0;
	u8 *data;
	dma_addr_t iova;
	phys_addr_t pa, pa2;

	ds_printf(dev, s, "Basic DMA API test");
	/* Make sure we can allocate and use a buffer */
	for (i = 0; i < 1000; ++i) {
		data = kmalloc(size, GFP_KERNEL);
		if (!data) {
			ds_printf(dev, s, " -> FAILED\n");
			ret = -EINVAL;
			goto out;
		}
		memset(data, 0xa5, size);
		iova = dma_map_single(dev, data, size, DMA_TO_DEVICE);
		pa = iommu_iova_to_phys(domain, iova);
		pa2 = iommu_iova_to_phys_hard(domain, iova);
		if (pa != pa2) {
			dev_err(dev,
				"iova_to_phys doesn't match iova_to_phys_hard: %pa != %pa\n",
				&pa, &pa2);
			ret = -EINVAL;
			goto out;
		}
		pa2 = virt_to_phys(data);
		if (pa != pa2) {
			dev_err(dev,
				"iova_to_phys doesn't match virt_to_phys: %pa != %pa\n",
				&pa, &pa2);
			ret = -EINVAL;
			goto out;
		}
		dma_unmap_single(dev, iova, size, DMA_TO_DEVICE);
		for (j = 0; j < size; ++j) {
			if (data[j] != 0xa5) {
				dev_err(dev, "data[%d] != 0xa5\n", j);
				ret = -EINVAL;
				goto out;
			}
		}
		kfree(data);
	}

out:
	if (ret)
		ds_printf(dev, s, " -> FAILED\n");
	else
		ds_printf(dev, s, " -> SUCCEEDED\n");

	return ret;
}

/* Creates a fresh fast mapping and applies @fn to it */
static int __apply_to_new_mapping(struct seq_file *s,
				  int (*fn)(struct device *dev,
					    struct seq_file *s,
					    struct iommu_domain *domain,
					    void *priv),
				  void *priv)
{
	struct dma_iommu_mapping *mapping;
	struct iommu_debug_device *ddev = s->private;
	struct device *dev = ddev->dev;
	int ret = -EINVAL, fast = 1;
	phys_addr_t pt_phys;

	mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
					   (SZ_1G * 4ULL));
	if (!mapping)
		goto out;

	if (iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_FAST, &fast)) {
		seq_puts(s, "iommu_domain_set_attr failed\n");
		goto out_release_mapping;
	}

	if (arm_iommu_attach_device(dev, mapping))
		goto out_release_mapping;

	if (iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_PT_BASE_ADDR,
				  &pt_phys)) {
		ds_printf(dev, s, "Couldn't get page table base address\n");
		goto out_release_mapping;
	}

	dev_err(dev, "testing with pgtables at %pa\n", &pt_phys);
	if (iommu_enable_config_clocks(mapping->domain)) {
		ds_printf(dev, s, "Couldn't enable clocks\n");
		goto out_release_mapping;
	}
	ret = fn(dev, s, mapping->domain, priv);
	iommu_disable_config_clocks(mapping->domain);

	arm_iommu_detach_device(dev);
out_release_mapping:
	arm_iommu_release_mapping(mapping);
out:
	seq_printf(s, "%s\n", ret ? "FAIL" : "SUCCESS");
	return 0;
}

static int iommu_debug_functional_fast_dma_api_show(struct seq_file *s,
						    void *ignored)
{
	size_t sizes[] = {SZ_4K, SZ_8K, SZ_16K, SZ_64K, 0};
	int ret = 0;

	ret |= __apply_to_new_mapping(s, __functional_dma_api_alloc_test, NULL);
	ret |= __apply_to_new_mapping(s, __functional_dma_api_basic_test, NULL);
	ret |= __apply_to_new_mapping(s, __functional_dma_api_va_test, sizes);
	return ret;
}

static int iommu_debug_functional_fast_dma_api_open(struct inode *inode,
						    struct file *file)
{
	return single_open(file, iommu_debug_functional_fast_dma_api_show,
			   inode->i_private);
}

static const struct file_operations iommu_debug_functional_fast_dma_api_fops = {
	.open = iommu_debug_functional_fast_dma_api_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
static int iommu_debug_functional_arm_dma_api_show(struct seq_file *s,
						   void *ignored)
{
	struct dma_iommu_mapping *mapping;
	struct iommu_debug_device *ddev = s->private;
	struct device *dev = ddev->dev;
	size_t sizes[] = {SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12, 0};
	int ret = -EINVAL;

	/* Size the mapping at 4GB - 1, i.e. ULONG_MAX on 32-bit targets */
	mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
					   (SZ_1G * 4ULL - 1));
	if (!mapping)
		goto out;

	if (arm_iommu_attach_device(dev, mapping))
		goto out_release_mapping;

	ret = __functional_dma_api_alloc_test(dev, s, mapping->domain, sizes);
	ret |= __functional_dma_api_basic_test(dev, s, mapping->domain, sizes);

	arm_iommu_detach_device(dev);
out_release_mapping:
	arm_iommu_release_mapping(mapping);
out:
	seq_printf(s, "%s\n", ret ? "FAIL" : "SUCCESS");
	return 0;
}

static int iommu_debug_functional_arm_dma_api_open(struct inode *inode,
						   struct file *file)
{
	return single_open(file, iommu_debug_functional_arm_dma_api_show,
			   inode->i_private);
}

static const struct file_operations iommu_debug_functional_arm_dma_api_fops = {
	.open = iommu_debug_functional_arm_dma_api_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
static int iommu_debug_attach_do_attach(struct iommu_debug_device *ddev,
					int val, bool is_secure)
{
	struct iommu_group *group = ddev->dev->iommu_group;

	ddev->domain = iommu_domain_alloc(&platform_bus_type);
	if (!ddev->domain) {
		pr_err("Couldn't allocate domain\n");
		return -ENOMEM;
	}

	if (is_secure && iommu_domain_set_attr(ddev->domain,
					       DOMAIN_ATTR_SECURE_VMID,
					       &val)) {
		pr_err("Couldn't set secure vmid to %d\n", val);
		goto out_domain_free;
	}

	if (iommu_attach_group(ddev->domain, group)) {
		dev_err(ddev->dev, "Couldn't attach new domain to device\n");
		goto out_domain_free;
	}

	return 0;

out_domain_free:
	iommu_domain_free(ddev->domain);
	ddev->domain = NULL;
	return -EIO;
}
static ssize_t __iommu_debug_dma_attach_write(struct file *file,
					      const char __user *ubuf,
					      size_t count, loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	struct device *dev = ddev->dev;
	struct dma_iommu_mapping *dma_mapping;
	ssize_t retval = -EINVAL;
	int val;

	if (kstrtoint_from_user(ubuf, count, 0, &val)) {
		pr_err("Invalid format. Expected a hex or decimal integer");
		retval = -EFAULT;
		goto out;
	}

	if (val) {
		if (dev->archdata.mapping && dev->archdata.mapping->domain) {
			pr_err("Already attached.\n");
			retval = -EINVAL;
			goto out;
		}
		if (WARN(dev->archdata.iommu,
			 "Attachment tracking out of sync with device\n")) {
			retval = -EINVAL;
			goto out;
		}

		dma_mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
						       (SZ_1G * 4ULL));

		if (!dma_mapping)
			goto out;

		if (arm_iommu_attach_device(dev, dma_mapping))
			goto out_release_mapping;
		pr_err("Attached\n");
	} else {
		if (!dev->archdata.mapping) {
			pr_err("No mapping. Did you already attach?\n");
			retval = -EINVAL;
			goto out;
		}
		if (!dev->archdata.mapping->domain) {
			pr_err("No domain. Did you already attach?\n");
			retval = -EINVAL;
			goto out;
		}
		arm_iommu_detach_device(dev);
		arm_iommu_release_mapping(dev->archdata.mapping);
		pr_err("Detached\n");
	}
	retval = count;
	return retval;

out_release_mapping:
	arm_iommu_release_mapping(dma_mapping);
out:
	return retval;
}
static ssize_t __iommu_debug_attach_write(struct file *file,
					  const char __user *ubuf,
					  size_t count, loff_t *offset,
					  bool is_secure)
{
	struct iommu_debug_device *ddev = file->private_data;
	struct device *dev = ddev->dev;
	struct iommu_domain *domain;
	ssize_t retval;
	int val;

	if (kstrtoint_from_user(ubuf, count, 0, &val)) {
		pr_err("Invalid format. Expected a hex or decimal integer");
		retval = -EFAULT;
		goto out;
	}

	if (val) {
		if (ddev->domain) {
			pr_err("Iommu-Debug is already attached?\n");
			retval = -EINVAL;
			goto out;
		}

		domain = iommu_get_domain_for_dev(dev);
		if (domain) {
			pr_err("Another driver is using this device's iommu\n"
			       "Iommu-Debug cannot be used concurrently\n");
			retval = -EINVAL;
			goto out;
		}
		if (iommu_debug_attach_do_attach(ddev, val, is_secure)) {
			retval = -EIO;
			goto out;
		}
		pr_err("Attached\n");
	} else {
		if (!ddev->domain) {
			pr_err("Iommu-Debug is not attached?\n");
			retval = -EINVAL;
			goto out;
		}
		iommu_detach_group(ddev->domain, dev->iommu_group);
		iommu_domain_free(ddev->domain);
		ddev->domain = NULL;
		pr_err("Detached\n");
	}

	retval = count;
out:
	return retval;
}
static ssize_t iommu_debug_dma_attach_write(struct file *file,
					    const char __user *ubuf,
					    size_t count, loff_t *offset)
{
	return __iommu_debug_dma_attach_write(file, ubuf, count, offset);
}

static ssize_t iommu_debug_dma_attach_read(struct file *file, char __user *ubuf,
					   size_t count, loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	struct device *dev = ddev->dev;
	char c[2];

	if (*offset)
		return 0;

	if (!dev->archdata.mapping)
		c[0] = '0';
	else
		c[0] = dev->archdata.mapping->domain ? '1' : '0';

	c[1] = '\n';
	if (copy_to_user(ubuf, &c, 2)) {
		pr_err("copy_to_user failed\n");
		return -EFAULT;
	}
	*offset = 1; /* non-zero means we're done */

	return 2;
}

static const struct file_operations iommu_debug_dma_attach_fops = {
	.open = simple_open,
	.write = iommu_debug_dma_attach_write,
	.read = iommu_debug_dma_attach_read,
};
static ssize_t iommu_debug_test_virt_addr_read(struct file *file,
					       char __user *ubuf,
					       size_t count, loff_t *offset)
{
	char buf[100];
	ssize_t retval;
	size_t buflen;
	int buf_len = sizeof(buf);

	if (*offset)
		return 0;

	memset(buf, 0, buf_len);

	if (!test_virt_addr)
		strlcpy(buf, "FAIL\n", buf_len);
	else
		snprintf(buf, buf_len, "0x%pK\n", test_virt_addr);

	buflen = strlen(buf);
	if (copy_to_user(ubuf, buf, buflen)) {
		pr_err("Couldn't copy_to_user\n");
		retval = -EFAULT;
	} else {
		*offset = 1; /* non-zero means we're done */
		retval = buflen;
	}

	return retval;
}

static const struct file_operations iommu_debug_test_virt_addr_fops = {
	.open = simple_open,
	.read = iommu_debug_test_virt_addr_read,
};
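
/*
 * "test_virt_addr" reports the kernel virtual address of the 1MB
 * scratch buffer that the dma_map file validates input addresses
 * against; the buffer itself is allocated by setup code outside this
 * section.
 */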
static ssize_t iommu_debug_attach_write(struct file *file,
					const char __user *ubuf,
					size_t count, loff_t *offset)
{
	return __iommu_debug_attach_write(file, ubuf, count, offset,
					  false);
}

static ssize_t iommu_debug_attach_read(struct file *file, char __user *ubuf,
				       size_t count, loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	char c[2];

	if (*offset)
		return 0;

	c[0] = ddev->domain ? '1' : '0';
	c[1] = '\n';
	if (copy_to_user(ubuf, &c, 2)) {
		pr_err("copy_to_user failed\n");
		return -EFAULT;
	}
	*offset = 1; /* non-zero means we're done */

	return 2;
}

static const struct file_operations iommu_debug_attach_fops = {
	.open = simple_open,
	.write = iommu_debug_attach_write,
	.read = iommu_debug_attach_read,
};
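
/*
 * Example (assumed path): attach a debug domain, check state, detach:
 *
 *	echo 1 > /sys/kernel/debug/iommu-tests/<device>/attach
 *	cat /sys/kernel/debug/iommu-tests/<device>/attach	# prints 1
 *	echo 0 > /sys/kernel/debug/iommu-tests/<device>/attach
 */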
static ssize_t iommu_debug_attach_write_secure(struct file *file,
					       const char __user *ubuf,
					       size_t count, loff_t *offset)
{
	return __iommu_debug_attach_write(file, ubuf, count, offset,
					  true);
}

static const struct file_operations iommu_debug_secure_attach_fops = {
	.open = simple_open,
	.write = iommu_debug_attach_write_secure,
	.read = iommu_debug_attach_read,
};
static ssize_t iommu_debug_pte_write(struct file *file,
				     const char __user *ubuf,
				     size_t count, loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	dma_addr_t iova;

	if (kstrtox_from_user(ubuf, count, 0, &iova)) {
		pr_err("Invalid format for iova\n");
		ddev->iova = 0;
		return -EINVAL;
	}

	ddev->iova = iova;
	pr_err("Saved iova=%pa for future PTE commands\n", &iova);
	return count;
}

static ssize_t iommu_debug_pte_read(struct file *file, char __user *ubuf,
				    size_t count, loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	struct device *dev = ddev->dev;
	uint64_t pte;
	char buf[100];
	ssize_t retval;
	size_t buflen;

	if (!dev->archdata.mapping) {
		pr_err("No mapping. Did you already attach?\n");
		return -EINVAL;
	}
	if (!dev->archdata.mapping->domain) {
		pr_err("No domain. Did you already attach?\n");
		return -EINVAL;
	}

	if (*offset)
		return 0;

	memset(buf, 0, sizeof(buf));

	pte = iommu_iova_to_pte(dev->archdata.mapping->domain,
				ddev->iova);

	if (!pte)
		strlcpy(buf, "FAIL\n", sizeof(buf));
	else
		snprintf(buf, sizeof(buf), "pte=%016llx\n", pte);

	buflen = strlen(buf);
	if (copy_to_user(ubuf, buf, buflen)) {
		pr_err("Couldn't copy_to_user\n");
		retval = -EFAULT;
	} else {
		*offset = 1; /* non-zero means we're done */
		retval = buflen;
	}

	return retval;
}

static const struct file_operations iommu_debug_pte_fops = {
	.open = simple_open,
	.write = iommu_debug_pte_write,
	.read = iommu_debug_pte_read,
};
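
/*
 * Example (assumed path): save an IOVA, then read back its page table
 * entry:
 *
 *	echo 0x1000 > /sys/kernel/debug/iommu-tests/<device>/pte
 *	cat /sys/kernel/debug/iommu-tests/<device>/pte	# e.g. pte=...
 */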
static ssize_t iommu_debug_atos_write(struct file *file,
				      const char __user *ubuf,
				      size_t count, loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	dma_addr_t iova;

	if (kstrtox_from_user(ubuf, count, 0, &iova)) {
		pr_err("Invalid format for iova\n");
		ddev->iova = 0;
		return -EINVAL;
	}

	ddev->iova = iova;
	pr_err("Saved iova=%pa for future ATOS commands\n", &iova);
	return count;
}

static ssize_t iommu_debug_atos_read(struct file *file, char __user *ubuf,
				     size_t count, loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	phys_addr_t phys;
	char buf[100];
	ssize_t retval;
	size_t buflen;

	if (!ddev->domain) {
		pr_err("No domain. Did you already attach?\n");
		return -EINVAL;
	}

	if (*offset)
		return 0;

	memset(buf, 0, 100);

	phys = iommu_iova_to_phys_hard(ddev->domain, ddev->iova);
	if (!phys) {
		strlcpy(buf, "FAIL\n", 100);
		phys = iommu_iova_to_phys(ddev->domain, ddev->iova);
		dev_err(ddev->dev, "ATOS for %pa failed. Software walk returned: %pa\n",
			&ddev->iova, &phys);
	} else {
		snprintf(buf, 100, "%pa\n", &phys);
	}

	buflen = strlen(buf);
	if (copy_to_user(ubuf, buf, buflen)) {
		pr_err("Couldn't copy_to_user\n");
		retval = -EFAULT;
	} else {
		*offset = 1; /* non-zero means we're done */
		retval = buflen;
	}

	return retval;
}

static const struct file_operations iommu_debug_atos_fops = {
	.open = simple_open,
	.write = iommu_debug_atos_write,
	.read = iommu_debug_atos_read,
};
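
/*
 * Example (assumed path): ask the SMMU hardware to translate a saved
 * IOVA (an ATOS translation operation):
 *
 *	echo 0x1000 > /sys/kernel/debug/iommu-tests/<device>/atos
 *	cat /sys/kernel/debug/iommu-tests/<device>/atos	# physical address
 */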
static ssize_t iommu_debug_dma_atos_read(struct file *file, char __user *ubuf,
					 size_t count, loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	struct device *dev = ddev->dev;
	phys_addr_t phys;
	char buf[100];
	ssize_t retval;
	size_t buflen;

	if (!dev->archdata.mapping) {
		pr_err("No mapping. Did you already attach?\n");
		return -EINVAL;
	}
	if (!dev->archdata.mapping->domain) {
		pr_err("No domain. Did you already attach?\n");
		return -EINVAL;
	}

	if (*offset)
		return 0;

	memset(buf, 0, sizeof(buf));

	phys = iommu_iova_to_phys_hard(dev->archdata.mapping->domain,
				       ddev->iova);
	if (!phys)
		strlcpy(buf, "FAIL\n", sizeof(buf));
	else
		snprintf(buf, sizeof(buf), "%pa\n", &phys);

	buflen = strlen(buf);
	if (copy_to_user(ubuf, buf, buflen)) {
		pr_err("Couldn't copy_to_user\n");
		retval = -EFAULT;
	} else {
		*offset = 1; /* non-zero means we're done */
		retval = buflen;
	}

	return retval;
}

static const struct file_operations iommu_debug_dma_atos_fops = {
	.open = simple_open,
	.write = iommu_debug_atos_write,
	.read = iommu_debug_dma_atos_read,
};
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001635static ssize_t iommu_debug_map_write(struct file *file, const char __user *ubuf,
1636 size_t count, loff_t *offset)
1637{
Shiraz Hashim3c28c962016-07-04 15:05:35 +05301638 ssize_t retval = -EINVAL;
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001639 int ret;
1640 char *comma1, *comma2, *comma3;
1641 char buf[100];
1642 dma_addr_t iova;
1643 phys_addr_t phys;
1644 size_t size;
1645 int prot;
1646 struct iommu_debug_device *ddev = file->private_data;
1647
1648 if (count >= 100) {
1649 pr_err("Value too large\n");
1650 return -EINVAL;
1651 }
1652
1653 if (!ddev->domain) {
1654 pr_err("No domain. Did you already attach?\n");
1655 return -EINVAL;
1656 }
1657
1658 memset(buf, 0, 100);
1659
1660 if (copy_from_user(buf, ubuf, count)) {
1661 pr_err("Couldn't copy from user\n");
1662 retval = -EFAULT;
1663 }
1664
1665 comma1 = strnchr(buf, count, ',');
1666 if (!comma1)
1667 goto invalid_format;
1668
1669 comma2 = strnchr(comma1 + 1, count, ',');
1670 if (!comma2)
1671 goto invalid_format;
1672
1673 comma3 = strnchr(comma2 + 1, count, ',');
1674 if (!comma3)
1675 goto invalid_format;
1676
1677 /* split up the words */
1678 *comma1 = *comma2 = *comma3 = '\0';
1679
Susheel Khiania4417e72016-07-12 11:28:32 +05301680 if (kstrtoux(buf, 0, &iova))
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001681 goto invalid_format;
1682
Susheel Khiania4417e72016-07-12 11:28:32 +05301683 if (kstrtoux(comma1 + 1, 0, &phys))
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001684 goto invalid_format;
1685
Susheel Khiania4417e72016-07-12 11:28:32 +05301686 if (kstrtosize_t(comma2 + 1, 0, &size))
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001687 goto invalid_format;
1688
1689 if (kstrtoint(comma3 + 1, 0, &prot))
1690 goto invalid_format;
1691
1692 ret = iommu_map(ddev->domain, iova, phys, size, prot);
1693 if (ret) {
1694 pr_err("iommu_map failed with %d\n", ret);
1695 retval = -EIO;
1696 goto out;
1697 }
1698
1699 retval = count;
1700 pr_err("Mapped %pa to %pa (len=0x%zx, prot=0x%x)\n",
1701 &iova, &phys, size, prot);
1702out:
1703 return retval;
1704
1705invalid_format:
1706 pr_err("Invalid format. Expected: iova,phys,len,prot where `prot' is the bitwise OR of IOMMU_READ, IOMMU_WRITE, etc.\n");
1707 return -EINVAL;
1708}
1709
1710static const struct file_operations iommu_debug_map_fops = {
1711 .open = simple_open,
1712 .write = iommu_debug_map_write,
1713};
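
/*
 * Illustrative use of the map file (example path and values only; prot 0x3
 * is IOMMU_READ | IOMMU_WRITE, both defined in include/linux/iommu.h):
 *
 *	echo "0x10000000,0x80000000,0x1000,0x3" > \
 *		/sys/kernel/debug/iommu/tests/<device>/map
 */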

/*
 * Performs DMA mapping of a given virtual address and size to an iova address.
 * User input format: (addr,len,dma attr) where dma attr is:
 *	0: normal mapping
 *	1: force coherent mapping
 *	2: force non-coherent mapping
 *	3: use system cache
 */
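
/*
 * Illustrative walk-through (paths and addresses are examples only): read
 * test_virt_addr to get the base of the 1MB test buffer, then map 4KB of it
 * with a normal (attr 0) mapping:
 *
 *	addr=$(cat /sys/kernel/debug/iommu/tests/<device>/test_virt_addr)
 *	echo "$addr,0x1000,0" > /sys/kernel/debug/iommu/tests/<device>/dma_map
 */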
static ssize_t iommu_debug_dma_map_write(struct file *file,
		const char __user *ubuf, size_t count, loff_t *offset)
{
	ssize_t retval = -EINVAL;
	char *comma1, *comma2;
	char buf[100];
	unsigned long addr;
	void *v_addr;
	dma_addr_t iova;
	size_t size;
	unsigned int attr;
	unsigned long dma_attrs;
	struct iommu_debug_device *ddev = file->private_data;
	struct device *dev = ddev->dev;

	if (count >= sizeof(buf)) {
		pr_err("Value too large\n");
		return -EINVAL;
	}

	if (!dev->archdata.mapping) {
		pr_err("No mapping. Did you already attach?\n");
		retval = -EINVAL;
		goto out;
	}
	if (!dev->archdata.mapping->domain) {
		pr_err("No domain. Did you already attach?\n");
		retval = -EINVAL;
		goto out;
	}

	memset(buf, 0, sizeof(buf));

	if (copy_from_user(buf, ubuf, count)) {
		pr_err("Couldn't copy from user\n");
		retval = -EFAULT;
		goto out;
	}

	comma1 = strnchr(buf, count, ',');
	if (!comma1)
		goto invalid_format;

	comma2 = strnchr(comma1 + 1, count, ',');
	if (!comma2)
		goto invalid_format;

	*comma1 = *comma2 = '\0';

	if (kstrtoul(buf, 0, &addr))
		goto invalid_format;
	v_addr = (void *)addr;

	if (kstrtosize_t(comma1 + 1, 0, &size))
		goto invalid_format;

	if (kstrtouint(comma2 + 1, 0, &attr))
		goto invalid_format;

	/* the whole [v_addr, v_addr + size) range must stay in the buffer */
	if (v_addr < test_virt_addr || size > SZ_1M ||
	    v_addr > (test_virt_addr + SZ_1M - size))
		goto invalid_addr;

	if (attr == 0)
		dma_attrs = 0;
	else if (attr == 1)
		dma_attrs = DMA_ATTR_FORCE_COHERENT;
	else if (attr == 2)
		dma_attrs = DMA_ATTR_FORCE_NON_COHERENT;
	else if (attr == 3)
		dma_attrs = DMA_ATTR_IOMMU_USE_UPSTREAM_HINT;
	else
		goto invalid_format;

	iova = dma_map_single_attrs(dev, v_addr, size,
				    DMA_TO_DEVICE, dma_attrs);

	if (dma_mapping_error(dev, iova)) {
		pr_err("Failed to perform dma_map_single\n");
		retval = -EINVAL;
		goto out;
	}

	retval = count;
	pr_err("Mapped 0x%p to %pa (len=0x%zx)\n",
	       v_addr, &iova, size);
	ddev->iova = iova;
	pr_err("Saved iova=%pa for future PTE commands\n", &iova);
out:
	return retval;

invalid_format:
	pr_err("Invalid format. Expected: addr,len,dma attr where 'dma attr' is\n0: normal mapping\n1: force coherent\n2: force non-coherent\n3: use system cache\n");
	return retval;

invalid_addr:
	pr_err("Invalid addr given! Address should be within 1MB size from start addr returned by doing 'cat test_virt_addr'.\n");
	return retval;
}

static ssize_t iommu_debug_dma_map_read(struct file *file, char __user *ubuf,
					size_t count, loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	struct device *dev = ddev->dev;
	char buf[100];
	ssize_t retval;
	size_t buflen;
	dma_addr_t iova;

	if (!dev->archdata.mapping) {
		pr_err("No mapping. Did you already attach?\n");
		return -EINVAL;
	}
	if (!dev->archdata.mapping->domain) {
		pr_err("No domain. Did you already attach?\n");
		return -EINVAL;
	}

	if (*offset)
		return 0;

	memset(buf, 0, sizeof(buf));

	iova = ddev->iova;
	snprintf(buf, sizeof(buf), "%pa\n", &iova);

	buflen = strlen(buf);
	if (copy_to_user(ubuf, buf, buflen)) {
		pr_err("Couldn't copy_to_user\n");
		retval = -EFAULT;
	} else {
		*offset = 1;	/* non-zero means we're done */
		retval = buflen;
	}

	return retval;
}

static const struct file_operations iommu_debug_dma_map_fops = {
	.open	= simple_open,
	.write	= iommu_debug_dma_map_write,
	.read	= iommu_debug_dma_map_read,
};
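
/*
 * Example read-back (illustrative path): after a successful dma_map write,
 * reading the same file returns the iova saved in ddev->iova:
 *
 *	cat /sys/kernel/debug/iommu/tests/<device>/dma_map
 */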

static ssize_t iommu_debug_unmap_write(struct file *file,
				       const char __user *ubuf,
				       size_t count, loff_t *offset)
{
	ssize_t retval = 0;
	char *comma1;
	char buf[100];
	dma_addr_t iova;
	size_t size;
	size_t unmapped;
	struct iommu_debug_device *ddev = file->private_data;

	if (count >= sizeof(buf)) {
		pr_err("Value too large\n");
		return -EINVAL;
	}

	if (!ddev->domain) {
		pr_err("No domain. Did you already attach?\n");
		return -EINVAL;
	}

	memset(buf, 0, sizeof(buf));

	if (copy_from_user(buf, ubuf, count)) {
		pr_err("Couldn't copy from user\n");
		retval = -EFAULT;
		goto out;
	}

	comma1 = strnchr(buf, count, ',');
	if (!comma1)
		goto invalid_format;

	/* split up the words */
	*comma1 = '\0';

	if (kstrtoux(buf, 0, &iova))
		goto invalid_format;

	if (kstrtosize_t(comma1 + 1, 0, &size))
		goto invalid_format;

	unmapped = iommu_unmap(ddev->domain, iova, size);
	if (unmapped != size) {
		pr_err("iommu_unmap failed. Expected to unmap: 0x%zx, unmapped: 0x%zx\n",
		       size, unmapped);
		return -EIO;
	}

	retval = count;
	pr_err("Unmapped %pa (len=0x%zx)\n", &iova, size);
out:
	return retval;

invalid_format:
	pr_err("Invalid format. Expected: iova,len\n");
	return -EINVAL;
}

static const struct file_operations iommu_debug_unmap_fops = {
	.open	= simple_open,
	.write	= iommu_debug_unmap_write,
};
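
/*
 * Illustrative unmap of the mapping created through the map file above
 * (example path and values only):
 *
 *	echo "0x10000000,0x1000" > /sys/kernel/debug/iommu/tests/<device>/unmap
 */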

static ssize_t iommu_debug_dma_unmap_write(struct file *file,
					   const char __user *ubuf,
					   size_t count, loff_t *offset)
{
	ssize_t retval = 0;
	char *comma1, *comma2;
	char buf[100];
	size_t size;
	unsigned int attr;
	dma_addr_t iova;
	unsigned long dma_attrs;
	struct iommu_debug_device *ddev = file->private_data;
	struct device *dev = ddev->dev;

	if (count >= sizeof(buf)) {
		pr_err("Value too large\n");
		return -EINVAL;
	}

	if (!dev->archdata.mapping) {
		pr_err("No mapping. Did you already attach?\n");
		retval = -EINVAL;
		goto out;
	}
	if (!dev->archdata.mapping->domain) {
		pr_err("No domain. Did you already attach?\n");
		retval = -EINVAL;
		goto out;
	}

	memset(buf, 0, sizeof(buf));

	if (copy_from_user(buf, ubuf, count)) {
		pr_err("Couldn't copy from user\n");
		retval = -EFAULT;
		goto out;
	}

	comma1 = strnchr(buf, count, ',');
	if (!comma1)
		goto invalid_format;

	comma2 = strnchr(comma1 + 1, count, ',');
	if (!comma2)
		goto invalid_format;

	*comma1 = *comma2 = '\0';

	if (kstrtoux(buf, 0, &iova))
		goto invalid_format;

	if (kstrtosize_t(comma1 + 1, 0, &size))
		goto invalid_format;

	if (kstrtouint(comma2 + 1, 0, &attr))
		goto invalid_format;

	if (attr == 0)
		dma_attrs = 0;
	else if (attr == 1)
		dma_attrs = DMA_ATTR_FORCE_COHERENT;
	else if (attr == 2)
		dma_attrs = DMA_ATTR_FORCE_NON_COHERENT;
	else if (attr == 3)
		dma_attrs = DMA_ATTR_IOMMU_USE_UPSTREAM_HINT;
	else
		goto invalid_format;

	dma_unmap_single_attrs(dev, iova, size, DMA_TO_DEVICE, dma_attrs);

	retval = count;
	pr_err("Unmapped %pa (len=0x%zx)\n", &iova, size);
out:
	return retval;

invalid_format:
	pr_err("Invalid format. Expected: iova,len,dma attr\n");
	return -EINVAL;
}

static const struct file_operations iommu_debug_dma_unmap_fops = {
	.open	= simple_open,
	.write	= iommu_debug_dma_unmap_write,
};
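
/*
 * Illustrative dma_unmap of the mapping created above (example path and
 * values; the dma attr should match the one used at map time):
 *
 *	echo "$iova,0x1000,0" > /sys/kernel/debug/iommu/tests/<device>/dma_unmap
 */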

static ssize_t iommu_debug_config_clocks_write(struct file *file,
					       const char __user *ubuf,
					       size_t count, loff_t *offset)
{
	char buf;
	struct iommu_debug_device *ddev = file->private_data;
	struct device *dev = ddev->dev;

	/* we're expecting a single character plus (optionally) a newline */
	if (count > 2) {
		dev_err(dev, "Invalid value\n");
		return -EINVAL;
	}

	if (!ddev->domain) {
		dev_err(dev, "No domain. Did you already attach?\n");
		return -EINVAL;
	}

	if (copy_from_user(&buf, ubuf, 1)) {
		dev_err(dev, "Couldn't copy from user\n");
		return -EFAULT;
	}

	switch (buf) {
	case '0':
		dev_err(dev, "Disabling config clocks\n");
		iommu_disable_config_clocks(ddev->domain);
		break;
	case '1':
		dev_err(dev, "Enabling config clocks\n");
		if (iommu_enable_config_clocks(ddev->domain))
			dev_err(dev, "Failed!\n");
		break;
	default:
		dev_err(dev, "Invalid value. Should be 0 or 1.\n");
		return -EINVAL;
	}

	return count;
}

static const struct file_operations iommu_debug_config_clocks_fops = {
	.open	= simple_open,
	.write	= iommu_debug_config_clocks_write,
};
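
/*
 * Example (illustrative path): hold the IOMMU's configuration clocks on
 * while inspecting registers, then release them:
 *
 *	echo 1 > /sys/kernel/debug/iommu/tests/<device>/config_clocks
 *	echo 0 > /sys/kernel/debug/iommu/tests/<device>/config_clocks
 */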

static ssize_t iommu_debug_trigger_fault_write(
		struct file *file, const char __user *ubuf, size_t count,
		loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	unsigned long flags;

	if (!ddev->domain) {
		pr_err("No domain. Did you already attach?\n");
		return -EINVAL;
	}

	if (kstrtoul_from_user(ubuf, count, 0, &flags)) {
		pr_err("Invalid flags format\n");
		return -EFAULT;
	}

	iommu_trigger_fault(ddev->domain, flags);

	return count;
}

static const struct file_operations iommu_debug_trigger_fault_fops = {
	.open	= simple_open,
	.write	= iommu_debug_trigger_fault_write,
};
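
/*
 * Example (illustrative; the flags value is passed straight through to the
 * driver's trigger-fault hook, so its meaning is implementation-defined):
 *
 *	echo 0 > /sys/kernel/debug/iommu/tests/<device>/trigger-fault
 */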

/*
 * The following will only work for drivers that implement the generic
 * device tree bindings described in
 * Documentation/devicetree/bindings/iommu/iommu.txt
 */
static int snarf_iommu_devices(struct device *dev, void *ignored)
{
	struct iommu_debug_device *ddev;
	struct dentry *dir;

	if (!of_find_property(dev->of_node, "iommus", NULL))
		return 0;

	/* Hold a reference count */
	if (!iommu_group_get(dev))
		return 0;

	ddev = kzalloc(sizeof(*ddev), GFP_KERNEL);
	if (!ddev)
		return -ENOMEM;
	ddev->dev = dev;
	dir = debugfs_create_dir(dev_name(dev), debugfs_tests_dir);
	if (!dir) {
		pr_err("Couldn't create iommu/tests/%s debugfs dir\n",
		       dev_name(dev));
		goto err;
	}

	if (!debugfs_create_file("nr_iters", S_IRUSR, dir, &iters_per_op,
				 &iommu_debug_nr_iters_ops)) {
		pr_err("Couldn't create iommu/tests/%s/nr_iters debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("test_virt_addr", 0400, dir, ddev,
				 &iommu_debug_test_virt_addr_fops)) {
		pr_err("Couldn't create iommu/tests/%s/test_virt_addr debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("profiling", S_IRUSR, dir, ddev,
				 &iommu_debug_profiling_fops)) {
		pr_err("Couldn't create iommu/tests/%s/profiling debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("secure_profiling", S_IRUSR, dir, ddev,
				 &iommu_debug_secure_profiling_fops)) {
		pr_err("Couldn't create iommu/tests/%s/secure_profiling debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("profiling_fast", S_IRUSR, dir, ddev,
				 &iommu_debug_profiling_fast_fops)) {
		pr_err("Couldn't create iommu/tests/%s/profiling_fast debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("profiling_fast_dma_api", S_IRUSR, dir, ddev,
				 &iommu_debug_profiling_fast_dma_api_fops)) {
		pr_err("Couldn't create iommu/tests/%s/profiling_fast_dma_api debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("functional_fast_dma_api", S_IRUSR, dir, ddev,
				 &iommu_debug_functional_fast_dma_api_fops)) {
		pr_err("Couldn't create iommu/tests/%s/functional_fast_dma_api debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("functional_arm_dma_api", S_IRUSR, dir, ddev,
				 &iommu_debug_functional_arm_dma_api_fops)) {
		pr_err("Couldn't create iommu/tests/%s/functional_arm_dma_api debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("dma_attach", 0600, dir, ddev,
				 &iommu_debug_dma_attach_fops)) {
		pr_err("Couldn't create iommu/tests/%s/dma_attach debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("attach", S_IRUSR, dir, ddev,
				 &iommu_debug_attach_fops)) {
		pr_err("Couldn't create iommu/tests/%s/attach debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("secure_attach", S_IRUSR, dir, ddev,
				 &iommu_debug_secure_attach_fops)) {
		pr_err("Couldn't create iommu/tests/%s/secure_attach debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("atos", S_IWUSR, dir, ddev,
				 &iommu_debug_atos_fops)) {
		pr_err("Couldn't create iommu/tests/%s/atos debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("dma_atos", 0600, dir, ddev,
				 &iommu_debug_dma_atos_fops)) {
		pr_err("Couldn't create iommu/tests/%s/dma_atos debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("map", S_IWUSR, dir, ddev,
				 &iommu_debug_map_fops)) {
		pr_err("Couldn't create iommu/tests/%s/map debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("dma_map", 0600, dir, ddev,
				 &iommu_debug_dma_map_fops)) {
		pr_err("Couldn't create iommu/tests/%s/dma_map debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("unmap", S_IWUSR, dir, ddev,
				 &iommu_debug_unmap_fops)) {
		pr_err("Couldn't create iommu/tests/%s/unmap debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("dma_unmap", 0200, dir, ddev,
				 &iommu_debug_dma_unmap_fops)) {
		pr_err("Couldn't create iommu/tests/%s/dma_unmap debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("pte", 0600, dir, ddev,
				 &iommu_debug_pte_fops)) {
		pr_err("Couldn't create iommu/tests/%s/pte debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("config_clocks", S_IWUSR, dir, ddev,
				 &iommu_debug_config_clocks_fops)) {
		pr_err("Couldn't create iommu/tests/%s/config_clocks debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("trigger-fault", 0200, dir, ddev,
				 &iommu_debug_trigger_fault_fops)) {
		pr_err("Couldn't create iommu/tests/%s/trigger-fault debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	list_add(&ddev->list, &iommu_debug_devices);
	return 0;

err_rmdir:
	debugfs_remove_recursive(dir);
err:
	kfree(ddev);
	return 0;
}

static int iommu_debug_init_tests(void)
{
	debugfs_tests_dir = debugfs_create_dir("tests",
					       iommu_debugfs_top);
	if (!debugfs_tests_dir) {
		pr_err("Couldn't create iommu/tests debugfs directory\n");
		return -ENODEV;
	}

	test_virt_addr = kzalloc(SZ_1M, GFP_KERNEL);
	if (!test_virt_addr) {
		debugfs_remove_recursive(debugfs_tests_dir);
		return -ENOMEM;
	}

	return bus_for_each_dev(&platform_bus_type, NULL, NULL,
				snarf_iommu_devices);
}

static void iommu_debug_destroy_tests(void)
{
	debugfs_remove_recursive(debugfs_tests_dir);
	kfree(test_virt_addr);
}
#else
static inline int iommu_debug_init_tests(void) { return 0; }
static inline void iommu_debug_destroy_tests(void) { }
#endif

/*
 * This isn't really a "driver", we just need something in the device tree
 * so that our tests can run without any client drivers, and our tests rely
 * on parsing the device tree for nodes with the `iommus' property.
 */
static int iommu_debug_pass(struct platform_device *pdev)
{
	return 0;
}

static const struct of_device_id iommu_debug_of_match[] = {
	{ .compatible = "iommu-debug-test" },
	{ },
};
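
/*
 * Illustrative device tree node (not from this file) that both binds this
 * stub driver and is picked up by snarf_iommu_devices() via its `iommus'
 * property. The SMMU phandle and stream ID are hypothetical; the specifier
 * format depends on the IOMMU binding in use:
 *
 *	iommu-debug-test {
 *		compatible = "iommu-debug-test";
 *		iommus = <&apps_smmu 0x40>;
 *	};
 */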

static struct platform_driver iommu_debug_driver = {
	.probe = iommu_debug_pass,
	.remove = iommu_debug_pass,
	.driver = {
		.name = "iommu-debug",
		.of_match_table = iommu_debug_of_match,
	},
};

static int iommu_debug_init(void)
{
	if (iommu_debug_init_tests())
		return -ENODEV;

	return platform_driver_register(&iommu_debug_driver);
}

static void iommu_debug_exit(void)
{
	platform_driver_unregister(&iommu_debug_driver);
	iommu_debug_destroy_tests();
}

module_init(iommu_debug_init);
module_exit(iommu_debug_exit);