/*
 * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) "iommu-debug: %s: " fmt, __func__

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/dma-contiguous.h>
#include <soc/qcom/secure_buffer.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
#include <asm/dma-iommu.h>

#if defined(CONFIG_IOMMU_DEBUG_TRACKING) || defined(CONFIG_IOMMU_TESTS)

static const char *iommu_debug_attr_to_string(enum iommu_attr attr)
{
	switch (attr) {
	case DOMAIN_ATTR_GEOMETRY:
		return "DOMAIN_ATTR_GEOMETRY";
	case DOMAIN_ATTR_PAGING:
		return "DOMAIN_ATTR_PAGING";
	case DOMAIN_ATTR_WINDOWS:
		return "DOMAIN_ATTR_WINDOWS";
	case DOMAIN_ATTR_FSL_PAMU_STASH:
		return "DOMAIN_ATTR_FSL_PAMU_STASH";
	case DOMAIN_ATTR_FSL_PAMU_ENABLE:
		return "DOMAIN_ATTR_FSL_PAMU_ENABLE";
	case DOMAIN_ATTR_FSL_PAMUV1:
		return "DOMAIN_ATTR_FSL_PAMUV1";
	case DOMAIN_ATTR_NESTING:
		return "DOMAIN_ATTR_NESTING";
	case DOMAIN_ATTR_PT_BASE_ADDR:
		return "DOMAIN_ATTR_PT_BASE_ADDR";
	case DOMAIN_ATTR_SECURE_VMID:
		return "DOMAIN_ATTR_SECURE_VMID";
	case DOMAIN_ATTR_ATOMIC:
		return "DOMAIN_ATTR_ATOMIC";
	case DOMAIN_ATTR_CONTEXT_BANK:
		return "DOMAIN_ATTR_CONTEXT_BANK";
	case DOMAIN_ATTR_TTBR0:
		return "DOMAIN_ATTR_TTBR0";
	case DOMAIN_ATTR_CONTEXTIDR:
		return "DOMAIN_ATTR_CONTEXTIDR";
	case DOMAIN_ATTR_PROCID:
		return "DOMAIN_ATTR_PROCID";
	case DOMAIN_ATTR_DYNAMIC:
		return "DOMAIN_ATTR_DYNAMIC";
	case DOMAIN_ATTR_NON_FATAL_FAULTS:
		return "DOMAIN_ATTR_NON_FATAL_FAULTS";
	case DOMAIN_ATTR_S1_BYPASS:
		return "DOMAIN_ATTR_S1_BYPASS";
	case DOMAIN_ATTR_FAST:
		return "DOMAIN_ATTR_FAST";
	case DOMAIN_ATTR_EARLY_MAP:
		return "DOMAIN_ATTR_EARLY_MAP";
	default:
		return "Unknown attr!";
	}
}
#endif

#ifdef CONFIG_IOMMU_DEBUG_TRACKING

static DEFINE_MUTEX(iommu_debug_attachments_lock);
static LIST_HEAD(iommu_debug_attachments);

/*
 * Each group may have more than one domain; but each domain may
 * only have one group.
 * Used by debug tools to display the name of the device(s) associated
 * with a particular domain.
 */
struct iommu_debug_attachment {
	struct iommu_domain *domain;
	struct iommu_group *group;
	struct list_head list;
};

void iommu_debug_attach_device(struct iommu_domain *domain,
			       struct device *dev)
{
	struct iommu_debug_attachment *attach;
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		return;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return;

	attach->domain = domain;
	attach->group = group;
	INIT_LIST_HEAD(&attach->list);

	mutex_lock(&iommu_debug_attachments_lock);
	list_add(&attach->list, &iommu_debug_attachments);
	mutex_unlock(&iommu_debug_attachments_lock);
}

void iommu_debug_domain_remove(struct iommu_domain *domain)
{
	struct iommu_debug_attachment *it, *tmp;

	mutex_lock(&iommu_debug_attachments_lock);
	list_for_each_entry_safe(it, tmp, &iommu_debug_attachments, list) {
		if (it->domain != domain)
			continue;
		list_del(&it->list);
		iommu_group_put(it->group);
		kfree(it);
	}

	mutex_unlock(&iommu_debug_attachments_lock);
}
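
/*
 * The two hooks above are expected to be wired into the (out-of-tree)
 * IOMMU core paths that attach devices and free domains.  A minimal
 * sketch of the intended call sites -- the surrounding core function
 * bodies here are illustrative assumptions, not this tree's actual
 * core code:
 *
 *	ret = __iommu_attach_device(domain, dev);
 *	if (!ret)
 *		iommu_debug_attach_device(domain, dev);
 *
 * and, on the teardown side, just before the domain is freed:
 *
 *	iommu_debug_domain_remove(domain);
 *	domain->ops->domain_free(domain);
 */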

#endif

#ifdef CONFIG_IOMMU_TESTS

#ifdef CONFIG_64BIT

#define kstrtoux kstrtou64
#define kstrtox_from_user kstrtoull_from_user
#define kstrtosize_t kstrtoul

#else

#define kstrtoux kstrtou32
#define kstrtox_from_user kstrtouint_from_user
#define kstrtosize_t kstrtouint

#endif

static LIST_HEAD(iommu_debug_devices);
static struct dentry *debugfs_tests_dir;
static u32 iters_per_op = 1;

struct iommu_debug_device {
	struct device *dev;
	struct iommu_domain *domain;
	u64 iova;
	u64 phys;
	size_t len;
	struct list_head list;
};

static int iommu_debug_build_phoney_sg_table(struct device *dev,
					     struct sg_table *table,
					     unsigned long total_size,
					     unsigned long chunk_size)
{
	unsigned long nents = total_size / chunk_size;
	struct scatterlist *sg;
	int i;
	struct page *page;

	if (!IS_ALIGNED(total_size, PAGE_SIZE))
		return -EINVAL;
	if (!IS_ALIGNED(total_size, chunk_size))
		return -EINVAL;
	if (sg_alloc_table(table, nents, GFP_KERNEL))
		return -EINVAL;
	page = alloc_pages(GFP_KERNEL, get_order(chunk_size));
	if (!page)
		goto free_table;

	/* all the same page... why not. */
	for_each_sg(table->sgl, sg, table->nents, i)
		sg_set_page(sg, page, chunk_size, 0);

	return 0;

free_table:
	sg_free_table(table);
	return -ENOMEM;
}

static void iommu_debug_destroy_phoney_sg_table(struct device *dev,
						struct sg_table *table,
						unsigned long chunk_size)
{
	__free_pages(sg_page(table->sgl), get_order(chunk_size));
	sg_free_table(table);
}
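
/*
 * A sketch of how the helper pair above is meant to be used (the
 * map_sg profiling loop further down does exactly this); the sizes
 * here are just examples:
 *
 *	struct sg_table table;
 *
 *	if (iommu_debug_build_phoney_sg_table(dev, &table, SZ_1M, SZ_4K))
 *		return;
 *	iommu_map_sg(domain, iova, table.sgl, table.nents,
 *		     IOMMU_READ | IOMMU_WRITE);
 *	iommu_unmap(domain, iova, SZ_1M);
 *	iommu_debug_destroy_phoney_sg_table(dev, &table, SZ_4K);
 */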

static const char *_size_to_string(unsigned long size)
{
	switch (size) {
	case SZ_4K:
		return "4K";
	case SZ_8K:
		return "8K";
	case SZ_16K:
		return "16K";
	case SZ_64K:
		return "64K";
	case SZ_2M:
		return "2M";
	case SZ_1M * 12:
		return "12M";
	case SZ_1M * 20:
		return "20M";
	}
	return "unknown size, please add to _size_to_string";
}

static int nr_iters_set(void *data, u64 val)
{
	if (!val)
		val = 1;
	if (val > 10000)
		val = 10000;
	*(u32 *)data = val;
	return 0;
}

static int nr_iters_get(void *data, u64 *val)
{
	*val = *(u32 *)data;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(iommu_debug_nr_iters_ops,
			nr_iters_get, nr_iters_set, "%llu\n");
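
/*
 * Usage sketch (assumes debugfs is mounted at /sys/kernel/debug; the
 * per-device directory name depends on the platform's device tree):
 *
 *	echo 100 > /sys/kernel/debug/iommu/tests/<device>/nr_iters
 *
 * Values are clamped to the range [1, 10000].
 */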

static void iommu_debug_device_profiling(struct seq_file *s, struct device *dev,
					 enum iommu_attr attrs[],
					 void *attr_values[], int nattrs,
					 const size_t sizes[])
{
	int i;
	const size_t *sz;
	struct iommu_domain *domain;
	unsigned long iova = 0x10000;
	phys_addr_t paddr = 0xa000;

	domain = iommu_domain_alloc(&platform_bus_type);
	if (!domain) {
		seq_puts(s, "Couldn't allocate domain\n");
		return;
	}

	seq_puts(s, "Domain attributes: [ ");
	for (i = 0; i < nattrs; ++i) {
		/* not all attrs are ints, but this will get us by for now */
		seq_printf(s, "%s=%d%s", iommu_debug_attr_to_string(attrs[i]),
			   *((int *)attr_values[i]),
			   i < nattrs ? " " : "");
	}
	seq_puts(s, "]\n");
	for (i = 0; i < nattrs; ++i) {
		if (iommu_domain_set_attr(domain, attrs[i], attr_values[i])) {
			seq_printf(s, "Couldn't set %d to the value at %p\n",
				   attrs[i], attr_values[i]);
			goto out_domain_free;
		}
	}

	if (iommu_attach_group(domain, dev->iommu_group)) {
		seq_puts(s,
			 "Couldn't attach new domain to device. Is it already attached?\n");
		goto out_domain_free;
	}

	seq_printf(s, "(average over %d iterations)\n", iters_per_op);
	seq_printf(s, "%8s %19s %16s\n", "size", "iommu_map", "iommu_unmap");
	for (sz = sizes; *sz; ++sz) {
		size_t size = *sz;
		size_t unmapped;
		u64 map_elapsed_ns = 0, unmap_elapsed_ns = 0;
		u64 map_elapsed_us = 0, unmap_elapsed_us = 0;
		u32 map_elapsed_rem = 0, unmap_elapsed_rem = 0;
		struct timespec tbefore, tafter, diff;
		int i;

		for (i = 0; i < iters_per_op; ++i) {
			getnstimeofday(&tbefore);
			if (iommu_map(domain, iova, paddr, size,
				      IOMMU_READ | IOMMU_WRITE)) {
				seq_puts(s, "Failed to map\n");
				continue;
			}
			getnstimeofday(&tafter);
			diff = timespec_sub(tafter, tbefore);
			map_elapsed_ns += timespec_to_ns(&diff);

			getnstimeofday(&tbefore);
			unmapped = iommu_unmap(domain, iova, size);
			if (unmapped != size) {
				seq_printf(s,
					   "Only unmapped %zx instead of %zx\n",
					   unmapped, size);
				continue;
			}
			getnstimeofday(&tafter);
			diff = timespec_sub(tafter, tbefore);
			unmap_elapsed_ns += timespec_to_ns(&diff);
		}

		map_elapsed_ns = div_u64_rem(map_elapsed_ns, iters_per_op,
					     &map_elapsed_rem);
		unmap_elapsed_ns = div_u64_rem(unmap_elapsed_ns, iters_per_op,
					       &unmap_elapsed_rem);

		map_elapsed_us = div_u64_rem(map_elapsed_ns, 1000,
					     &map_elapsed_rem);
		unmap_elapsed_us = div_u64_rem(unmap_elapsed_ns, 1000,
					       &unmap_elapsed_rem);

		seq_printf(s, "%8s %12lld.%03d us %9lld.%03d us\n",
			   _size_to_string(size),
			   map_elapsed_us, map_elapsed_rem,
			   unmap_elapsed_us, unmap_elapsed_rem);
	}

	seq_putc(s, '\n');
	seq_printf(s, "%8s %19s %16s\n", "size", "iommu_map_sg", "iommu_unmap");
	for (sz = sizes; *sz; ++sz) {
		size_t size = *sz;
		size_t unmapped;
		u64 map_elapsed_ns = 0, unmap_elapsed_ns = 0;
		u64 map_elapsed_us = 0, unmap_elapsed_us = 0;
		u32 map_elapsed_rem = 0, unmap_elapsed_rem = 0;
		struct timespec tbefore, tafter, diff;
		struct sg_table table;
		unsigned long chunk_size = SZ_4K;
		int i;

		if (iommu_debug_build_phoney_sg_table(dev, &table, size,
						      chunk_size)) {
			seq_puts(s,
				 "couldn't build phoney sg table! bailing...\n");
			goto out_detach;
		}

		for (i = 0; i < iters_per_op; ++i) {
			getnstimeofday(&tbefore);
			if (iommu_map_sg(domain, iova, table.sgl, table.nents,
					 IOMMU_READ | IOMMU_WRITE) != size) {
				seq_puts(s, "Failed to map_sg\n");
				goto next;
			}
			getnstimeofday(&tafter);
			diff = timespec_sub(tafter, tbefore);
			map_elapsed_ns += timespec_to_ns(&diff);

			getnstimeofday(&tbefore);
			unmapped = iommu_unmap(domain, iova, size);
			if (unmapped != size) {
				seq_printf(s,
					   "Only unmapped %zx instead of %zx\n",
					   unmapped, size);
				goto next;
			}
			getnstimeofday(&tafter);
			diff = timespec_sub(tafter, tbefore);
			unmap_elapsed_ns += timespec_to_ns(&diff);
		}

		map_elapsed_ns = div_u64_rem(map_elapsed_ns, iters_per_op,
					     &map_elapsed_rem);
		unmap_elapsed_ns = div_u64_rem(unmap_elapsed_ns, iters_per_op,
					       &unmap_elapsed_rem);

		map_elapsed_us = div_u64_rem(map_elapsed_ns, 1000,
					     &map_elapsed_rem);
		unmap_elapsed_us = div_u64_rem(unmap_elapsed_ns, 1000,
					       &unmap_elapsed_rem);

		seq_printf(s, "%8s %12lld.%03d us %9lld.%03d us\n",
			   _size_to_string(size),
			   map_elapsed_us, map_elapsed_rem,
			   unmap_elapsed_us, unmap_elapsed_rem);

next:
		iommu_debug_destroy_phoney_sg_table(dev, &table, chunk_size);
	}

out_detach:
	iommu_detach_group(domain, dev->iommu_group);
out_domain_free:
	iommu_domain_free(domain);
}

static int iommu_debug_profiling_show(struct seq_file *s, void *ignored)
{
	struct iommu_debug_device *ddev = s->private;
	const size_t sizes[] = { SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12,
				 SZ_1M * 20, 0 };
	enum iommu_attr attrs[] = {
		DOMAIN_ATTR_ATOMIC,
	};
	int atomic = 1;
	void *attr_values[] = { &atomic };

	iommu_debug_device_profiling(s, ddev->dev, attrs, attr_values,
				     ARRAY_SIZE(attrs), sizes);

	return 0;
}

static int iommu_debug_profiling_open(struct inode *inode, struct file *file)
{
	return single_open(file, iommu_debug_profiling_show, inode->i_private);
}

static const struct file_operations iommu_debug_profiling_fops = {
	.open = iommu_debug_profiling_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
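
/*
 * Reading any of the profiling files (this one and the secure/fast
 * variants below) runs the corresponding benchmark and prints a table
 * of map/unmap timings.  Sketch (device name and numbers vary):
 *
 *	cat /sys/kernel/debug/iommu/tests/<device>/profiling
 */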

static int iommu_debug_secure_profiling_show(struct seq_file *s, void *ignored)
{
	struct iommu_debug_device *ddev = s->private;
	const size_t sizes[] = { SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12,
				 SZ_1M * 20, 0 };

	enum iommu_attr attrs[] = {
		DOMAIN_ATTR_ATOMIC,
		DOMAIN_ATTR_SECURE_VMID,
	};
	int one = 1, secure_vmid = VMID_CP_PIXEL;
	void *attr_values[] = { &one, &secure_vmid };

	iommu_debug_device_profiling(s, ddev->dev, attrs, attr_values,
				     ARRAY_SIZE(attrs), sizes);

	return 0;
}

static int iommu_debug_secure_profiling_open(struct inode *inode,
					     struct file *file)
{
	return single_open(file, iommu_debug_secure_profiling_show,
			   inode->i_private);
}

static const struct file_operations iommu_debug_secure_profiling_fops = {
	.open = iommu_debug_secure_profiling_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int iommu_debug_profiling_fast_show(struct seq_file *s, void *ignored)
{
	struct iommu_debug_device *ddev = s->private;
	size_t sizes[] = {SZ_4K, SZ_8K, SZ_16K, SZ_64K, 0};
	enum iommu_attr attrs[] = {
		DOMAIN_ATTR_FAST,
		DOMAIN_ATTR_ATOMIC,
	};
	int one = 1;
	void *attr_values[] = { &one, &one };

	iommu_debug_device_profiling(s, ddev->dev, attrs, attr_values,
				     ARRAY_SIZE(attrs), sizes);

	return 0;
}

static int iommu_debug_profiling_fast_open(struct inode *inode,
					   struct file *file)
{
	return single_open(file, iommu_debug_profiling_fast_show,
			   inode->i_private);
}

static const struct file_operations iommu_debug_profiling_fast_fops = {
	.open = iommu_debug_profiling_fast_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int iommu_debug_profiling_fast_dma_api_show(struct seq_file *s,
						   void *ignored)
{
	int i, experiment;
	struct iommu_debug_device *ddev = s->private;
	struct device *dev = ddev->dev;
	u64 map_elapsed_ns[10], unmap_elapsed_ns[10];
	struct dma_iommu_mapping *mapping;
	dma_addr_t dma_addr;
	void *virt;
	int fast = 1;
	const char * const extra_labels[] = {
		"not coherent",
		"coherent",
	};
	unsigned long extra_attrs[] = {
		0,
		DMA_ATTR_SKIP_CPU_SYNC,
	};

	virt = kmalloc(1518, GFP_KERNEL);
	if (!virt)
		goto out;

	mapping = arm_iommu_create_mapping(&platform_bus_type, 0, SZ_1G * 4ULL);
	if (!mapping) {
		seq_puts(s, "arm_iommu_create_mapping failed\n");
		goto out_kfree;
	}

	if (iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_FAST, &fast)) {
		seq_puts(s, "iommu_domain_set_attr failed\n");
		goto out_release_mapping;
	}

	if (arm_iommu_attach_device(dev, mapping)) {
		seq_puts(s, "arm_iommu_attach_device failed\n");
		goto out_release_mapping;
	}

	if (iommu_enable_config_clocks(mapping->domain)) {
		seq_puts(s, "Couldn't enable clocks\n");
		goto out_detach;
	}
	for (experiment = 0; experiment < 2; ++experiment) {
		size_t map_avg = 0, unmap_avg = 0;

		for (i = 0; i < 10; ++i) {
			struct timespec tbefore, tafter, diff;
			u64 ns;

			getnstimeofday(&tbefore);
			dma_addr = dma_map_single_attrs(
				dev, virt, SZ_4K, DMA_TO_DEVICE,
				extra_attrs[experiment]);
			getnstimeofday(&tafter);
			diff = timespec_sub(tafter, tbefore);
			ns = timespec_to_ns(&diff);
			if (dma_mapping_error(dev, dma_addr)) {
				seq_puts(s, "dma_map_single failed\n");
				goto out_disable_config_clocks;
			}
			map_elapsed_ns[i] = ns;

			getnstimeofday(&tbefore);
			dma_unmap_single_attrs(
				dev, dma_addr, SZ_4K, DMA_TO_DEVICE,
				extra_attrs[experiment]);
			getnstimeofday(&tafter);
			diff = timespec_sub(tafter, tbefore);
			ns = timespec_to_ns(&diff);
			unmap_elapsed_ns[i] = ns;
		}

		seq_printf(s, "%13s %24s (ns): [", extra_labels[experiment],
			   "dma_map_single_attrs");
		for (i = 0; i < 10; ++i) {
			map_avg += map_elapsed_ns[i];
			seq_printf(s, "%5llu%s", map_elapsed_ns[i],
				   i < 9 ? ", " : "");
		}
		map_avg /= 10;
		seq_printf(s, "] (avg: %zu)\n", map_avg);

		seq_printf(s, "%13s %24s (ns): [", extra_labels[experiment],
			   "dma_unmap_single_attrs");
		for (i = 0; i < 10; ++i) {
			unmap_avg += unmap_elapsed_ns[i];
			seq_printf(s, "%5llu%s", unmap_elapsed_ns[i],
				   i < 9 ? ", " : "");
		}
		unmap_avg /= 10;
		seq_printf(s, "] (avg: %zu)\n", unmap_avg);
	}

out_disable_config_clocks:
	iommu_disable_config_clocks(mapping->domain);
out_detach:
	arm_iommu_detach_device(dev);
out_release_mapping:
	arm_iommu_release_mapping(mapping);
out_kfree:
	kfree(virt);
out:
	return 0;
}

static int iommu_debug_profiling_fast_dma_api_open(struct inode *inode,
						   struct file *file)
{
	return single_open(file, iommu_debug_profiling_fast_dma_api_show,
			   inode->i_private);
}

static const struct file_operations iommu_debug_profiling_fast_dma_api_fops = {
	.open = iommu_debug_profiling_fast_dma_api_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __tlb_stress_sweep(struct device *dev, struct seq_file *s)
{
	int i, ret = 0;
	u64 iova;
	const u64 max = SZ_1G * 4ULL - 1;
	void *virt;
	phys_addr_t phys;
	dma_addr_t dma_addr;

	/*
	 * we'll be doing 4K and 8K mappings.  Need to own an entire 8K
	 * chunk that we can work with.
	 */
	virt = (void *)__get_free_pages(GFP_KERNEL, get_order(SZ_8K));
	if (!virt)
		return -ENOMEM;
	phys = virt_to_phys(virt);

	/* fill the whole 4GB space */
	for (iova = 0, i = 0; iova < max; iova += SZ_8K, ++i) {
		dma_addr = dma_map_single(dev, virt, SZ_8K, DMA_TO_DEVICE);
		if (dma_addr == DMA_ERROR_CODE) {
			dev_err(dev, "Failed map on iter %d\n", i);
			ret = -EINVAL;
			goto out;
		}
	}

	if (dma_map_single(dev, virt, SZ_4K, DMA_TO_DEVICE) != DMA_ERROR_CODE) {
		dev_err(dev,
			"dma_map_single unexpectedly succeeded (VA should have been exhausted)\n");
		ret = -EINVAL;
		goto out;
	}

	/*
	 * free up 4K at the very beginning, then leave one 4K mapping,
	 * then free up 8K.  This will result in the next 8K map skipping
	 * over the 4K hole and taking the 8K one.
	 */
	dma_unmap_single(dev, 0, SZ_4K, DMA_TO_DEVICE);
	dma_unmap_single(dev, SZ_8K, SZ_4K, DMA_TO_DEVICE);
	dma_unmap_single(dev, SZ_8K + SZ_4K, SZ_4K, DMA_TO_DEVICE);

	/* remap 8K */
	dma_addr = dma_map_single(dev, virt, SZ_8K, DMA_TO_DEVICE);
	if (dma_addr != SZ_8K) {
		dma_addr_t expected = SZ_8K;

		dev_err(dev, "Unexpected dma_addr. got: %pa expected: %pa\n",
			&dma_addr, &expected);
		ret = -EINVAL;
		goto out;
	}

	/*
	 * now remap 4K.  We should get the first 4K chunk that was skipped
	 * over during the previous 8K map.  If we missed a TLB invalidate
	 * at that point this should explode.
	 */
	dma_addr = dma_map_single(dev, virt, SZ_4K, DMA_TO_DEVICE);
	if (dma_addr != 0) {
		dma_addr_t expected = 0;

		dev_err(dev, "Unexpected dma_addr. got: %pa expected: %pa\n",
			&dma_addr, &expected);
		ret = -EINVAL;
		goto out;
	}

	if (dma_map_single(dev, virt, SZ_4K, DMA_TO_DEVICE) != DMA_ERROR_CODE) {
		dev_err(dev,
			"dma_map_single unexpectedly succeeded after remaps (VA should have been exhausted)\n");
		ret = -EINVAL;
		goto out;
	}

	/* we're all full again. unmap everything. */
	for (iova = 0; iova < max; iova += SZ_8K)
		dma_unmap_single(dev, (dma_addr_t)iova, SZ_8K, DMA_TO_DEVICE);

out:
	free_pages((unsigned long)virt, get_order(SZ_8K));
	return ret;
}

struct fib_state {
	unsigned long cur;
	unsigned long prev;
};

static void fib_init(struct fib_state *f)
{
	f->cur = f->prev = 1;
}

static unsigned long get_next_fib(struct fib_state *f)
{
	unsigned long next = f->cur + f->prev;

	f->prev = f->cur;
	f->cur = next;
	return next;
}

/*
 * Not actually random.  Just testing the fibs (and max - the fibs).
 */
static int __rand_va_sweep(struct device *dev, struct seq_file *s,
			   const size_t size)
{
	u64 iova;
	const u64 max = SZ_1G * 4ULL - 1;
	int i, remapped, unmapped, ret = 0;
	void *virt;
	dma_addr_t dma_addr, dma_addr2;
	struct fib_state fib;

	virt = (void *)__get_free_pages(GFP_KERNEL, get_order(size));
	if (!virt) {
		if (size > SZ_8K) {
			dev_err(dev,
				"Failed to allocate %s of memory, which is a lot. Skipping test for this size\n",
				_size_to_string(size));
			return 0;
		}
		return -ENOMEM;
	}

	/* fill the whole 4GB space */
	for (iova = 0, i = 0; iova < max; iova += size, ++i) {
		dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
		if (dma_addr == DMA_ERROR_CODE) {
			dev_err(dev, "Failed map on iter %d\n", i);
			ret = -EINVAL;
			goto out;
		}
	}

	/* now unmap "random" iovas */
	unmapped = 0;
	fib_init(&fib);
	for (iova = (u64)get_next_fib(&fib) * size;
	     iova < max - size;
	     iova = (u64)get_next_fib(&fib) * size) {
		dma_addr = (dma_addr_t)(iova);
		dma_addr2 = (dma_addr_t)((max + 1) - size - iova);
		if (dma_addr == dma_addr2) {
			WARN(1,
			     "%s test needs update! The random number sequence is folding in on itself and should be changed.\n",
			     __func__);
			return -EINVAL;
		}
		dma_unmap_single(dev, dma_addr, size, DMA_TO_DEVICE);
		dma_unmap_single(dev, dma_addr2, size, DMA_TO_DEVICE);
		unmapped += 2;
	}

	/* and map until everything fills back up */
	for (remapped = 0; ; ++remapped) {
		dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
		if (dma_addr == DMA_ERROR_CODE)
			break;
	}

	if (unmapped != remapped) {
		dev_err(dev,
			"Unexpected random remap count! Unmapped %d but remapped %d\n",
			unmapped, remapped);
		ret = -EINVAL;
	}

	for (iova = 0; iova < max; iova += size)
		dma_unmap_single(dev, (dma_addr_t)iova, size, DMA_TO_DEVICE);

out:
	free_pages((unsigned long)virt, get_order(size));
	return ret;
}

static int __check_mapping(struct device *dev, struct iommu_domain *domain,
			   dma_addr_t iova, phys_addr_t expected)
{
	phys_addr_t res = iommu_iova_to_phys_hard(domain, iova);
	phys_addr_t res2 = iommu_iova_to_phys(domain, iova);

	WARN(res != res2, "hard/soft iova_to_phys fns don't agree...");

	if (res != expected) {
		dev_err_ratelimited(dev,
				    "Bad translation for %pa! Expected: %pa Got: %pa\n",
				    &iova, &expected, &res);
		return -EINVAL;
	}

	return 0;
}

static int __full_va_sweep(struct device *dev, struct seq_file *s,
			   const size_t size, struct iommu_domain *domain)
{
	u64 iova;
	dma_addr_t dma_addr;
	void *virt;
	phys_addr_t phys;
	const u64 max = SZ_1G * 4ULL - 1;
	int ret = 0, i;

	virt = (void *)__get_free_pages(GFP_KERNEL, get_order(size));
	if (!virt) {
		if (size > SZ_8K) {
			dev_err(dev,
				"Failed to allocate %s of memory, which is a lot. Skipping test for this size\n",
				_size_to_string(size));
			return 0;
		}
		return -ENOMEM;
	}
	phys = virt_to_phys(virt);

	for (iova = 0, i = 0; iova < max; iova += size, ++i) {
		unsigned long expected = iova;

		dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
		if (dma_addr != expected) {
			dev_err_ratelimited(dev,
					    "Unexpected iova on iter %d (expected: 0x%lx got: 0x%lx)\n",
					    i, expected,
					    (unsigned long)dma_addr);
			ret = -EINVAL;
			goto out;
		}
	}

	if (domain) {
		/* check every mapping from 0..6M */
		for (iova = 0, i = 0; iova < SZ_2M * 3; iova += size, ++i) {
			phys_addr_t expected = phys;

			if (__check_mapping(dev, domain, iova, expected)) {
				dev_err(dev, "iter: %d\n", i);
				ret = -EINVAL;
				goto out;
			}
		}
		/* and from 4G..4G-6M */
		for (iova = 0, i = 0; iova < SZ_2M * 3; iova += size, ++i) {
			phys_addr_t expected = phys;
			unsigned long theiova = ((SZ_1G * 4ULL) - size) - iova;

			if (__check_mapping(dev, domain, theiova, expected)) {
				dev_err(dev, "iter: %d\n", i);
				ret = -EINVAL;
				goto out;
			}
		}
	}

	/* at this point, our VA space should be full */
	dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
	if (dma_addr != DMA_ERROR_CODE) {
		dev_err_ratelimited(dev,
				    "dma_map_single succeeded when it should have failed. Got iova: 0x%lx\n",
				    (unsigned long)dma_addr);
		ret = -EINVAL;
	}

out:
	for (iova = 0; iova < max; iova += size)
		dma_unmap_single(dev, (dma_addr_t)iova, size, DMA_TO_DEVICE);

	free_pages((unsigned long)virt, get_order(size));
	return ret;
}

#define ds_printf(d, s, fmt, ...) ({				\
		dev_err(d, fmt, ##__VA_ARGS__);			\
		seq_printf(s, fmt, ##__VA_ARGS__);		\
	})

static int __functional_dma_api_va_test(struct device *dev, struct seq_file *s,
					struct iommu_domain *domain, void *priv)
{
	int i, j, ret = 0;
	size_t *sz, *sizes = priv;

	for (j = 0; j < 1; ++j) {
		for (sz = sizes; *sz; ++sz) {
			for (i = 0; i < 2; ++i) {
				ds_printf(dev, s, "Full VA sweep @%s %d",
					  _size_to_string(*sz), i);
				if (__full_va_sweep(dev, s, *sz, domain)) {
					ds_printf(dev, s, " -> FAILED\n");
					ret = -EINVAL;
				} else {
					ds_printf(dev, s, " -> SUCCEEDED\n");
				}
			}
		}
	}

	ds_printf(dev, s, "bonus map:");
	if (__full_va_sweep(dev, s, SZ_4K, domain)) {
		ds_printf(dev, s, " -> FAILED\n");
		ret = -EINVAL;
	} else {
		ds_printf(dev, s, " -> SUCCEEDED\n");
	}

	for (sz = sizes; *sz; ++sz) {
		for (i = 0; i < 2; ++i) {
			ds_printf(dev, s, "Rand VA sweep @%s %d",
				  _size_to_string(*sz), i);
			if (__rand_va_sweep(dev, s, *sz)) {
				ds_printf(dev, s, " -> FAILED\n");
				ret = -EINVAL;
			} else {
				ds_printf(dev, s, " -> SUCCEEDED\n");
			}
		}
	}

	ds_printf(dev, s, "TLB stress sweep");
	if (__tlb_stress_sweep(dev, s)) {
		ds_printf(dev, s, " -> FAILED\n");
		ret = -EINVAL;
	} else {
		ds_printf(dev, s, " -> SUCCEEDED\n");
	}

	ds_printf(dev, s, "second bonus map:");
	if (__full_va_sweep(dev, s, SZ_4K, domain)) {
		ds_printf(dev, s, " -> FAILED\n");
		ret = -EINVAL;
	} else {
		ds_printf(dev, s, " -> SUCCEEDED\n");
	}

	return ret;
}

static int __functional_dma_api_alloc_test(struct device *dev,
					   struct seq_file *s,
					   struct iommu_domain *domain,
					   void *ignored)
{
	size_t size = SZ_1K * 742;
	int ret = 0;
	u8 *data;
	dma_addr_t iova;

	/* Make sure we can allocate and use a buffer */
	ds_printf(dev, s, "Allocating coherent buffer");
	data = dma_alloc_coherent(dev, size, &iova, GFP_KERNEL);
	if (!data) {
		ds_printf(dev, s, " -> FAILED\n");
		ret = -EINVAL;
	} else {
		int i;

		ds_printf(dev, s, " -> SUCCEEDED\n");
		ds_printf(dev, s, "Using coherent buffer");
		for (i = 0; i < 742; ++i) {
			int ind = SZ_1K * i;
			u8 *p = data + ind;
			u8 val = i % 255;

			memset(data, 0xa5, size);
			*p = val;
			(*p)++;
			if ((*p) != val + 1) {
				ds_printf(dev, s,
					  " -> FAILED on iter %d since %d != %d\n",
					  i, *p, val + 1);
				ret = -EINVAL;
			}
		}
		if (!ret)
			ds_printf(dev, s, " -> SUCCEEDED\n");
		dma_free_coherent(dev, size, data, iova);
	}

	return ret;
}

static int __functional_dma_api_basic_test(struct device *dev,
					   struct seq_file *s,
					   struct iommu_domain *domain,
					   void *ignored)
{
	size_t size = 1518;
	int i, j, ret = 0;
	u8 *data;
	dma_addr_t iova;
	phys_addr_t pa, pa2;

	ds_printf(dev, s, "Basic DMA API test");
	/* Make sure we can allocate and use a buffer */
	for (i = 0; i < 1000; ++i) {
		data = kmalloc(size, GFP_KERNEL);
		if (!data) {
			ds_printf(dev, s, " -> FAILED\n");
			ret = -EINVAL;
			goto out;
		}
		memset(data, 0xa5, size);
		iova = dma_map_single(dev, data, size, DMA_TO_DEVICE);
		pa = iommu_iova_to_phys(domain, iova);
		pa2 = iommu_iova_to_phys_hard(domain, iova);
		if (pa != pa2) {
			dev_err(dev,
				"iova_to_phys doesn't match iova_to_phys_hard: %pa != %pa\n",
				&pa, &pa2);
			ret = -EINVAL;
			goto out;
		}
		pa2 = virt_to_phys(data);
		if (pa != pa2) {
			dev_err(dev,
				"iova_to_phys doesn't match virt_to_phys: %pa != %pa\n",
				&pa, &pa2);
			ret = -EINVAL;
			goto out;
		}
		dma_unmap_single(dev, iova, size, DMA_TO_DEVICE);
		for (j = 0; j < size; ++j) {
			if (data[j] != 0xa5) {
				dev_err(dev, "data[%d] != 0xa5\n", j);
				ret = -EINVAL;
				goto out;
			}
		}
		kfree(data);
	}

out:
	if (ret)
		ds_printf(dev, s, " -> FAILED\n");
	else
		ds_printf(dev, s, " -> SUCCEEDED\n");

	return ret;
}

/* Creates a fresh fast mapping and applies @fn to it */
static int __apply_to_new_mapping(struct seq_file *s,
				  int (*fn)(struct device *dev,
					    struct seq_file *s,
					    struct iommu_domain *domain,
					    void *priv),
				  void *priv)
{
	struct dma_iommu_mapping *mapping;
	struct iommu_debug_device *ddev = s->private;
	struct device *dev = ddev->dev;
	int ret = -EINVAL, fast = 1;
	phys_addr_t pt_phys;

	mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
					   (SZ_1G * 4ULL));
	if (!mapping)
		goto out;

	if (iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_FAST, &fast)) {
		seq_puts(s, "iommu_domain_set_attr failed\n");
		goto out_release_mapping;
	}

	if (arm_iommu_attach_device(dev, mapping))
		goto out_release_mapping;

	if (iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_PT_BASE_ADDR,
				  &pt_phys)) {
		ds_printf(dev, s, "Couldn't get page table base address\n");
		goto out_release_mapping;
	}

	dev_err(dev, "testing with pgtables at %pa\n", &pt_phys);
	if (iommu_enable_config_clocks(mapping->domain)) {
		ds_printf(dev, s, "Couldn't enable clocks\n");
		goto out_release_mapping;
	}
	ret = fn(dev, s, mapping->domain, priv);
	iommu_disable_config_clocks(mapping->domain);

	arm_iommu_detach_device(dev);
out_release_mapping:
	arm_iommu_release_mapping(mapping);
out:
	seq_printf(s, "%s\n", ret ? "FAIL" : "SUCCESS");
	return 0;
}

static int iommu_debug_functional_fast_dma_api_show(struct seq_file *s,
						    void *ignored)
{
	size_t sizes[] = {SZ_4K, SZ_8K, SZ_16K, SZ_64K, 0};
	int ret = 0;

	ret |= __apply_to_new_mapping(s, __functional_dma_api_alloc_test, NULL);
	ret |= __apply_to_new_mapping(s, __functional_dma_api_basic_test, NULL);
	ret |= __apply_to_new_mapping(s, __functional_dma_api_va_test, sizes);
	return ret;
}

static int iommu_debug_functional_fast_dma_api_open(struct inode *inode,
						    struct file *file)
{
	return single_open(file, iommu_debug_functional_fast_dma_api_show,
			   inode->i_private);
}

static const struct file_operations iommu_debug_functional_fast_dma_api_fops = {
	.open = iommu_debug_functional_fast_dma_api_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
1135
Mitchel Humpherys10215fd2015-12-15 18:45:57 -08001136static int iommu_debug_functional_arm_dma_api_show(struct seq_file *s,
1137 void *ignored)
1138{
1139 struct dma_iommu_mapping *mapping;
1140 struct iommu_debug_device *ddev = s->private;
1141 struct device *dev = ddev->dev;
1142 size_t sizes[] = {SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12, 0};
1143 int ret = -EINVAL;
1144
Charan Teja Reddy29f61402017-02-09 20:44:29 +05301145 /* Make the size equal to MAX_ULONG */
1146 mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
1147 (SZ_1G * 4ULL - 1));
Mitchel Humpherys10215fd2015-12-15 18:45:57 -08001148 if (!mapping)
1149 goto out;
1150
1151 if (arm_iommu_attach_device(dev, mapping))
1152 goto out_release_mapping;
1153
1154 ret = __functional_dma_api_alloc_test(dev, s, mapping->domain, sizes);
1155 ret |= __functional_dma_api_basic_test(dev, s, mapping->domain, sizes);
1156
1157 arm_iommu_detach_device(dev);
1158out_release_mapping:
1159 arm_iommu_release_mapping(mapping);
1160out:
1161 seq_printf(s, "%s\n", ret ? "FAIL" : "SUCCESS");
1162 return 0;
1163}
1164
1165static int iommu_debug_functional_arm_dma_api_open(struct inode *inode,
1166 struct file *file)
1167{
1168 return single_open(file, iommu_debug_functional_arm_dma_api_show,
1169 inode->i_private);
1170}
1171
1172static const struct file_operations iommu_debug_functional_arm_dma_api_fops = {
1173 .open = iommu_debug_functional_arm_dma_api_open,
1174 .read = seq_read,
1175 .llseek = seq_lseek,
1176 .release = single_release,
1177};

static int iommu_debug_attach_do_attach(struct iommu_debug_device *ddev,
					int val, bool is_secure)
{
	struct iommu_group *group = ddev->dev->iommu_group;

	ddev->domain = iommu_domain_alloc(&platform_bus_type);
	if (!ddev->domain) {
		pr_err("Couldn't allocate domain\n");
		return -ENOMEM;
	}

	if (is_secure && iommu_domain_set_attr(ddev->domain,
					       DOMAIN_ATTR_SECURE_VMID,
					       &val)) {
		pr_err("Couldn't set secure vmid to %d\n", val);
		goto out_domain_free;
	}

	if (iommu_attach_group(ddev->domain, group)) {
		dev_err(ddev->dev, "Couldn't attach new domain to device\n");
		goto out_domain_free;
	}

	return 0;

out_domain_free:
	iommu_domain_free(ddev->domain);
	ddev->domain = NULL;
	return -EIO;
}

static ssize_t __iommu_debug_attach_write(struct file *file,
					  const char __user *ubuf,
					  size_t count, loff_t *offset,
					  bool is_secure)
{
	struct iommu_debug_device *ddev = file->private_data;
	struct device *dev = ddev->dev;
	struct iommu_domain *domain;
	ssize_t retval;
	int val;

	if (kstrtoint_from_user(ubuf, count, 0, &val)) {
		pr_err("Invalid format. Expected a hex or decimal integer\n");
		retval = -EFAULT;
		goto out;
	}

	if (val) {
		if (ddev->domain) {
			pr_err("Iommu-Debug is already attached?\n");
			retval = -EINVAL;
			goto out;
		}

		domain = iommu_get_domain_for_dev(dev);
		if (domain) {
			pr_err("Another driver is using this device's iommu\n"
			       "Iommu-Debug cannot be used concurrently\n");
			retval = -EINVAL;
			goto out;
		}
		if (iommu_debug_attach_do_attach(ddev, val, is_secure)) {
			retval = -EIO;
			goto out;
		}
		pr_err("Attached\n");
	} else {
		if (!ddev->domain) {
			pr_err("Iommu-Debug is not attached?\n");
			retval = -EINVAL;
			goto out;
		}
		iommu_detach_group(ddev->domain, dev->iommu_group);
		iommu_domain_free(ddev->domain);
		ddev->domain = NULL;
		pr_err("Detached\n");
	}

	retval = count;
out:
	return retval;
}

static ssize_t iommu_debug_attach_write(struct file *file,
					const char __user *ubuf,
					size_t count, loff_t *offset)
{
	return __iommu_debug_attach_write(file, ubuf, count, offset,
					  false);
}

static ssize_t iommu_debug_attach_read(struct file *file, char __user *ubuf,
				       size_t count, loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	char c[2];

	if (*offset)
		return 0;

	c[0] = ddev->domain ? '1' : '0';
	c[1] = '\n';
	if (copy_to_user(ubuf, &c, 2)) {
		pr_err("copy_to_user failed\n");
		return -EFAULT;
	}
	*offset = 1; /* non-zero means we're done */

	return 2;
}

static const struct file_operations iommu_debug_attach_fops = {
	.open = simple_open,
	.write = iommu_debug_attach_write,
	.read = iommu_debug_attach_read,
};
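
/*
 * Usage sketch (paths assume debugfs is mounted at /sys/kernel/debug):
 *
 *	echo 1 > /sys/kernel/debug/iommu/tests/<device>/attach
 *	cat /sys/kernel/debug/iommu/tests/<device>/attach    (prints "1")
 *	echo 0 > /sys/kernel/debug/iommu/tests/<device>/attach
 *
 * Attaching fails if another driver already owns the device's IOMMU.
 */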
1297
Mitchel Humpherysac4c38b2015-07-30 19:40:21 -07001298static ssize_t iommu_debug_attach_write_secure(struct file *file,
1299 const char __user *ubuf,
1300 size_t count, loff_t *offset)
1301{
1302 return __iommu_debug_attach_write(file, ubuf, count, offset,
1303 true);
Mitchel Humpherysac4c38b2015-07-30 19:40:21 -07001304}
1305
1306static const struct file_operations iommu_debug_secure_attach_fops = {
1307 .open = simple_open,
1308 .write = iommu_debug_attach_write_secure,
1309 .read = iommu_debug_attach_read,
1310};
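
/*
 * Same protocol as the attach file, except that a non-zero value
 * written here is also used as the secure VMID for the new domain
 * (VMID values come from soc/qcom/secure_buffer.h), e.g.:
 *
 *	echo <vmid> > /sys/kernel/debug/iommu/tests/<device>/secure_attach
 */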

static ssize_t iommu_debug_atos_write(struct file *file,
				      const char __user *ubuf,
				      size_t count, loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	dma_addr_t iova;

	if (kstrtox_from_user(ubuf, count, 0, &iova)) {
		pr_err("Invalid format for iova\n");
		ddev->iova = 0;
		return -EINVAL;
	}

	ddev->iova = iova;
	pr_err("Saved iova=%pa for future ATOS commands\n", &iova);
	return count;
}

static ssize_t iommu_debug_atos_read(struct file *file, char __user *ubuf,
				     size_t count, loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	phys_addr_t phys;
	char buf[100];
	ssize_t retval;
	size_t buflen;

	if (!ddev->domain) {
		pr_err("No domain. Did you already attach?\n");
		return -EINVAL;
	}

	if (*offset)
		return 0;

	memset(buf, 0, 100);

	phys = iommu_iova_to_phys_hard(ddev->domain, ddev->iova);
	if (!phys)
		strlcpy(buf, "FAIL\n", 100);
	else
		snprintf(buf, 100, "%pa\n", &phys);

	buflen = strlen(buf);
	if (copy_to_user(ubuf, buf, buflen)) {
		pr_err("Couldn't copy_to_user\n");
		retval = -EFAULT;
	} else {
		*offset = 1; /* non-zero means we're done */
		retval = buflen;
	}

	return retval;
}

static const struct file_operations iommu_debug_atos_fops = {
	.open = simple_open,
	.write = iommu_debug_atos_write,
	.read = iommu_debug_atos_read,
};
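
/*
 * ATOS usage sketch: write the iova to translate, then read back the
 * hardware translation result ("FAIL" means the table walk failed):
 *
 *	echo 0x1000 > /sys/kernel/debug/iommu/tests/<device>/atos
 *	cat /sys/kernel/debug/iommu/tests/<device>/atos
 */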

static ssize_t iommu_debug_map_write(struct file *file, const char __user *ubuf,
				     size_t count, loff_t *offset)
{
	ssize_t retval = -EINVAL;
	int ret;
	char *comma1, *comma2, *comma3;
	char buf[100];
	dma_addr_t iova;
	phys_addr_t phys;
	size_t size;
	int prot;
	struct iommu_debug_device *ddev = file->private_data;

	if (count >= 100) {
		pr_err("Value too large\n");
		return -EINVAL;
	}

	if (!ddev->domain) {
		pr_err("No domain. Did you already attach?\n");
		return -EINVAL;
	}

	memset(buf, 0, 100);

	if (copy_from_user(buf, ubuf, count)) {
		pr_err("Couldn't copy from user\n");
		retval = -EFAULT;
		goto out;
	}

	comma1 = strnchr(buf, count, ',');
	if (!comma1)
		goto invalid_format;

	comma2 = strnchr(comma1 + 1, count, ',');
	if (!comma2)
		goto invalid_format;

	comma3 = strnchr(comma2 + 1, count, ',');
	if (!comma3)
		goto invalid_format;

	/* split up the words */
	*comma1 = *comma2 = *comma3 = '\0';

	if (kstrtoux(buf, 0, &iova))
		goto invalid_format;

	if (kstrtoux(comma1 + 1, 0, &phys))
		goto invalid_format;

	if (kstrtosize_t(comma2 + 1, 0, &size))
		goto invalid_format;

	if (kstrtoint(comma3 + 1, 0, &prot))
		goto invalid_format;

	ret = iommu_map(ddev->domain, iova, phys, size, prot);
	if (ret) {
		pr_err("iommu_map failed with %d\n", ret);
		retval = -EIO;
		goto out;
	}

	retval = count;
	pr_err("Mapped %pa to %pa (len=0x%zx, prot=0x%x)\n",
	       &iova, &phys, size, prot);
out:
	return retval;

invalid_format:
	pr_err("Invalid format. Expected: iova,phys,len,prot where `prot' is the bitwise OR of IOMMU_READ, IOMMU_WRITE, etc.\n");
	return -EINVAL;
}

static const struct file_operations iommu_debug_map_fops = {
	.open = simple_open,
	.write = iommu_debug_map_write,
};
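
/*
 * Map-file usage sketch.  The format is "iova,phys,len,prot"; each
 * field is parsed with kstrto*, so hex or decimal both work, and prot
 * is the bitwise OR of IOMMU_READ (0x1), IOMMU_WRITE (0x2), etc.
 * Example (the addresses are made up):
 *
 *	echo "0x1000,0x40000000,0x1000,0x3" > \
 *		/sys/kernel/debug/iommu/tests/<device>/map
 */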

static ssize_t iommu_debug_unmap_write(struct file *file,
				       const char __user *ubuf,
				       size_t count, loff_t *offset)
{
	ssize_t retval = 0;
	char *comma1;
	char buf[100];
	dma_addr_t iova;
	size_t size;
	size_t unmapped;
	struct iommu_debug_device *ddev = file->private_data;

	if (count >= 100) {
		pr_err("Value too large\n");
		return -EINVAL;
	}

	if (!ddev->domain) {
		pr_err("No domain. Did you already attach?\n");
		return -EINVAL;
	}

	memset(buf, 0, 100);

	if (copy_from_user(buf, ubuf, count)) {
		pr_err("Couldn't copy from user\n");
		retval = -EFAULT;
		goto out;
	}

	comma1 = strnchr(buf, count, ',');
	if (!comma1)
		goto invalid_format;

	/* split up the words */
	*comma1 = '\0';

	if (kstrtoux(buf, 0, &iova))
		goto invalid_format;

	if (kstrtosize_t(comma1 + 1, 0, &size))
		goto invalid_format;

	unmapped = iommu_unmap(ddev->domain, iova, size);
	if (unmapped != size) {
		pr_err("iommu_unmap failed. Expected to unmap: 0x%zx, unmapped: 0x%zx\n",
		       size, unmapped);
		return -EIO;
	}

	retval = count;
	pr_err("Unmapped %pa (len=0x%zx)\n", &iova, size);
out:
	return retval;

invalid_format:
	pr_err("Invalid format. Expected: iova,len\n");
	return -EINVAL;
}

static const struct file_operations iommu_debug_unmap_fops = {
	.open = simple_open,
	.write = iommu_debug_unmap_write,
};
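
/*
 * Unmap-file usage sketch; "iova,len" must match an earlier map, e.g.:
 *
 *	echo "0x1000,0x1000" > /sys/kernel/debug/iommu/tests/<device>/unmap
 */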

static ssize_t iommu_debug_config_clocks_write(struct file *file,
					       const char __user *ubuf,
					       size_t count, loff_t *offset)
{
	char buf;
	struct iommu_debug_device *ddev = file->private_data;
	struct device *dev = ddev->dev;

	/* we're expecting a single character plus (optionally) a newline */
	if (count > 2) {
		dev_err(dev, "Invalid value\n");
		return -EINVAL;
	}

	if (!ddev->domain) {
		dev_err(dev, "No domain. Did you already attach?\n");
		return -EINVAL;
	}

	if (copy_from_user(&buf, ubuf, 1)) {
		dev_err(dev, "Couldn't copy from user\n");
		return -EFAULT;
	}

	switch (buf) {
	case '0':
		dev_err(dev, "Disabling config clocks\n");
		iommu_disable_config_clocks(ddev->domain);
		break;
	case '1':
		dev_err(dev, "Enabling config clocks\n");
		if (iommu_enable_config_clocks(ddev->domain))
			dev_err(dev, "Failed!\n");
		break;
	default:
		dev_err(dev, "Invalid value. Should be 0 or 1.\n");
		return -EINVAL;
	}

	return count;
}

static const struct file_operations iommu_debug_config_clocks_fops = {
	.open = simple_open,
	.write = iommu_debug_config_clocks_write,
};
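
/*
 * Usage sketch: pin the IOMMU configuration clocks on (1) or release
 * them (0) while poking at an attached domain:
 *
 *	echo 1 > /sys/kernel/debug/iommu/tests/<device>/config_clocks
 *	echo 0 > /sys/kernel/debug/iommu/tests/<device>/config_clocks
 */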

static ssize_t iommu_debug_trigger_fault_write(
		struct file *file, const char __user *ubuf, size_t count,
		loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	unsigned long flags;

	if (!ddev->domain) {
		pr_err("No domain. Did you already attach?\n");
		return -EINVAL;
	}

	if (kstrtoul_from_user(ubuf, count, 0, &flags)) {
		pr_err("Invalid flags format\n");
		return -EFAULT;
	}

	iommu_trigger_fault(ddev->domain, flags);

	return count;
}

static const struct file_operations iommu_debug_trigger_fault_fops = {
	.open = simple_open,
	.write = iommu_debug_trigger_fault_write,
};
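
/*
 * Usage sketch: deliberately trigger an IOMMU fault on the attached
 * domain.  The written value is passed straight through to
 * iommu_trigger_fault() as the flags argument (flag meanings are
 * driver-specific):
 *
 *	echo 0 > /sys/kernel/debug/iommu/tests/<device>/trigger-fault
 */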

/*
 * The following will only work for drivers that implement the generic
 * device tree bindings described in
 * Documentation/devicetree/bindings/iommu/iommu.txt
 */
static int snarf_iommu_devices(struct device *dev, void *ignored)
{
	struct iommu_debug_device *ddev;
	struct dentry *dir;

	if (!of_find_property(dev->of_node, "iommus", NULL))
		return 0;

	/* Hold a reference to the device's iommu group */
	if (!iommu_group_get(dev))
		return 0;

	ddev = kzalloc(sizeof(*ddev), GFP_KERNEL);
	if (!ddev)
		return -ENOMEM;
	ddev->dev = dev;
	dir = debugfs_create_dir(dev_name(dev), debugfs_tests_dir);
	if (!dir) {
		pr_err("Couldn't create iommu/devices/%s debugfs dir\n",
		       dev_name(dev));
		goto err;
	}

	if (!debugfs_create_file("nr_iters", S_IRUSR, dir, &iters_per_op,
				 &iommu_debug_nr_iters_ops)) {
		pr_err("Couldn't create iommu/devices/%s/nr_iters debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("profiling", S_IRUSR, dir, ddev,
				 &iommu_debug_profiling_fops)) {
		pr_err("Couldn't create iommu/devices/%s/profiling debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("secure_profiling", S_IRUSR, dir, ddev,
				 &iommu_debug_secure_profiling_fops)) {
		pr_err("Couldn't create iommu/devices/%s/secure_profiling debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("profiling_fast", S_IRUSR, dir, ddev,
				 &iommu_debug_profiling_fast_fops)) {
		pr_err("Couldn't create iommu/devices/%s/profiling_fast debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("profiling_fast_dma_api", S_IRUSR, dir, ddev,
				 &iommu_debug_profiling_fast_dma_api_fops)) {
		pr_err("Couldn't create iommu/devices/%s/profiling_fast_dma_api debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("functional_fast_dma_api", S_IRUSR, dir, ddev,
				 &iommu_debug_functional_fast_dma_api_fops)) {
		pr_err("Couldn't create iommu/devices/%s/functional_fast_dma_api debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("functional_arm_dma_api", S_IRUSR, dir, ddev,
				 &iommu_debug_functional_arm_dma_api_fops)) {
		pr_err("Couldn't create iommu/devices/%s/functional_arm_dma_api debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("attach", S_IRUSR, dir, ddev,
				 &iommu_debug_attach_fops)) {
		pr_err("Couldn't create iommu/devices/%s/attach debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("secure_attach", S_IRUSR, dir, ddev,
				 &iommu_debug_secure_attach_fops)) {
		pr_err("Couldn't create iommu/devices/%s/secure_attach debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("atos", S_IWUSR, dir, ddev,
				 &iommu_debug_atos_fops)) {
		pr_err("Couldn't create iommu/devices/%s/atos debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("map", S_IWUSR, dir, ddev,
				 &iommu_debug_map_fops)) {
		pr_err("Couldn't create iommu/devices/%s/map debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("unmap", S_IWUSR, dir, ddev,
				 &iommu_debug_unmap_fops)) {
		pr_err("Couldn't create iommu/devices/%s/unmap debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("config_clocks", S_IWUSR, dir, ddev,
				 &iommu_debug_config_clocks_fops)) {
		pr_err("Couldn't create iommu/devices/%s/config_clocks debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("trigger-fault", 0200, dir, ddev,
				 &iommu_debug_trigger_fault_fops)) {
		pr_err("Couldn't create iommu/devices/%s/trigger-fault debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	list_add(&ddev->list, &iommu_debug_devices);
	return 0;

err_rmdir:
	debugfs_remove_recursive(dir);
err:
	kfree(ddev);
	return 0;
}

static int iommu_debug_init_tests(void)
{
	debugfs_tests_dir = debugfs_create_dir("tests",
					       iommu_debugfs_top);
	if (!debugfs_tests_dir) {
		pr_err("Couldn't create iommu/tests debugfs directory\n");
		return -ENODEV;
	}

	return bus_for_each_dev(&platform_bus_type, NULL, NULL,
				snarf_iommu_devices);
}

static void iommu_debug_destroy_tests(void)
{
	debugfs_remove_recursive(debugfs_tests_dir);
}
#else
static inline int iommu_debug_init_tests(void) { return 0; }
static inline void iommu_debug_destroy_tests(void) { }
#endif

/*
 * This isn't really a "driver"; we just need something in the device tree
 * so that our tests can run without any client drivers, and our tests rely
 * on parsing the device tree for nodes with the `iommus' property.
 */
static int iommu_debug_pass(struct platform_device *pdev)
{
	return 0;
}

static const struct of_device_id iommu_debug_of_match[] = {
	{ .compatible = "iommu-debug-test" },
	{ },
};

static struct platform_driver iommu_debug_driver = {
	.probe = iommu_debug_pass,
	.remove = iommu_debug_pass,
	.driver = {
		.name = "iommu-debug",
		.of_match_table = iommu_debug_of_match,
	},
};

static int iommu_debug_init(void)
{
	if (iommu_debug_init_tests())
		return -ENODEV;

	return platform_driver_register(&iommu_debug_driver);
}

static void iommu_debug_exit(void)
{
	platform_driver_unregister(&iommu_debug_driver);
	iommu_debug_destroy_tests();
}

module_init(iommu_debug_init);
module_exit(iommu_debug_exit);