/*
 * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) "iommu-debug: %s: " fmt, __func__

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/dma-contiguous.h>
#include <soc/qcom/secure_buffer.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
#include <asm/dma-iommu.h>

#if defined(CONFIG_IOMMU_TESTS)

static const char *iommu_debug_attr_to_string(enum iommu_attr attr)
{
	switch (attr) {
	case DOMAIN_ATTR_GEOMETRY:
		return "DOMAIN_ATTR_GEOMETRY";
	case DOMAIN_ATTR_PAGING:
		return "DOMAIN_ATTR_PAGING";
	case DOMAIN_ATTR_WINDOWS:
		return "DOMAIN_ATTR_WINDOWS";
	case DOMAIN_ATTR_FSL_PAMU_STASH:
		return "DOMAIN_ATTR_FSL_PAMU_STASH";
	case DOMAIN_ATTR_FSL_PAMU_ENABLE:
		return "DOMAIN_ATTR_FSL_PAMU_ENABLE";
	case DOMAIN_ATTR_FSL_PAMUV1:
		return "DOMAIN_ATTR_FSL_PAMUV1";
	case DOMAIN_ATTR_NESTING:
		return "DOMAIN_ATTR_NESTING";
	case DOMAIN_ATTR_PT_BASE_ADDR:
		return "DOMAIN_ATTR_PT_BASE_ADDR";
	case DOMAIN_ATTR_SECURE_VMID:
		return "DOMAIN_ATTR_SECURE_VMID";
	case DOMAIN_ATTR_ATOMIC:
		return "DOMAIN_ATTR_ATOMIC";
	case DOMAIN_ATTR_CONTEXT_BANK:
		return "DOMAIN_ATTR_CONTEXT_BANK";
	case DOMAIN_ATTR_TTBR0:
		return "DOMAIN_ATTR_TTBR0";
	case DOMAIN_ATTR_CONTEXTIDR:
		return "DOMAIN_ATTR_CONTEXTIDR";
	case DOMAIN_ATTR_PROCID:
		return "DOMAIN_ATTR_PROCID";
	case DOMAIN_ATTR_DYNAMIC:
		return "DOMAIN_ATTR_DYNAMIC";
	case DOMAIN_ATTR_NON_FATAL_FAULTS:
		return "DOMAIN_ATTR_NON_FATAL_FAULTS";
	case DOMAIN_ATTR_S1_BYPASS:
		return "DOMAIN_ATTR_S1_BYPASS";
	case DOMAIN_ATTR_FAST:
		return "DOMAIN_ATTR_FAST";
	case DOMAIN_ATTR_EARLY_MAP:
		return "DOMAIN_ATTR_EARLY_MAP";
	case DOMAIN_ATTR_CB_STALL_DISABLE:
		return "DOMAIN_ATTR_CB_STALL_DISABLE";
	default:
		return "Unknown attr!";
	}
}
#endif

#ifdef CONFIG_IOMMU_DEBUG_TRACKING

static DEFINE_MUTEX(iommu_debug_attachments_lock);
static LIST_HEAD(iommu_debug_attachments);

/*
 * Each group may have more than one domain; but each domain may
 * only have one group.
 * Used by debug tools to display the name of the device(s) associated
 * with a particular domain.
 */
struct iommu_debug_attachment {
	struct iommu_domain *domain;
	struct iommu_group *group;
	struct list_head list;
};

void iommu_debug_attach_device(struct iommu_domain *domain,
			       struct device *dev)
{
	struct iommu_debug_attachment *attach;
	struct iommu_group *group;

	group = dev->iommu_group;
	if (!group)
		return;

	mutex_lock(&iommu_debug_attachments_lock);
	list_for_each_entry(attach, &iommu_debug_attachments, list)
		if ((attach->domain == domain) && (attach->group == group))
			goto out;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		goto out;

	attach->domain = domain;
	attach->group = group;
	INIT_LIST_HEAD(&attach->list);

	list_add(&attach->list, &iommu_debug_attachments);
out:
	mutex_unlock(&iommu_debug_attachments_lock);
}

void iommu_debug_domain_remove(struct iommu_domain *domain)
{
	struct iommu_debug_attachment *it, *tmp;

	mutex_lock(&iommu_debug_attachments_lock);
	list_for_each_entry_safe(it, tmp, &iommu_debug_attachments, list) {
		if (it->domain != domain)
			continue;
		list_del(&it->list);
		kfree(it);
	}

	mutex_unlock(&iommu_debug_attachments_lock);
}

#endif

#ifdef CONFIG_IOMMU_TESTS

#ifdef CONFIG_64BIT

#define kstrtoux kstrtou64
#define kstrtox_from_user kstrtoull_from_user
#define kstrtosize_t kstrtoul

#else

#define kstrtoux kstrtou32
#define kstrtox_from_user kstrtouint_from_user
#define kstrtosize_t kstrtouint

#endif

static LIST_HEAD(iommu_debug_devices);
static struct dentry *debugfs_tests_dir;
static u32 iters_per_op = 1;
static void *test_virt_addr;

struct iommu_debug_device {
	struct device *dev;
	struct iommu_domain *domain;
	struct dma_iommu_mapping *mapping;
	u64 iova;
	u64 phys;
	size_t len;
	struct list_head list;
	struct mutex clk_lock;
	unsigned int clk_count;
};

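/*
 * Illustrative note (not part of the original source): the iommu_map_sg
 * profiling loop below calls this helper with chunk_size = SZ_4K, so a
 * 64K total_size becomes a 16-entry table whose entries all point at the
 * same 4K page.
 */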
static int iommu_debug_build_phoney_sg_table(struct device *dev,
					     struct sg_table *table,
					     unsigned long total_size,
					     unsigned long chunk_size)
{
	unsigned long nents = total_size / chunk_size;
	struct scatterlist *sg;
	int i;
	struct page *page;

	if (!IS_ALIGNED(total_size, PAGE_SIZE))
		return -EINVAL;
	if (!IS_ALIGNED(total_size, chunk_size))
		return -EINVAL;
	if (sg_alloc_table(table, nents, GFP_KERNEL))
		return -EINVAL;
	page = alloc_pages(GFP_KERNEL, get_order(chunk_size));
	if (!page)
		goto free_table;

	/* all the same page... why not. */
	for_each_sg(table->sgl, sg, table->nents, i)
		sg_set_page(sg, page, chunk_size, 0);

	return 0;

free_table:
	sg_free_table(table);
	return -ENOMEM;
}

static void iommu_debug_destroy_phoney_sg_table(struct device *dev,
						struct sg_table *table,
						unsigned long chunk_size)
{
	__free_pages(sg_page(table->sgl), get_order(chunk_size));
	sg_free_table(table);
}

static const char * const _size_to_string(unsigned long size)
{
	switch (size) {
	case SZ_4K:
		return "4K";
	case SZ_8K:
		return "8K";
	case SZ_16K:
		return "16K";
	case SZ_64K:
		return "64K";
	case SZ_2M:
		return "2M";
	case SZ_1M * 12:
		return "12M";
	case SZ_1M * 20:
		return "20M";
	}
	return "unknown size, please add to _size_to_string";
}

static int nr_iters_set(void *data, u64 val)
{
	if (!val)
		val = 1;
	if (val > 10000)
		val = 10000;
	*(u32 *)data = val;
	return 0;
}

static int nr_iters_get(void *data, u64 *val)
{
	*val = *(u32 *)data;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(iommu_debug_nr_iters_ops,
			nr_iters_get, nr_iters_set, "%llu\n");

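/*
 * Illustrative usage (not from the original source; the per-device
 * debugfs directories under debugfs_tests_dir are created later in this
 * file, so exact paths are platform-dependent):
 *
 *	# echo 100 > <debugfs_tests_dir>/<device>/nr_iters
 *	# cat <debugfs_tests_dir>/<device>/profiling
 */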
static void iommu_debug_device_profiling(struct seq_file *s, struct device *dev,
					 enum iommu_attr attrs[],
					 void *attr_values[], int nattrs,
					 const size_t sizes[])
{
	int i;
	const size_t *sz;
	struct iommu_domain *domain;
	unsigned long iova = 0x10000;
	phys_addr_t paddr = 0xa000;

	domain = iommu_domain_alloc(&platform_bus_type);
	if (!domain) {
		seq_puts(s, "Couldn't allocate domain\n");
		return;
	}

	seq_puts(s, "Domain attributes: [ ");
	for (i = 0; i < nattrs; ++i) {
		/* not all attrs are ints, but this will get us by for now */
		seq_printf(s, "%s=%d%s", iommu_debug_attr_to_string(attrs[i]),
			   *((int *)attr_values[i]),
			   i < nattrs ? " " : "");
	}
	seq_puts(s, "]\n");
	for (i = 0; i < nattrs; ++i) {
		if (iommu_domain_set_attr(domain, attrs[i], attr_values[i])) {
			seq_printf(s, "Couldn't set %d to the value at %p\n",
				   attrs[i], attr_values[i]);
			goto out_domain_free;
		}
	}

	if (iommu_attach_group(domain, dev->iommu_group)) {
		seq_puts(s,
			 "Couldn't attach new domain to device. Is it already attached?\n");
		goto out_domain_free;
	}

	seq_printf(s, "(average over %d iterations)\n", iters_per_op);
	seq_printf(s, "%8s %19s %16s\n", "size", "iommu_map", "iommu_unmap");
	for (sz = sizes; *sz; ++sz) {
		size_t size = *sz;
		size_t unmapped;
		u64 map_elapsed_ns = 0, unmap_elapsed_ns = 0;
		u64 map_elapsed_us = 0, unmap_elapsed_us = 0;
		u32 map_elapsed_rem = 0, unmap_elapsed_rem = 0;
		struct timespec tbefore, tafter, diff;
		int i;

		for (i = 0; i < iters_per_op; ++i) {
			getnstimeofday(&tbefore);
			if (iommu_map(domain, iova, paddr, size,
				      IOMMU_READ | IOMMU_WRITE)) {
				seq_puts(s, "Failed to map\n");
				continue;
			}
			getnstimeofday(&tafter);
			diff = timespec_sub(tafter, tbefore);
			map_elapsed_ns += timespec_to_ns(&diff);

			getnstimeofday(&tbefore);
			unmapped = iommu_unmap(domain, iova, size);
			if (unmapped != size) {
				seq_printf(s,
					   "Only unmapped %zx instead of %zx\n",
					   unmapped, size);
				continue;
			}
			getnstimeofday(&tafter);
			diff = timespec_sub(tafter, tbefore);
			unmap_elapsed_ns += timespec_to_ns(&diff);
		}

		map_elapsed_ns = div_u64_rem(map_elapsed_ns, iters_per_op,
					     &map_elapsed_rem);
		unmap_elapsed_ns = div_u64_rem(unmap_elapsed_ns, iters_per_op,
					       &unmap_elapsed_rem);

		map_elapsed_us = div_u64_rem(map_elapsed_ns, 1000,
					     &map_elapsed_rem);
		unmap_elapsed_us = div_u64_rem(unmap_elapsed_ns, 1000,
					       &unmap_elapsed_rem);

		seq_printf(s, "%8s %12lld.%03d us %9lld.%03d us\n",
			   _size_to_string(size),
			   map_elapsed_us, map_elapsed_rem,
			   unmap_elapsed_us, unmap_elapsed_rem);
	}

	seq_putc(s, '\n');
	seq_printf(s, "%8s %19s %16s\n", "size", "iommu_map_sg", "iommu_unmap");
	for (sz = sizes; *sz; ++sz) {
		size_t size = *sz;
		size_t unmapped;
		u64 map_elapsed_ns = 0, unmap_elapsed_ns = 0;
		u64 map_elapsed_us = 0, unmap_elapsed_us = 0;
		u32 map_elapsed_rem = 0, unmap_elapsed_rem = 0;
		struct timespec tbefore, tafter, diff;
		struct sg_table table;
		unsigned long chunk_size = SZ_4K;
		int i;

		if (iommu_debug_build_phoney_sg_table(dev, &table, size,
						      chunk_size)) {
			seq_puts(s,
				 "couldn't build phoney sg table! bailing...\n");
			goto out_detach;
		}

		for (i = 0; i < iters_per_op; ++i) {
			getnstimeofday(&tbefore);
			if (iommu_map_sg(domain, iova, table.sgl, table.nents,
					 IOMMU_READ | IOMMU_WRITE) != size) {
				seq_puts(s, "Failed to map_sg\n");
				goto next;
			}
			getnstimeofday(&tafter);
			diff = timespec_sub(tafter, tbefore);
			map_elapsed_ns += timespec_to_ns(&diff);

			getnstimeofday(&tbefore);
			unmapped = iommu_unmap(domain, iova, size);
			if (unmapped != size) {
				seq_printf(s,
					   "Only unmapped %zx instead of %zx\n",
					   unmapped, size);
				goto next;
			}
			getnstimeofday(&tafter);
			diff = timespec_sub(tafter, tbefore);
			unmap_elapsed_ns += timespec_to_ns(&diff);
		}

		map_elapsed_ns = div_u64_rem(map_elapsed_ns, iters_per_op,
					     &map_elapsed_rem);
		unmap_elapsed_ns = div_u64_rem(unmap_elapsed_ns, iters_per_op,
					       &unmap_elapsed_rem);

		map_elapsed_us = div_u64_rem(map_elapsed_ns, 1000,
					     &map_elapsed_rem);
		unmap_elapsed_us = div_u64_rem(unmap_elapsed_ns, 1000,
					       &unmap_elapsed_rem);

		seq_printf(s, "%8s %12lld.%03d us %9lld.%03d us\n",
			   _size_to_string(size),
			   map_elapsed_us, map_elapsed_rem,
			   unmap_elapsed_us, unmap_elapsed_rem);

next:
		iommu_debug_destroy_phoney_sg_table(dev, &table, chunk_size);
	}

out_detach:
	iommu_detach_group(domain, dev->iommu_group);
out_domain_free:
	iommu_domain_free(domain);
}

static int iommu_debug_profiling_show(struct seq_file *s, void *ignored)
{
	struct iommu_debug_device *ddev = s->private;
	const size_t sizes[] = { SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12,
				 SZ_1M * 20, 0 };
	enum iommu_attr attrs[] = {
		DOMAIN_ATTR_ATOMIC,
	};
	int atomic = 1;
	void *attr_values[] = { &atomic };

	iommu_debug_device_profiling(s, ddev->dev, attrs, attr_values,
				     ARRAY_SIZE(attrs), sizes);

	return 0;
}

static int iommu_debug_profiling_open(struct inode *inode, struct file *file)
{
	return single_open(file, iommu_debug_profiling_show, inode->i_private);
}

static const struct file_operations iommu_debug_profiling_fops = {
	.open	 = iommu_debug_profiling_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};

static int iommu_debug_secure_profiling_show(struct seq_file *s, void *ignored)
{
	struct iommu_debug_device *ddev = s->private;
	const size_t sizes[] = { SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12,
				 SZ_1M * 20, 0 };

	enum iommu_attr attrs[] = {
		DOMAIN_ATTR_ATOMIC,
		DOMAIN_ATTR_SECURE_VMID,
	};
	int one = 1, secure_vmid = VMID_CP_PIXEL;
	void *attr_values[] = { &one, &secure_vmid };

	iommu_debug_device_profiling(s, ddev->dev, attrs, attr_values,
				     ARRAY_SIZE(attrs), sizes);

	return 0;
}

static int iommu_debug_secure_profiling_open(struct inode *inode,
					     struct file *file)
{
	return single_open(file, iommu_debug_secure_profiling_show,
			   inode->i_private);
}

static const struct file_operations iommu_debug_secure_profiling_fops = {
	.open	 = iommu_debug_secure_profiling_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};

static int iommu_debug_profiling_fast_show(struct seq_file *s, void *ignored)
{
	struct iommu_debug_device *ddev = s->private;
	size_t sizes[] = {SZ_4K, SZ_8K, SZ_16K, SZ_64K, 0};
	enum iommu_attr attrs[] = {
		DOMAIN_ATTR_FAST,
		DOMAIN_ATTR_ATOMIC,
	};
	int one = 1;
	void *attr_values[] = { &one, &one };

	iommu_debug_device_profiling(s, ddev->dev, attrs, attr_values,
				     ARRAY_SIZE(attrs), sizes);

	return 0;
}

static int iommu_debug_profiling_fast_open(struct inode *inode,
					   struct file *file)
{
	return single_open(file, iommu_debug_profiling_fast_show,
			   inode->i_private);
}

static const struct file_operations iommu_debug_profiling_fast_fops = {
	.open	 = iommu_debug_profiling_fast_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};

static int iommu_debug_profiling_fast_dma_api_show(struct seq_file *s,
						   void *ignored)
{
	int i, experiment;
	struct iommu_debug_device *ddev = s->private;
	struct device *dev = ddev->dev;
	u64 map_elapsed_ns[10], unmap_elapsed_ns[10];
	struct dma_iommu_mapping *mapping;
	dma_addr_t dma_addr;
	void *virt;
	int fast = 1;
	const char * const extra_labels[] = {
		"not coherent",
		"coherent",
	};
	unsigned long extra_attrs[] = {
		0,
		DMA_ATTR_SKIP_CPU_SYNC,
	};

	virt = kmalloc(1518, GFP_KERNEL);
	if (!virt)
		goto out;

	mapping = arm_iommu_create_mapping(&platform_bus_type, 0, SZ_1G * 4ULL);
	if (!mapping) {
		seq_puts(s, "arm_iommu_create_mapping failed\n");
		goto out_kfree;
	}

	if (iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_FAST, &fast)) {
		seq_puts(s, "iommu_domain_set_attr failed\n");
		goto out_release_mapping;
	}

	if (arm_iommu_attach_device(dev, mapping)) {
		seq_puts(s, "arm_iommu_attach_device failed\n");
		goto out_release_mapping;
	}

	if (iommu_enable_config_clocks(mapping->domain)) {
		seq_puts(s, "Couldn't enable clocks\n");
		goto out_detach;
	}
	for (experiment = 0; experiment < 2; ++experiment) {
		size_t map_avg = 0, unmap_avg = 0;

		for (i = 0; i < 10; ++i) {
			struct timespec tbefore, tafter, diff;
			u64 ns;

			getnstimeofday(&tbefore);
			dma_addr = dma_map_single_attrs(
				dev, virt, SZ_4K, DMA_TO_DEVICE,
				extra_attrs[experiment]);
			getnstimeofday(&tafter);
			diff = timespec_sub(tafter, tbefore);
			ns = timespec_to_ns(&diff);
			if (dma_mapping_error(dev, dma_addr)) {
				seq_puts(s, "dma_map_single failed\n");
				goto out_disable_config_clocks;
			}
			map_elapsed_ns[i] = ns;

			getnstimeofday(&tbefore);
			dma_unmap_single_attrs(
				dev, dma_addr, SZ_4K, DMA_TO_DEVICE,
				extra_attrs[experiment]);
			getnstimeofday(&tafter);
			diff = timespec_sub(tafter, tbefore);
			ns = timespec_to_ns(&diff);
			unmap_elapsed_ns[i] = ns;
		}

		seq_printf(s, "%13s %24s (ns): [", extra_labels[experiment],
			   "dma_map_single_attrs");
		for (i = 0; i < 10; ++i) {
			map_avg += map_elapsed_ns[i];
			seq_printf(s, "%5llu%s", map_elapsed_ns[i],
				   i < 9 ? ", " : "");
		}
		map_avg /= 10;
		seq_printf(s, "] (avg: %zu)\n", map_avg);

		seq_printf(s, "%13s %24s (ns): [", extra_labels[experiment],
			   "dma_unmap_single_attrs");
		for (i = 0; i < 10; ++i) {
			unmap_avg += unmap_elapsed_ns[i];
			seq_printf(s, "%5llu%s", unmap_elapsed_ns[i],
				   i < 9 ? ", " : "");
		}
		unmap_avg /= 10;
		seq_printf(s, "] (avg: %zu)\n", unmap_avg);
	}

out_disable_config_clocks:
	iommu_disable_config_clocks(mapping->domain);
out_detach:
	arm_iommu_detach_device(dev);
out_release_mapping:
	arm_iommu_release_mapping(mapping);
out_kfree:
	kfree(virt);
out:
	return 0;
}

static int iommu_debug_profiling_fast_dma_api_open(struct inode *inode,
						   struct file *file)
{
	return single_open(file, iommu_debug_profiling_fast_dma_api_show,
			   inode->i_private);
}

static const struct file_operations iommu_debug_profiling_fast_dma_api_fops = {
	.open	 = iommu_debug_profiling_fast_dma_api_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};

static int __tlb_stress_sweep(struct device *dev, struct seq_file *s)
{
	int i, ret = 0;
	u64 iova;
	const u64 max = SZ_1G * 4ULL - 1;
	void *virt;
	phys_addr_t phys;
	dma_addr_t dma_addr;

	/*
	 * we'll be doing 4K and 8K mappings. Need to own an entire 8K
	 * chunk that we can work with.
	 */
	virt = (void *)__get_free_pages(GFP_KERNEL, get_order(SZ_8K));
	if (!virt)
		return -ENOMEM;
	phys = virt_to_phys(virt);

	/* fill the whole 4GB space */
	for (iova = 0, i = 0; iova < max; iova += SZ_8K, ++i) {
		dma_addr = dma_map_single(dev, virt, SZ_8K, DMA_TO_DEVICE);
		if (dma_addr == DMA_ERROR_CODE) {
			dev_err(dev, "Failed map on iter %d\n", i);
			ret = -EINVAL;
			goto out;
		}
	}

	if (dma_map_single(dev, virt, SZ_4K, DMA_TO_DEVICE) != DMA_ERROR_CODE) {
		dev_err(dev,
			"dma_map_single unexpectedly succeeded (VA should have been exhausted)\n");
		ret = -EINVAL;
		goto out;
	}

	/*
	 * free up 4K at the very beginning, then leave one 4K mapping,
	 * then free up 8K. This will cause the next 8K map to skip
	 * over the 4K hole and take the 8K one.
	 */
	dma_unmap_single(dev, 0, SZ_4K, DMA_TO_DEVICE);
	dma_unmap_single(dev, SZ_8K, SZ_4K, DMA_TO_DEVICE);
	dma_unmap_single(dev, SZ_8K + SZ_4K, SZ_4K, DMA_TO_DEVICE);

	/* remap 8K */
	dma_addr = dma_map_single(dev, virt, SZ_8K, DMA_TO_DEVICE);
	if (dma_addr != SZ_8K) {
		dma_addr_t expected = SZ_8K;

		dev_err(dev, "Unexpected dma_addr. got: %pa expected: %pa\n",
			&dma_addr, &expected);
		ret = -EINVAL;
		goto out;
	}

	/*
	 * now remap 4K. We should get the first 4K chunk that was skipped
	 * over during the previous 8K map. If we missed a TLB invalidate
	 * at that point this should explode.
	 */
	dma_addr = dma_map_single(dev, virt, SZ_4K, DMA_TO_DEVICE);
	if (dma_addr != 0) {
		dma_addr_t expected = 0;

		dev_err(dev, "Unexpected dma_addr. got: %pa expected: %pa\n",
			&dma_addr, &expected);
		ret = -EINVAL;
		goto out;
	}

	if (dma_map_single(dev, virt, SZ_4K, DMA_TO_DEVICE) != DMA_ERROR_CODE) {
		dev_err(dev,
			"dma_map_single unexpectedly succeeded after remaps (VA should have been exhausted)\n");
		ret = -EINVAL;
		goto out;
	}

	/* we're all full again. unmap everything. */
	for (iova = 0; iova < max; iova += SZ_8K)
		dma_unmap_single(dev, (dma_addr_t)iova, SZ_8K, DMA_TO_DEVICE);

out:
	free_pages((unsigned long)virt, get_order(SZ_8K));
	return ret;
}

struct fib_state {
	unsigned long cur;
	unsigned long prev;
};

static void fib_init(struct fib_state *f)
{
	f->cur = f->prev = 1;
}

static unsigned long get_next_fib(struct fib_state *f)
{
	unsigned long next = f->cur + f->prev;

	f->prev = f->cur;
	f->cur = next;
	return next;
}

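/*
 * Worked illustration (not part of the original source): starting from
 * cur = prev = 1, successive get_next_fib() calls return 2, 3, 5, 8,
 * 13, ... so __rand_va_sweep() below unmaps iova = fib * size together
 * with its mirror image (max + 1) - size - iova measured back from the
 * top of the 4GB space.
 */
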
/*
 * Not actually random. Just testing the fibs (and max - the fibs).
 */
static int __rand_va_sweep(struct device *dev, struct seq_file *s,
			   const size_t size)
{
	u64 iova;
	const u64 max = SZ_1G * 4ULL - 1;
	int i, remapped, unmapped, ret = 0;
	void *virt;
	dma_addr_t dma_addr, dma_addr2;
	struct fib_state fib;

	virt = (void *)__get_free_pages(GFP_KERNEL, get_order(size));
	if (!virt) {
		if (size > SZ_8K) {
			dev_err(dev,
				"Failed to allocate %s of memory, which is a lot. Skipping test for this size\n",
				_size_to_string(size));
			return 0;
		}
		return -ENOMEM;
	}

	/* fill the whole 4GB space */
	for (iova = 0, i = 0; iova < max; iova += size, ++i) {
		dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
		if (dma_addr == DMA_ERROR_CODE) {
			dev_err(dev, "Failed map on iter %d\n", i);
			ret = -EINVAL;
			goto out;
		}
	}

	/* now unmap "random" iovas */
	unmapped = 0;
	fib_init(&fib);
	for (iova = get_next_fib(&fib) * size;
	     iova < max - size;
	     iova = (u64)get_next_fib(&fib) * size) {
		dma_addr = (dma_addr_t)(iova);
		dma_addr2 = (dma_addr_t)((max + 1) - size - iova);
		if (dma_addr == dma_addr2) {
			WARN(1,
			     "%s test needs update! The random number sequence is folding in on itself and should be changed.\n",
			     __func__);
			return -EINVAL;
		}
		dma_unmap_single(dev, dma_addr, size, DMA_TO_DEVICE);
		dma_unmap_single(dev, dma_addr2, size, DMA_TO_DEVICE);
		unmapped += 2;
	}

	/* and map until everything fills back up */
	for (remapped = 0; ; ++remapped) {
		dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
		if (dma_addr == DMA_ERROR_CODE)
			break;
	}

	if (unmapped != remapped) {
		dev_err(dev,
			"Unexpected random remap count! Unmapped %d but remapped %d\n",
			unmapped, remapped);
		ret = -EINVAL;
	}

	for (iova = 0; iova < max; iova += size)
		dma_unmap_single(dev, (dma_addr_t)iova, size, DMA_TO_DEVICE);

out:
	free_pages((unsigned long)virt, get_order(size));
	return ret;
}

static int __check_mapping(struct device *dev, struct iommu_domain *domain,
			   dma_addr_t iova, phys_addr_t expected)
{
	phys_addr_t res = iommu_iova_to_phys_hard(domain, iova);
	phys_addr_t res2 = iommu_iova_to_phys(domain, iova);

	WARN(res != res2, "hard/soft iova_to_phys fns don't agree...");

	if (res != expected) {
		dev_err_ratelimited(dev,
				    "Bad translation for %pa! Expected: %pa Got: %pa\n",
				    &iova, &expected, &res);
		return -EINVAL;
	}

	return 0;
}

static int __full_va_sweep(struct device *dev, struct seq_file *s,
			   const size_t size, struct iommu_domain *domain)
{
	u64 iova;
	dma_addr_t dma_addr;
	void *virt;
	phys_addr_t phys;
	const u64 max = SZ_1G * 4ULL - 1;
	int ret = 0, i;

	virt = (void *)__get_free_pages(GFP_KERNEL, get_order(size));
	if (!virt) {
		if (size > SZ_8K) {
			dev_err(dev,
				"Failed to allocate %s of memory, which is a lot. Skipping test for this size\n",
				_size_to_string(size));
			return 0;
		}
		return -ENOMEM;
	}
	phys = virt_to_phys(virt);

	for (iova = 0, i = 0; iova < max; iova += size, ++i) {
		unsigned long expected = iova;

		dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
		if (dma_addr != expected) {
			dev_err_ratelimited(dev,
					    "Unexpected iova on iter %d (expected: 0x%lx got: 0x%lx)\n",
					    i, expected,
					    (unsigned long)dma_addr);
			ret = -EINVAL;
			goto out;
		}
	}

	if (domain) {
		/* check every mapping from 0..6M */
		for (iova = 0, i = 0; iova < SZ_2M * 3; iova += size, ++i) {
			phys_addr_t expected = phys;

			if (__check_mapping(dev, domain, iova, expected)) {
				dev_err(dev, "iter: %d\n", i);
				ret = -EINVAL;
				goto out;
			}
		}
		/* and from 4G..4G-6M */
		for (iova = 0, i = 0; iova < SZ_2M * 3; iova += size, ++i) {
			phys_addr_t expected = phys;
			unsigned long theiova = ((SZ_1G * 4ULL) - size) - iova;

			if (__check_mapping(dev, domain, theiova, expected)) {
				dev_err(dev, "iter: %d\n", i);
				ret = -EINVAL;
				goto out;
			}
		}
	}

	/* at this point, our VA space should be full */
	dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
	if (dma_addr != DMA_ERROR_CODE) {
		dev_err_ratelimited(dev,
				    "dma_map_single succeeded when it should have failed. Got iova: 0x%lx\n",
				    (unsigned long)dma_addr);
		ret = -EINVAL;
	}

out:
	for (iova = 0; iova < max; iova += size)
		dma_unmap_single(dev, (dma_addr_t)iova, size, DMA_TO_DEVICE);

	free_pages((unsigned long)virt, get_order(size));
	return ret;
}

#define ds_printf(d, s, fmt, ...) ({				\
			dev_err(d, fmt, ##__VA_ARGS__);		\
			seq_printf(s, fmt, ##__VA_ARGS__);	\
		})

static int __functional_dma_api_va_test(struct device *dev, struct seq_file *s,
					struct iommu_domain *domain, void *priv)
{
	int i, j, ret = 0;
	size_t *sz, *sizes = priv;

	for (j = 0; j < 1; ++j) {
		for (sz = sizes; *sz; ++sz) {
			for (i = 0; i < 2; ++i) {
				ds_printf(dev, s, "Full VA sweep @%s %d",
					  _size_to_string(*sz), i);
				if (__full_va_sweep(dev, s, *sz, domain)) {
					ds_printf(dev, s, " -> FAILED\n");
					ret = -EINVAL;
				} else {
					ds_printf(dev, s, " -> SUCCEEDED\n");
				}
			}
		}
	}

	ds_printf(dev, s, "bonus map:");
	if (__full_va_sweep(dev, s, SZ_4K, domain)) {
		ds_printf(dev, s, " -> FAILED\n");
		ret = -EINVAL;
	} else {
		ds_printf(dev, s, " -> SUCCEEDED\n");
	}

	for (sz = sizes; *sz; ++sz) {
		for (i = 0; i < 2; ++i) {
			ds_printf(dev, s, "Rand VA sweep @%s %d",
				  _size_to_string(*sz), i);
			if (__rand_va_sweep(dev, s, *sz)) {
				ds_printf(dev, s, " -> FAILED\n");
				ret = -EINVAL;
			} else {
				ds_printf(dev, s, " -> SUCCEEDED\n");
			}
		}
	}

	ds_printf(dev, s, "TLB stress sweep");
	if (__tlb_stress_sweep(dev, s)) {
		ds_printf(dev, s, " -> FAILED\n");
		ret = -EINVAL;
	} else {
		ds_printf(dev, s, " -> SUCCEEDED\n");
	}

	ds_printf(dev, s, "second bonus map:");
	if (__full_va_sweep(dev, s, SZ_4K, domain)) {
		ds_printf(dev, s, " -> FAILED\n");
		ret = -EINVAL;
	} else {
		ds_printf(dev, s, " -> SUCCEEDED\n");
	}

	return ret;
}

static int __functional_dma_api_alloc_test(struct device *dev,
					   struct seq_file *s,
					   struct iommu_domain *domain,
					   void *ignored)
{
	size_t size = SZ_1K * 742;
	int ret = 0;
	u8 *data;
	dma_addr_t iova;

	/* Make sure we can allocate and use a buffer */
	ds_printf(dev, s, "Allocating coherent buffer");
	data = dma_alloc_coherent(dev, size, &iova, GFP_KERNEL);
	if (!data) {
		ds_printf(dev, s, " -> FAILED\n");
		ret = -EINVAL;
	} else {
		int i;

		ds_printf(dev, s, " -> SUCCEEDED\n");
		ds_printf(dev, s, "Using coherent buffer");
		for (i = 0; i < 742; ++i) {
			int ind = SZ_1K * i;
			u8 *p = data + ind;
			u8 val = i % 255;

			memset(data, 0xa5, size);
			*p = val;
			(*p)++;
			if ((*p) != val + 1) {
				ds_printf(dev, s,
					  " -> FAILED on iter %d since %d != %d\n",
					  i, *p, val + 1);
				ret = -EINVAL;
			}
		}
		if (!ret)
			ds_printf(dev, s, " -> SUCCEEDED\n");
		dma_free_coherent(dev, size, data, iova);
	}

	return ret;
}

static int __functional_dma_api_basic_test(struct device *dev,
					   struct seq_file *s,
					   struct iommu_domain *domain,
					   void *ignored)
{
	size_t size = 1518;
	int i, j, ret = 0;
	u8 *data;
	dma_addr_t iova;
	phys_addr_t pa, pa2;

	ds_printf(dev, s, "Basic DMA API test");
	/* Make sure we can allocate and use a buffer */
	for (i = 0; i < 1000; ++i) {
		data = kmalloc(size, GFP_KERNEL);
		if (!data) {
			ds_printf(dev, s, " -> FAILED\n");
			ret = -EINVAL;
			goto out;
		}
		memset(data, 0xa5, size);
		iova = dma_map_single(dev, data, size, DMA_TO_DEVICE);
		pa = iommu_iova_to_phys(domain, iova);
		pa2 = iommu_iova_to_phys_hard(domain, iova);
		if (pa != pa2) {
			dev_err(dev,
				"iova_to_phys doesn't match iova_to_phys_hard: %pa != %pa\n",
				&pa, &pa2);
			ret = -EINVAL;
			goto out;
		}
		pa2 = virt_to_phys(data);
		if (pa != pa2) {
			dev_err(dev,
				"iova_to_phys doesn't match virt_to_phys: %pa != %pa\n",
				&pa, &pa2);
			ret = -EINVAL;
			goto out;
		}
		dma_unmap_single(dev, iova, size, DMA_TO_DEVICE);
		for (j = 0; j < size; ++j) {
			if (data[j] != 0xa5) {
				dev_err(dev, "data[%d] != 0xa5\n", j);
				ret = -EINVAL;
				goto out;
			}
		}
		kfree(data);
	}

out:
	if (ret)
		ds_printf(dev, s, " -> FAILED\n");
	else
		ds_printf(dev, s, " -> SUCCEEDED\n");

	return ret;
}

/* Creates a fresh fast mapping and applies @fn to it */
static int __apply_to_new_mapping(struct seq_file *s,
				  int (*fn)(struct device *dev,
					    struct seq_file *s,
					    struct iommu_domain *domain,
					    void *priv),
				  void *priv)
{
	struct dma_iommu_mapping *mapping;
	struct iommu_debug_device *ddev = s->private;
	struct device *dev = ddev->dev;
	int ret = -EINVAL, fast = 1;
	phys_addr_t pt_phys;

	mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
					   (SZ_1G * 4ULL));
	if (!mapping)
		goto out;

	if (iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_FAST, &fast)) {
		seq_puts(s, "iommu_domain_set_attr failed\n");
		goto out_release_mapping;
	}

	if (arm_iommu_attach_device(dev, mapping))
		goto out_release_mapping;

	if (iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_PT_BASE_ADDR,
				  &pt_phys)) {
		ds_printf(dev, s, "Couldn't get page table base address\n");
		goto out_release_mapping;
	}

	dev_err(dev, "testing with pgtables at %pa\n", &pt_phys);
	if (iommu_enable_config_clocks(mapping->domain)) {
		ds_printf(dev, s, "Couldn't enable clocks\n");
		goto out_release_mapping;
	}
	ret = fn(dev, s, mapping->domain, priv);
	iommu_disable_config_clocks(mapping->domain);

	arm_iommu_detach_device(dev);
out_release_mapping:
	arm_iommu_release_mapping(mapping);
out:
	seq_printf(s, "%s\n", ret ? "FAIL" : "SUCCESS");
	return 0;
}

static int iommu_debug_functional_fast_dma_api_show(struct seq_file *s,
						    void *ignored)
{
	size_t sizes[] = {SZ_4K, SZ_8K, SZ_16K, SZ_64K, 0};
	int ret = 0;

	ret |= __apply_to_new_mapping(s, __functional_dma_api_alloc_test, NULL);
	ret |= __apply_to_new_mapping(s, __functional_dma_api_basic_test, NULL);
	ret |= __apply_to_new_mapping(s, __functional_dma_api_va_test, sizes);
	return ret;
}

static int iommu_debug_functional_fast_dma_api_open(struct inode *inode,
						    struct file *file)
{
	return single_open(file, iommu_debug_functional_fast_dma_api_show,
			   inode->i_private);
}

static const struct file_operations iommu_debug_functional_fast_dma_api_fops = {
	.open	 = iommu_debug_functional_fast_dma_api_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};

static int iommu_debug_functional_arm_dma_api_show(struct seq_file *s,
						   void *ignored)
{
	struct dma_iommu_mapping *mapping;
	struct iommu_debug_device *ddev = s->private;
	struct device *dev = ddev->dev;
	size_t sizes[] = {SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12, 0};
	int ret = -EINVAL;

	/* Make the size 4GB - 1 (i.e. ULONG_MAX on 32-bit) */
	mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
					   (SZ_1G * 4ULL - 1));
	if (!mapping)
		goto out;

	if (arm_iommu_attach_device(dev, mapping))
		goto out_release_mapping;

	ret = __functional_dma_api_alloc_test(dev, s, mapping->domain, sizes);
	ret |= __functional_dma_api_basic_test(dev, s, mapping->domain, sizes);

	arm_iommu_detach_device(dev);
out_release_mapping:
	arm_iommu_release_mapping(mapping);
out:
	seq_printf(s, "%s\n", ret ? "FAIL" : "SUCCESS");
	return 0;
}

static int iommu_debug_functional_arm_dma_api_open(struct inode *inode,
						   struct file *file)
{
	return single_open(file, iommu_debug_functional_arm_dma_api_show,
			   inode->i_private);
}

static const struct file_operations iommu_debug_functional_arm_dma_api_fops = {
	.open	 = iommu_debug_functional_arm_dma_api_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};

static int iommu_debug_attach_do_attach(struct iommu_debug_device *ddev,
					int val, bool is_secure)
{
	struct iommu_group *group = ddev->dev->iommu_group;

	ddev->domain = iommu_domain_alloc(&platform_bus_type);
	if (!ddev->domain) {
		pr_err("Couldn't allocate domain\n");
		return -ENOMEM;
	}

	val = VMID_CP_CAMERA;
	if (is_secure && iommu_domain_set_attr(ddev->domain,
					       DOMAIN_ATTR_SECURE_VMID,
					       &val)) {
		pr_err("Couldn't set secure vmid to %d\n", val);
		goto out_domain_free;
	}

	if (iommu_attach_group(ddev->domain, group)) {
		dev_err(ddev->dev, "Couldn't attach new domain to device\n");
		goto out_domain_free;
	}

	return 0;

out_domain_free:
	iommu_domain_free(ddev->domain);
	ddev->domain = NULL;
	return -EIO;
}

static ssize_t __iommu_debug_dma_attach_write(struct file *file,
					      const char __user *ubuf,
					      size_t count, loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	struct device *dev = ddev->dev;
	struct dma_iommu_mapping *dma_mapping;
	ssize_t retval = -EINVAL;
	int val;

	if (kstrtoint_from_user(ubuf, count, 0, &val)) {
		pr_err("Invalid format. Expected a hex or decimal integer");
		retval = -EFAULT;
		goto out;
	}

	if (val) {
		if (dev->archdata.mapping && dev->archdata.mapping->domain) {
			pr_err("Already attached.\n");
			retval = -EINVAL;
			goto out;
		}
		if (WARN(dev->archdata.iommu,
			 "Attachment tracking out of sync with device\n")) {
			retval = -EINVAL;
			goto out;
		}

		dma_mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
						       (SZ_1G * 4ULL));

		if (!dma_mapping)
			goto out;

		if (arm_iommu_attach_device(dev, dma_mapping))
			goto out_release_mapping;

		ddev->mapping = dma_mapping;
		pr_err("Attached\n");
	} else {
		if (!dev->archdata.mapping) {
			pr_err("No mapping. Did you already attach?\n");
			retval = -EINVAL;
			goto out;
		}
		if (!dev->archdata.mapping->domain) {
			pr_err("No domain. Did you already attach?\n");
			retval = -EINVAL;
			goto out;
		}
		arm_iommu_detach_device(dev);
		arm_iommu_release_mapping(ddev->mapping);
		pr_err("Detached\n");
	}
	retval = count;
	return retval;

out_release_mapping:
	arm_iommu_release_mapping(dma_mapping);
out:
	return retval;
}

static ssize_t __iommu_debug_attach_write(struct file *file,
					  const char __user *ubuf,
					  size_t count, loff_t *offset,
					  bool is_secure)
{
	struct iommu_debug_device *ddev = file->private_data;
	struct device *dev = ddev->dev;
	struct iommu_domain *domain;
	ssize_t retval;
	int val;

	if (kstrtoint_from_user(ubuf, count, 0, &val)) {
		pr_err("Invalid format. Expected a hex or decimal integer");
		retval = -EFAULT;
		goto out;
	}

	if (val) {
		if (ddev->domain) {
			pr_err("Iommu-Debug is already attached?\n");
			retval = -EINVAL;
			goto out;
		}

		domain = iommu_get_domain_for_dev(dev);
		if (domain) {
			pr_err("Another driver is using this device's iommu\n"
			       "Iommu-Debug cannot be used concurrently\n");
			retval = -EINVAL;
			goto out;
		}
		if (iommu_debug_attach_do_attach(ddev, val, is_secure)) {
			retval = -EIO;
			goto out;
		}
		pr_err("Attached\n");
	} else {
		if (!ddev->domain) {
			pr_err("Iommu-Debug is not attached?\n");
			retval = -EINVAL;
			goto out;
		}
		iommu_detach_group(ddev->domain, dev->iommu_group);
		iommu_domain_free(ddev->domain);
		ddev->domain = NULL;
		pr_err("Detached\n");
	}

	retval = count;
out:
	return retval;
}

static ssize_t iommu_debug_dma_attach_write(struct file *file,
					    const char __user *ubuf,
					    size_t count, loff_t *offset)
{
	return __iommu_debug_dma_attach_write(file, ubuf, count, offset);
}

static ssize_t iommu_debug_dma_attach_read(struct file *file, char __user *ubuf,
					   size_t count, loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	struct device *dev = ddev->dev;
	char c[2];
	size_t buflen = sizeof(c);

	if (*offset)
		return 0;

	if (!dev->archdata.mapping)
		c[0] = '0';
	else
		c[0] = dev->archdata.mapping->domain ? '1' : '0';

	c[1] = '\n';
	buflen = min(count, buflen);
	if (copy_to_user(ubuf, &c, buflen)) {
		pr_err("copy_to_user failed\n");
		return -EFAULT;
	}
	*offset = 1;		/* non-zero means we're done */

	return buflen;
}

static const struct file_operations iommu_debug_dma_attach_fops = {
	.open	= simple_open,
	.write	= iommu_debug_dma_attach_write,
	.read	= iommu_debug_dma_attach_read,
};
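
/*
 * Illustrative dma_attach sequence (not from the original source; exact
 * debugfs paths are platform-dependent). Unlike the raw "attach" file
 * below, this path goes through arm_iommu_create_mapping() and the DMA
 * API:
 *
 *	# echo 1 > <debugfs_tests_dir>/<device>/dma_attach
 *	# cat <debugfs_tests_dir>/<device>/dma_attach
 *	1
 *	# echo 0 > <debugfs_tests_dir>/<device>/dma_attach
 */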

static ssize_t iommu_debug_test_virt_addr_read(struct file *file,
					       char __user *ubuf,
					       size_t count, loff_t *offset)
{
	char buf[100];
	ssize_t retval;
	size_t buflen;
	int buf_len = sizeof(buf);

	if (*offset)
		return 0;

	memset(buf, 0, buf_len);

	if (!test_virt_addr)
		strlcpy(buf, "FAIL\n", buf_len);
	else
		snprintf(buf, buf_len, "0x%pK\n", test_virt_addr);

	buflen = min(count, strlen(buf));
	if (copy_to_user(ubuf, buf, buflen)) {
		pr_err("Couldn't copy_to_user\n");
		retval = -EFAULT;
	} else {
		*offset = 1;	/* non-zero means we're done */
		retval = buflen;
	}

	return retval;
}

static const struct file_operations iommu_debug_test_virt_addr_fops = {
	.open	= simple_open,
	.read	= iommu_debug_test_virt_addr_read,
};

static ssize_t iommu_debug_attach_write(struct file *file,
					const char __user *ubuf,
					size_t count, loff_t *offset)
{
	return __iommu_debug_attach_write(file, ubuf, count, offset,
					  false);
}

static ssize_t iommu_debug_attach_read(struct file *file, char __user *ubuf,
				       size_t count, loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	char c[2];
	size_t buflen = sizeof(c);

	if (*offset)
		return 0;

	c[0] = ddev->domain ? '1' : '0';
	c[1] = '\n';
	buflen = min(count, buflen);
	if (copy_to_user(ubuf, &c, buflen)) {
		pr_err("copy_to_user failed\n");
		return -EFAULT;
	}
	*offset = 1;		/* non-zero means we're done */

	return buflen;
}

static const struct file_operations iommu_debug_attach_fops = {
	.open	= simple_open,
	.write	= iommu_debug_attach_write,
	.read	= iommu_debug_attach_read,
};
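
/*
 * Illustrative attach/detach sequence for the non-DMA-API path (not from
 * the original source; exact debugfs paths are platform-dependent).
 * Writing a nonzero value allocates and attaches a fresh domain; writing
 * zero detaches and frees it:
 *
 *	# echo 1 > <debugfs_tests_dir>/<device>/attach
 *	# cat <debugfs_tests_dir>/<device>/attach
 *	1
 *	# echo 0 > <debugfs_tests_dir>/<device>/attach
 */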

static ssize_t iommu_debug_attach_write_secure(struct file *file,
					       const char __user *ubuf,
					       size_t count, loff_t *offset)
{
	return __iommu_debug_attach_write(file, ubuf, count, offset,
					  true);
}

static const struct file_operations iommu_debug_secure_attach_fops = {
	.open	= simple_open,
	.write	= iommu_debug_attach_write_secure,
	.read	= iommu_debug_attach_read,
};

static ssize_t iommu_debug_pte_write(struct file *file,
				     const char __user *ubuf,
				     size_t count, loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	dma_addr_t iova;

	if (kstrtox_from_user(ubuf, count, 0, &iova)) {
		pr_err("Invalid format for iova\n");
		ddev->iova = 0;
		return -EINVAL;
	}

	ddev->iova = iova;
	pr_err("Saved iova=%pa for future PTE commands\n", &iova);
	return count;
}

static ssize_t iommu_debug_pte_read(struct file *file, char __user *ubuf,
				    size_t count, loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	struct device *dev = ddev->dev;
	uint64_t pte;
	char buf[100];
	ssize_t retval;
	size_t buflen;

	if (kptr_restrict != 0) {
		pr_err("kptr_restrict needs to be disabled.\n");
		return -EPERM;
	}
	if (!dev->archdata.mapping) {
		pr_err("No mapping. Did you already attach?\n");
		return -EINVAL;
	}
	if (!dev->archdata.mapping->domain) {
		pr_err("No domain. Did you already attach?\n");
		return -EINVAL;
	}

	if (*offset)
		return 0;

	memset(buf, 0, sizeof(buf));

	pte = iommu_iova_to_pte(dev->archdata.mapping->domain,
				ddev->iova);

	if (!pte)
		strlcpy(buf, "FAIL\n", sizeof(buf));
	else
		snprintf(buf, sizeof(buf), "pte=%016llx\n", pte);

	buflen = min(count, strlen(buf));
	if (copy_to_user(ubuf, buf, buflen)) {
		pr_err("Couldn't copy_to_user\n");
		retval = -EFAULT;
	} else {
		*offset = 1;	/* non-zero means we're done */
		retval = buflen;
	}

	return retval;
}

static const struct file_operations iommu_debug_pte_fops = {
	.open	= simple_open,
	.write	= iommu_debug_pte_write,
	.read	= iommu_debug_pte_read,
};
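
/*
 * Example PTE lookup (illustrative, not from the original source; the
 * iova value is hypothetical). Requires kptr_restrict == 0 and a prior
 * dma_attach, since the read path walks dev->archdata.mapping->domain:
 *
 *	# echo 0x1000 > <debugfs_tests_dir>/<device>/pte
 *	# cat <debugfs_tests_dir>/<device>/pte
 *	pte=...
 */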

static ssize_t iommu_debug_atos_write(struct file *file,
				      const char __user *ubuf,
				      size_t count, loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	dma_addr_t iova;

	if (kstrtox_from_user(ubuf, count, 0, &iova)) {
		pr_err("Invalid format for iova\n");
		ddev->iova = 0;
		return -EINVAL;
	}

	ddev->iova = iova;
	pr_err("Saved iova=%pa for future ATOS commands\n", &iova);
	return count;
}

static ssize_t iommu_debug_atos_read(struct file *file, char __user *ubuf,
				     size_t count, loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	phys_addr_t phys;
	char buf[100];
	ssize_t retval;
	size_t buflen;

	if (kptr_restrict != 0) {
		pr_err("kptr_restrict needs to be disabled.\n");
		return -EPERM;
	}
	if (!ddev->domain) {
		pr_err("No domain. Did you already attach?\n");
		return -EINVAL;
	}

	if (*offset)
		return 0;

	memset(buf, 0, 100);

	phys = iommu_iova_to_phys_hard(ddev->domain, ddev->iova);
	if (!phys) {
		strlcpy(buf, "FAIL\n", 100);
		phys = iommu_iova_to_phys(ddev->domain, ddev->iova);
		dev_err(ddev->dev, "ATOS for %pa failed. Software walk returned: %pa\n",
			&ddev->iova, &phys);
	} else {
		snprintf(buf, 100, "%pa\n", &phys);
	}

	buflen = min(count, strlen(buf));
	if (copy_to_user(ubuf, buf, buflen)) {
		pr_err("Couldn't copy_to_user\n");
		retval = -EFAULT;
	} else {
		*offset = 1;	/* non-zero means we're done */
		retval = buflen;
	}

	return retval;
}

static const struct file_operations iommu_debug_atos_fops = {
	.open	= simple_open,
	.write	= iommu_debug_atos_write,
	.read	= iommu_debug_atos_read,
};
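
/*
 * Example address translation (illustrative, not from the original
 * source; the iova value is hypothetical). Requires kptr_restrict == 0
 * and a prior attach through the attach file above:
 *
 *	# echo 0x10000 > <debugfs_tests_dir>/<device>/atos
 *	# cat <debugfs_tests_dir>/<device>/atos
 */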

static ssize_t iommu_debug_dma_atos_read(struct file *file, char __user *ubuf,
					 size_t count, loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	struct device *dev = ddev->dev;
	phys_addr_t phys;
	char buf[100];
	ssize_t retval;
	size_t buflen;

	if (kptr_restrict != 0) {
		pr_err("kptr_restrict needs to be disabled.\n");
		return -EPERM;
	}
	if (!dev->archdata.mapping) {
		pr_err("No mapping. Did you already attach?\n");
		return -EINVAL;
	}
	if (!dev->archdata.mapping->domain) {
		pr_err("No domain. Did you already attach?\n");
		return -EINVAL;
	}

	if (*offset)
		return 0;

	memset(buf, 0, sizeof(buf));

	phys = iommu_iova_to_phys_hard(dev->archdata.mapping->domain,
				       ddev->iova);
	if (!phys)
		strlcpy(buf, "FAIL\n", sizeof(buf));
	else
		snprintf(buf, sizeof(buf), "%pa\n", &phys);

	buflen = min(count, strlen(buf));
	if (copy_to_user(ubuf, buf, buflen)) {
		pr_err("Couldn't copy_to_user\n");
		retval = -EFAULT;
	} else {
		*offset = 1;	/* non-zero means we're done */
		retval = buflen;
	}

	return retval;
}

static const struct file_operations iommu_debug_dma_atos_fops = {
	.open = simple_open,
	.write = iommu_debug_atos_write,
	.read = iommu_debug_dma_atos_read,
};
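
/*
 * The dma_atos file reuses iommu_debug_atos_write for its write side, so the
 * flow mirrors atos but translates through the DMA mapping's domain. A
 * sketch, assuming the same debugfs layout as above:
 *
 *	echo 0x10000 > .../dma_atos	(or rely on the iova saved by dma_map)
 *	cat .../dma_atos
 */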

static ssize_t iommu_debug_map_write(struct file *file, const char __user *ubuf,
				     size_t count, loff_t *offset)
{
	ssize_t retval = -EINVAL;
	int ret;
	char *comma1, *comma2, *comma3;
	char buf[100];
	dma_addr_t iova;
	phys_addr_t phys;
	size_t size;
	int prot;
	struct iommu_debug_device *ddev = file->private_data;

	if (count >= sizeof(buf)) {
		pr_err("Value too large\n");
		return -EINVAL;
	}

	if (!ddev->domain) {
		pr_err("No domain. Did you already attach?\n");
		return -EINVAL;
	}

	memset(buf, 0, sizeof(buf));

	if (copy_from_user(buf, ubuf, count)) {
		pr_err("Couldn't copy from user\n");
		retval = -EFAULT;
		goto out;
	}

	comma1 = strnchr(buf, count, ',');
	if (!comma1)
		goto invalid_format;

	comma2 = strnchr(comma1 + 1, count, ',');
	if (!comma2)
		goto invalid_format;

	comma3 = strnchr(comma2 + 1, count, ',');
	if (!comma3)
		goto invalid_format;

	/* split up the words */
	*comma1 = *comma2 = *comma3 = '\0';

	if (kstrtoux(buf, 0, &iova))
		goto invalid_format;

	if (kstrtoux(comma1 + 1, 0, &phys))
		goto invalid_format;

	if (kstrtosize_t(comma2 + 1, 0, &size))
		goto invalid_format;

	if (kstrtoint(comma3 + 1, 0, &prot))
		goto invalid_format;

	ret = iommu_map(ddev->domain, iova, phys, size, prot);
	if (ret) {
		pr_err("iommu_map failed with %d\n", ret);
		retval = -EIO;
		goto out;
	}

	retval = count;
	pr_err("Mapped %pa to %pa (len=0x%zx, prot=0x%x)\n",
	       &iova, &phys, size, prot);
out:
	return retval;

invalid_format:
	pr_err("Invalid format. Expected: iova,phys,len,prot where `prot' is the bitwise OR of IOMMU_READ, IOMMU_WRITE, etc.\n");
	return -EINVAL;
}

static const struct file_operations iommu_debug_map_fops = {
	.open = simple_open,
	.write = iommu_debug_map_write,
};
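
/*
 * Example usage for the map file (a sketch; the addresses are arbitrary and
 * must suit the attached domain). IOMMU_READ is 0x1 and IOMMU_WRITE is 0x2,
 * so prot=3 requests a read/write mapping:
 *
 *	echo "0x10000,0x40000000,0x1000,3" > .../map
 */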

/*
 * Performs DMA mapping of a given virtual address and size to an iova address.
 * User input format: (addr,len,dma attr) where dma attr is:
 *	0: normal mapping
 *	1: force coherent mapping
 *	2: force non-coherent mapping
 *	3: use system cache
 */
1749static ssize_t iommu_debug_dma_map_write(struct file *file,
1750 const char __user *ubuf, size_t count, loff_t *offset)
1751{
1752 ssize_t retval = -EINVAL;
1753 int ret;
1754 char *comma1, *comma2;
1755 char buf[100];
1756 unsigned long addr;
1757 void *v_addr;
1758 dma_addr_t iova;
1759 size_t size;
1760 unsigned int attr;
1761 unsigned long dma_attrs;
1762 struct iommu_debug_device *ddev = file->private_data;
1763 struct device *dev = ddev->dev;
1764
1765 if (count >= sizeof(buf)) {
1766 pr_err("Value too large\n");
1767 return -EINVAL;
1768 }
1769
1770 if (!dev->archdata.mapping) {
1771 pr_err("No mapping. Did you already attach?\n");
1772 retval = -EINVAL;
1773 goto out;
1774 }
1775 if (!dev->archdata.mapping->domain) {
1776 pr_err("No domain. Did you already attach?\n");
1777 retval = -EINVAL;
1778 goto out;
1779 }
1780
1781 memset(buf, 0, sizeof(buf));
1782
1783 if (copy_from_user(buf, ubuf, count)) {
1784 pr_err("Couldn't copy from user\n");
1785 retval = -EFAULT;
1786 goto out;
1787 }
1788
1789 comma1 = strnchr(buf, count, ',');
1790 if (!comma1)
1791 goto invalid_format;
1792
1793 comma2 = strnchr(comma1 + 1, count, ',');
1794 if (!comma2)
1795 goto invalid_format;
1796
1797 *comma1 = *comma2 = '\0';
1798
1799 if (kstrtoul(buf, 0, &addr))
1800 goto invalid_format;
1801 v_addr = (void *)addr;
1802
1803 if (kstrtosize_t(comma1 + 1, 0, &size))
1804 goto invalid_format;
1805
1806 if (kstrtouint(comma2 + 1, 0, &attr))
1807 goto invalid_format;
1808
1809 if (v_addr < test_virt_addr || v_addr > (test_virt_addr + SZ_1M - 1))
1810 goto invalid_addr;
1811
1812 if (attr == 0)
1813 dma_attrs = 0;
1814 else if (attr == 1)
1815 dma_attrs = DMA_ATTR_FORCE_COHERENT;
1816 else if (attr == 2)
1817 dma_attrs = DMA_ATTR_FORCE_NON_COHERENT;
Sudarshan Rajagopalan5d6c6f52017-05-19 17:00:31 -07001818 else if (attr == 3)
1819 dma_attrs = DMA_ATTR_IOMMU_USE_UPSTREAM_HINT;
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07001820 else
1821 goto invalid_format;
1822
1823 iova = dma_map_single_attrs(dev, v_addr, size,
1824 DMA_TO_DEVICE, dma_attrs);
1825
1826 if (dma_mapping_error(dev, iova)) {
1827 pr_err("Failed to perform dma_map_single\n");
1828 ret = -EINVAL;
1829 goto out;
1830 }
1831
1832 retval = count;
1833 pr_err("Mapped 0x%p to %pa (len=0x%zx)\n",
1834 v_addr, &iova, size);
1835 ddev->iova = iova;
1836 pr_err("Saved iova=%pa for future PTE commands\n", &iova);
1837out:
1838 return retval;
1839
1840invalid_format:
Sudarshan Rajagopalan5d6c6f52017-05-19 17:00:31 -07001841 pr_err("Invalid format. Expected: addr,len,dma attr where 'dma attr' is\n0: normal mapping\n1: force coherent\n2: force non-cohernet\n3: use system cache\n");
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07001842 return retval;
1843
1844invalid_addr:
1845 pr_err("Invalid addr given! Address should be within 1MB size from start addr returned by doing 'cat test_virt_addr'.\n");
1846 return retval;
1847}

static ssize_t iommu_debug_dma_map_read(struct file *file, char __user *ubuf,
					size_t count, loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	struct device *dev = ddev->dev;
	char buf[100];
	ssize_t retval;
	size_t buflen;
	dma_addr_t iova;

	if (!dev->archdata.mapping) {
		pr_err("No mapping. Did you already attach?\n");
		return -EINVAL;
	}
	if (!dev->archdata.mapping->domain) {
		pr_err("No domain. Did you already attach?\n");
		return -EINVAL;
	}

	if (*offset)
		return 0;

	memset(buf, 0, sizeof(buf));

	iova = ddev->iova;
	snprintf(buf, sizeof(buf), "%pa\n", &iova);

	buflen = min(count, strlen(buf));
	if (copy_to_user(ubuf, buf, buflen)) {
		pr_err("Couldn't copy_to_user\n");
		retval = -EFAULT;
	} else {
		*offset = 1;	/* non-zero means we're done */
		retval = buflen;
	}

	return retval;
}

static const struct file_operations iommu_debug_dma_map_fops = {
	.open = simple_open,
	.write = iommu_debug_dma_map_write,
	.read = iommu_debug_dma_map_read,
};
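
/*
 * Example dma_map flow (a sketch; the address comes from test_virt_addr,
 * which prints the base of the 1MB test buffer allocated at init):
 *
 *	cat .../test_virt_addr			-> e.g. 0xffffff8012345000
 *	echo "0xffffff8012345000,0x1000,0" > .../dma_map
 *	cat .../dma_map				-> prints the iova just mapped
 */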

static ssize_t iommu_debug_unmap_write(struct file *file,
				       const char __user *ubuf,
				       size_t count, loff_t *offset)
{
	ssize_t retval = 0;
	char *comma1;
	char buf[100];
	dma_addr_t iova;
	size_t size;
	size_t unmapped;
	struct iommu_debug_device *ddev = file->private_data;

	if (count >= sizeof(buf)) {
		pr_err("Value too large\n");
		return -EINVAL;
	}

	if (!ddev->domain) {
		pr_err("No domain. Did you already attach?\n");
		return -EINVAL;
	}

	memset(buf, 0, sizeof(buf));

	if (copy_from_user(buf, ubuf, count)) {
		pr_err("Couldn't copy from user\n");
		retval = -EFAULT;
		goto out;
	}

	comma1 = strnchr(buf, count, ',');
	if (!comma1)
		goto invalid_format;

	/* split up the words */
	*comma1 = '\0';

	if (kstrtoux(buf, 0, &iova))
		goto invalid_format;

	if (kstrtosize_t(comma1 + 1, 0, &size))
		goto invalid_format;

	unmapped = iommu_unmap(ddev->domain, iova, size);
	if (unmapped != size) {
		pr_err("iommu_unmap failed. Expected to unmap: 0x%zx, unmapped: 0x%zx\n",
		       size, unmapped);
		return -EIO;
	}

	retval = count;
	pr_err("Unmapped %pa (len=0x%zx)\n", &iova, size);
out:
	return retval;

invalid_format:
	pr_err("Invalid format. Expected: iova,len\n");
	return -EINVAL;
}

static const struct file_operations iommu_debug_unmap_fops = {
	.open = simple_open,
	.write = iommu_debug_unmap_write,
};
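
/*
 * Example usage for the unmap file (a sketch; the iova/len pair must match
 * a mapping previously created via the map file):
 *
 *	echo "0x10000,0x1000" > .../unmap
 */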

static ssize_t iommu_debug_dma_unmap_write(struct file *file,
					   const char __user *ubuf,
					   size_t count, loff_t *offset)
{
	ssize_t retval = -EINVAL;
	char *comma1, *comma2;
	char buf[100];
	size_t size;
	unsigned int attr;
	dma_addr_t iova;
	unsigned long dma_attrs;
	struct iommu_debug_device *ddev = file->private_data;
	struct device *dev = ddev->dev;

	if (count >= sizeof(buf)) {
		pr_err("Value too large\n");
		return -EINVAL;
	}

	if (!dev->archdata.mapping) {
		pr_err("No mapping. Did you already attach?\n");
		retval = -EINVAL;
		goto out;
	}
	if (!dev->archdata.mapping->domain) {
		pr_err("No domain. Did you already attach?\n");
		retval = -EINVAL;
		goto out;
	}

	memset(buf, 0, sizeof(buf));

	if (copy_from_user(buf, ubuf, count)) {
		pr_err("Couldn't copy from user\n");
		retval = -EFAULT;
		goto out;
	}

	comma1 = strnchr(buf, count, ',');
	if (!comma1)
		goto invalid_format;

	comma2 = strnchr(comma1 + 1, count, ',');
	if (!comma2)
		goto invalid_format;

	*comma1 = *comma2 = '\0';

	if (kstrtoux(buf, 0, &iova))
		goto invalid_format;

	if (kstrtosize_t(comma1 + 1, 0, &size))
		goto invalid_format;

	if (kstrtouint(comma2 + 1, 0, &attr))
		goto invalid_format;

	if (attr == 0)
		dma_attrs = 0;
	else if (attr == 1)
		dma_attrs = DMA_ATTR_FORCE_COHERENT;
	else if (attr == 2)
		dma_attrs = DMA_ATTR_FORCE_NON_COHERENT;
	else if (attr == 3)
		dma_attrs = DMA_ATTR_IOMMU_USE_UPSTREAM_HINT;
	else
		goto invalid_format;

	dma_unmap_single_attrs(dev, iova, size, DMA_TO_DEVICE, dma_attrs);

	retval = count;
	pr_err("Unmapped %pa (len=0x%zx)\n", &iova, size);
out:
	return retval;

invalid_format:
	pr_err("Invalid format. Expected: iova,len,dma attr\n");
	return retval;
}

static const struct file_operations iommu_debug_dma_unmap_fops = {
	.open = simple_open,
	.write = iommu_debug_dma_unmap_write,
};
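
/*
 * Example usage for the dma_unmap file (a sketch; the iova is the one
 * reported by dma_map, and the trailing attr must match the one used when
 * mapping, e.g. 0 for a normal mapping):
 *
 *	echo "0x10000,0x1000,0" > .../dma_unmap
 */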

static ssize_t iommu_debug_config_clocks_write(struct file *file,
					       const char __user *ubuf,
					       size_t count, loff_t *offset)
{
	char buf;
	struct iommu_debug_device *ddev = file->private_data;
	struct device *dev = ddev->dev;

	/* we're expecting a single character plus (optionally) a newline */
	if (count > 2) {
		dev_err(dev, "Invalid value\n");
		return -EINVAL;
	}

	if (!ddev->domain) {
		dev_err(dev, "No domain. Did you already attach?\n");
		return -EINVAL;
	}

	if (copy_from_user(&buf, ubuf, 1)) {
		dev_err(dev, "Couldn't copy from user\n");
		return -EFAULT;
	}

	mutex_lock(&ddev->clk_lock);
	switch (buf) {
	case '0':
		if (ddev->clk_count == 0) {
			dev_err(dev, "Config clocks already disabled\n");
			break;
		}

		if (--ddev->clk_count > 0)
			break;

		dev_err(dev, "Disabling config clocks\n");
		iommu_disable_config_clocks(ddev->domain);
		break;
	case '1':
		if (ddev->clk_count++ > 0)
			break;

		dev_err(dev, "Enabling config clocks\n");
		if (iommu_enable_config_clocks(ddev->domain))
			dev_err(dev, "Failed!\n");
		break;
	default:
		dev_err(dev, "Invalid value. Should be 0 or 1.\n");
		mutex_unlock(&ddev->clk_lock);
		return -EINVAL;
	}
	mutex_unlock(&ddev->clk_lock);

	return count;
}

static const struct file_operations iommu_debug_config_clocks_fops = {
	.open = simple_open,
	.write = iommu_debug_config_clocks_write,
};
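
/*
 * The config_clocks file is reference counted: each write of '1' takes a
 * vote for the IOMMU's config clocks and each write of '0' drops one; the
 * clocks only toggle on the 0<->1 transitions. For example:
 *
 *	echo 1 > .../config_clocks	(clocks enabled)
 *	echo 1 > .../config_clocks	(count = 2, no-op)
 *	echo 0 > .../config_clocks	(count = 1, still enabled)
 *	echo 0 > .../config_clocks	(clocks disabled)
 */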

static ssize_t iommu_debug_trigger_fault_write(
		struct file *file, const char __user *ubuf, size_t count,
		loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	unsigned long flags;

	if (!ddev->domain) {
		pr_err("No domain. Did you already attach?\n");
		return -EINVAL;
	}

	if (kstrtoul_from_user(ubuf, count, 0, &flags)) {
		pr_err("Invalid flags format\n");
		return -EFAULT;
	}

	iommu_trigger_fault(ddev->domain, flags);

	return count;
}

static const struct file_operations iommu_debug_trigger_fault_fops = {
	.open = simple_open,
	.write = iommu_debug_trigger_fault_write,
};
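
/*
 * Example usage for the trigger-fault file (a sketch; the value is passed
 * straight through to iommu_trigger_fault() as implementation-defined
 * flags, so meaningful values depend on the IOMMU driver):
 *
 *	echo 0 > .../trigger-fault
 */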

/*
 * The following will only work for drivers that implement the generic
 * device tree bindings described in
 * Documentation/devicetree/bindings/iommu/iommu.txt
 */
static int snarf_iommu_devices(struct device *dev, void *ignored)
{
	struct iommu_debug_device *ddev;
	struct iommu_group *group;
	struct dentry *dir;

	if (!of_find_property(dev->of_node, "iommus", NULL))
		return 0;

	if (!of_device_is_compatible(dev->of_node, "iommu-debug-test"))
		return 0;

	/* Hold a reference count on the group while the device is listed */
	group = iommu_group_get(dev);
	if (!group)
		return 0;

	ddev = kzalloc(sizeof(*ddev), GFP_KERNEL);
	if (!ddev) {
		iommu_group_put(group);
		return -ENOMEM;
	}
	mutex_init(&ddev->clk_lock);
	ddev->dev = dev;
	dir = debugfs_create_dir(dev_name(dev), debugfs_tests_dir);
	if (!dir) {
		pr_err("Couldn't create iommu/tests/%s debugfs dir\n",
		       dev_name(dev));
		goto err;
	}

	if (!debugfs_create_file("nr_iters", 0400, dir, &iters_per_op,
				 &iommu_debug_nr_iters_ops)) {
		pr_err("Couldn't create iommu/tests/%s/nr_iters debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("test_virt_addr", 0400, dir, ddev,
				 &iommu_debug_test_virt_addr_fops)) {
		pr_err("Couldn't create iommu/tests/%s/test_virt_addr debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("profiling", 0400, dir, ddev,
				 &iommu_debug_profiling_fops)) {
		pr_err("Couldn't create iommu/tests/%s/profiling debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("secure_profiling", 0400, dir, ddev,
				 &iommu_debug_secure_profiling_fops)) {
		pr_err("Couldn't create iommu/tests/%s/secure_profiling debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("profiling_fast", 0400, dir, ddev,
				 &iommu_debug_profiling_fast_fops)) {
		pr_err("Couldn't create iommu/tests/%s/profiling_fast debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("profiling_fast_dma_api", 0400, dir, ddev,
				 &iommu_debug_profiling_fast_dma_api_fops)) {
		pr_err("Couldn't create iommu/tests/%s/profiling_fast_dma_api debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("functional_fast_dma_api", 0400, dir, ddev,
				 &iommu_debug_functional_fast_dma_api_fops)) {
		pr_err("Couldn't create iommu/tests/%s/functional_fast_dma_api debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("functional_arm_dma_api", 0400, dir, ddev,
				 &iommu_debug_functional_arm_dma_api_fops)) {
		pr_err("Couldn't create iommu/tests/%s/functional_arm_dma_api debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("dma_attach", 0600, dir, ddev,
				 &iommu_debug_dma_attach_fops)) {
		pr_err("Couldn't create iommu/tests/%s/dma_attach debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("attach", 0400, dir, ddev,
				 &iommu_debug_attach_fops)) {
		pr_err("Couldn't create iommu/tests/%s/attach debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("secure_attach", 0400, dir, ddev,
				 &iommu_debug_secure_attach_fops)) {
		pr_err("Couldn't create iommu/tests/%s/secure_attach debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("atos", 0200, dir, ddev,
				 &iommu_debug_atos_fops)) {
		pr_err("Couldn't create iommu/tests/%s/atos debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("dma_atos", 0600, dir, ddev,
				 &iommu_debug_dma_atos_fops)) {
		pr_err("Couldn't create iommu/tests/%s/dma_atos debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("map", 0200, dir, ddev,
				 &iommu_debug_map_fops)) {
		pr_err("Couldn't create iommu/tests/%s/map debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("dma_map", 0600, dir, ddev,
				 &iommu_debug_dma_map_fops)) {
		pr_err("Couldn't create iommu/tests/%s/dma_map debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("unmap", 0200, dir, ddev,
				 &iommu_debug_unmap_fops)) {
		pr_err("Couldn't create iommu/tests/%s/unmap debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("dma_unmap", 0200, dir, ddev,
				 &iommu_debug_dma_unmap_fops)) {
		pr_err("Couldn't create iommu/tests/%s/dma_unmap debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("pte", 0600, dir, ddev,
				 &iommu_debug_pte_fops)) {
		pr_err("Couldn't create iommu/tests/%s/pte debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("config_clocks", 0200, dir, ddev,
				 &iommu_debug_config_clocks_fops)) {
		pr_err("Couldn't create iommu/tests/%s/config_clocks debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("trigger-fault", 0200, dir, ddev,
				 &iommu_debug_trigger_fault_fops)) {
		pr_err("Couldn't create iommu/tests/%s/trigger-fault debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	list_add(&ddev->list, &iommu_debug_devices);
	return 0;

err_rmdir:
	debugfs_remove_recursive(dir);
err:
	kfree(ddev);
	iommu_group_put(group);
	return 0;
}

static int iommu_debug_init_tests(void)
{
	debugfs_tests_dir = debugfs_create_dir("tests",
					       iommu_debugfs_top);
	if (!debugfs_tests_dir) {
		pr_err("Couldn't create iommu/tests debugfs directory\n");
		return -ENODEV;
	}

	test_virt_addr = kzalloc(SZ_1M, GFP_KERNEL);
	if (!test_virt_addr) {
		debugfs_remove_recursive(debugfs_tests_dir);
		return -ENOMEM;
	}

	return bus_for_each_dev(&platform_bus_type, NULL, NULL,
				snarf_iommu_devices);
}

static void iommu_debug_destroy_tests(void)
{
	debugfs_remove_recursive(debugfs_tests_dir);
}
#else
static inline int iommu_debug_init_tests(void) { return 0; }
static inline void iommu_debug_destroy_tests(void) { }
#endif

/*
 * This isn't really a "driver", we just need something in the device tree
 * so that our tests can run without any client drivers, and our tests rely
 * on parsing the device tree for nodes with the `iommus' property.
 */
static int iommu_debug_pass(struct platform_device *pdev)
{
	return 0;
}

static const struct of_device_id iommu_debug_of_match[] = {
	{ .compatible = "iommu-debug-test" },
	{ },
};

static struct platform_driver iommu_debug_driver = {
	.probe = iommu_debug_pass,
	.remove = iommu_debug_pass,
	.driver = {
		.name = "iommu-debug",
		.of_match_table = iommu_debug_of_match,
	},
};
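
/*
 * Illustrative device-tree node for exercising these tests (the node name
 * and the iommu specifier below are placeholders; the real phandle and
 * stream-ID values depend on the target's SMMU binding):
 *
 *	iommu_test_device {
 *		compatible = "iommu-debug-test";
 *		iommus = <&apps_smmu 0x1 0x0>;
 *	};
 */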

static int iommu_debug_init(void)
{
	if (iommu_debug_init_tests())
		return -ENODEV;

	return platform_driver_register(&iommu_debug_driver);
}

static void iommu_debug_exit(void)
{
	platform_driver_unregister(&iommu_debug_driver);
	iommu_debug_destroy_tests();
}

module_init(iommu_debug_init);
module_exit(iommu_debug_exit);