blob: d6f13465eac5d93e3dbac68754e41f0a65a1f913 [file] [log] [blame]
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07001/*
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07002 * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14
15#define pr_fmt(fmt) "iommu-debug: %s: " fmt, __func__
16
17#include <linux/debugfs.h>
18#include <linux/device.h>
19#include <linux/iommu.h>
20#include <linux/of.h>
21#include <linux/platform_device.h>
22#include <linux/slab.h>
23#include <linux/module.h>
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -070024#include <linux/uaccess.h>
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -070025#include <linux/dma-contiguous.h>
Mitchel Humpherys5e991f12015-07-30 19:25:54 -070026#include <soc/qcom/secure_buffer.h>
Mitchel Humpherysbe3060c2015-10-08 15:08:01 -070027#include <linux/dma-mapping.h>
28#include <asm/cacheflush.h>
29#include <asm/dma-iommu.h>
Mitchel Humpherys42296fb2015-06-23 16:29:16 -070030
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -070031static const char *iommu_debug_attr_to_string(enum iommu_attr attr)
32{
33 switch (attr) {
34 case DOMAIN_ATTR_GEOMETRY:
35 return "DOMAIN_ATTR_GEOMETRY";
36 case DOMAIN_ATTR_PAGING:
37 return "DOMAIN_ATTR_PAGING";
38 case DOMAIN_ATTR_WINDOWS:
39 return "DOMAIN_ATTR_WINDOWS";
40 case DOMAIN_ATTR_FSL_PAMU_STASH:
41 return "DOMAIN_ATTR_FSL_PAMU_STASH";
42 case DOMAIN_ATTR_FSL_PAMU_ENABLE:
43 return "DOMAIN_ATTR_FSL_PAMU_ENABLE";
44 case DOMAIN_ATTR_FSL_PAMUV1:
45 return "DOMAIN_ATTR_FSL_PAMUV1";
46 case DOMAIN_ATTR_NESTING:
47 return "DOMAIN_ATTR_NESTING";
48 case DOMAIN_ATTR_PT_BASE_ADDR:
49 return "DOMAIN_ATTR_PT_BASE_ADDR";
50 case DOMAIN_ATTR_SECURE_VMID:
51 return "DOMAIN_ATTR_SECURE_VMID";
52 case DOMAIN_ATTR_ATOMIC:
53 return "DOMAIN_ATTR_ATOMIC";
54 case DOMAIN_ATTR_CONTEXT_BANK:
55 return "DOMAIN_ATTR_CONTEXT_BANK";
56 case DOMAIN_ATTR_TTBR0:
57 return "DOMAIN_ATTR_TTBR0";
58 case DOMAIN_ATTR_CONTEXTIDR:
59 return "DOMAIN_ATTR_CONTEXTIDR";
60 case DOMAIN_ATTR_PROCID:
61 return "DOMAIN_ATTR_PROCID";
62 case DOMAIN_ATTR_DYNAMIC:
63 return "DOMAIN_ATTR_DYNAMIC";
64 case DOMAIN_ATTR_NON_FATAL_FAULTS:
65 return "DOMAIN_ATTR_NON_FATAL_FAULTS";
66 case DOMAIN_ATTR_S1_BYPASS:
67 return "DOMAIN_ATTR_S1_BYPASS";
68 case DOMAIN_ATTR_FAST:
69 return "DOMAIN_ATTR_FAST";
70 default:
71 return "Unknown attr!";
72 }
73}
74
Mitchel Humpherys42296fb2015-06-23 16:29:16 -070075#ifdef CONFIG_IOMMU_DEBUG_TRACKING
76
77static DEFINE_MUTEX(iommu_debug_attachments_lock);
78static LIST_HEAD(iommu_debug_attachments);
79static struct dentry *debugfs_attachments_dir;
80
/*
 * One tracked domain (and, once attached, its device).  Entries live on
 * the global iommu_debug_attachments list, protected by
 * iommu_debug_attachments_lock.
 */
struct iommu_debug_attachment {
	struct iommu_domain *domain;
	struct device *dev;	/* NULL until the domain is attached */
	struct dentry *dentry;	/* per-attachment debugfs directory */
	struct list_head list;
	unsigned long reg_offset; /* offset used by the reg_read/reg_write files */
};
88
/*
 * debugfs "info" file: print the attachment's domain pointer and, if the
 * driver reports one, its secure VMID.
 */
static int iommu_debug_attachment_info_show(struct seq_file *s, void *ignored)
{
	struct iommu_debug_attachment *attach = s->private;
	int secure_vmid;

	seq_printf(s, "Domain: 0x%p\n", attach->domain);

	seq_puts(s, "SECURE_VMID: ");
	if (iommu_domain_get_attr(attach->domain,
				  DOMAIN_ATTR_SECURE_VMID,
				  &secure_vmid))
		seq_puts(s, "(Unknown)\n");
	else
		seq_printf(s, "%s (0x%x)\n",
			   msm_secure_vmid_to_string(secure_vmid), secure_vmid);

	return 0;
}

static int iommu_debug_attachment_info_open(struct inode *inode,
					    struct file *file)
{
	return single_open(file, iommu_debug_attachment_info_show,
			   inode->i_private);
}

static const struct file_operations iommu_debug_attachment_info_fops = {
	.open	 = iommu_debug_attachment_info_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};
121
Mitchel Humpherys288086e2015-07-09 16:55:08 -0700122static ssize_t iommu_debug_attachment_trigger_fault_write(
123 struct file *file, const char __user *ubuf, size_t count,
124 loff_t *offset)
125{
126 struct iommu_debug_attachment *attach = file->private_data;
127 unsigned long flags;
128
129 if (kstrtoul_from_user(ubuf, count, 0, &flags)) {
130 pr_err("Invalid flags format\n");
131 return -EFAULT;
132 }
133
134 iommu_trigger_fault(attach->domain, flags);
135
136 return count;
137}
138
139static const struct file_operations
140iommu_debug_attachment_trigger_fault_fops = {
141 .open = simple_open,
142 .write = iommu_debug_attachment_trigger_fault_write,
143};
144
Mitchel Humpherys0dc04de2015-08-21 14:08:40 -0700145static ssize_t iommu_debug_attachment_reg_offset_write(
146 struct file *file, const char __user *ubuf, size_t count,
147 loff_t *offset)
148{
149 struct iommu_debug_attachment *attach = file->private_data;
150 unsigned long reg_offset;
151
152 if (kstrtoul_from_user(ubuf, count, 0, &reg_offset)) {
153 pr_err("Invalid reg_offset format\n");
154 return -EFAULT;
155 }
156
157 attach->reg_offset = reg_offset;
158
159 return count;
160}
161
162static const struct file_operations iommu_debug_attachment_reg_offset_fops = {
163 .open = simple_open,
164 .write = iommu_debug_attachment_reg_offset_write,
165};
166
167static ssize_t iommu_debug_attachment_reg_read_read(
168 struct file *file, char __user *ubuf, size_t count, loff_t *offset)
169{
170 struct iommu_debug_attachment *attach = file->private_data;
171 unsigned long val;
172 char *val_str;
173 ssize_t val_str_len;
174
175 if (*offset)
176 return 0;
177
178 val = iommu_reg_read(attach->domain, attach->reg_offset);
179 val_str = kasprintf(GFP_KERNEL, "0x%lx\n", val);
180 if (!val_str)
181 return -ENOMEM;
182 val_str_len = strlen(val_str);
183
184 if (copy_to_user(ubuf, val_str, val_str_len)) {
185 pr_err("copy_to_user failed\n");
186 val_str_len = -EFAULT;
187 goto out;
188 }
189 *offset = 1; /* non-zero means we're done */
190
191out:
192 kfree(val_str);
193 return val_str_len;
194}
195
196static const struct file_operations iommu_debug_attachment_reg_read_fops = {
197 .open = simple_open,
198 .read = iommu_debug_attachment_reg_read_read,
199};
200
201static ssize_t iommu_debug_attachment_reg_write_write(
202 struct file *file, const char __user *ubuf, size_t count,
203 loff_t *offset)
204{
205 struct iommu_debug_attachment *attach = file->private_data;
206 unsigned long val;
207
208 if (kstrtoul_from_user(ubuf, count, 0, &val)) {
209 pr_err("Invalid val format\n");
210 return -EFAULT;
211 }
212
213 iommu_reg_write(attach->domain, attach->reg_offset, val);
214
215 return count;
216}
217
218static const struct file_operations iommu_debug_attachment_reg_write_fops = {
219 .open = simple_open,
220 .write = iommu_debug_attachment_reg_write_write,
221};
222
Mitchel Humpherysa05b2522015-07-14 14:30:33 -0700223/* should be called with iommu_debug_attachments_lock locked */
224static int iommu_debug_attach_add_debugfs(
225 struct iommu_debug_attachment *attach)
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700226{
Mitchel Humpherys54379212015-08-26 11:52:57 -0700227 const char *attach_name;
Mitchel Humpherysa05b2522015-07-14 14:30:33 -0700228 struct device *dev = attach->dev;
229 struct iommu_domain *domain = attach->domain;
Mitchel Humpherys54379212015-08-26 11:52:57 -0700230 int is_dynamic;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700231
Mitchel Humpherys54379212015-08-26 11:52:57 -0700232 if (iommu_domain_get_attr(domain, DOMAIN_ATTR_DYNAMIC, &is_dynamic))
233 is_dynamic = 0;
234
235 if (is_dynamic) {
236 uuid_le uuid;
237
238 uuid_le_gen(&uuid);
239 attach_name = kasprintf(GFP_KERNEL, "%s-%pUl", dev_name(dev),
240 uuid.b);
241 if (!attach_name)
242 return -ENOMEM;
243 } else {
244 attach_name = dev_name(dev);
245 }
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700246
Mitchel Humpherys088cc582015-07-09 15:02:03 -0700247 attach->dentry = debugfs_create_dir(attach_name,
248 debugfs_attachments_dir);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700249 if (!attach->dentry) {
Mitchel Humpherys088cc582015-07-09 15:02:03 -0700250 pr_err("Couldn't create iommu/attachments/%s debugfs directory for domain 0x%p\n",
Mitchel Humpherys876e2be2015-07-10 11:56:56 -0700251 attach_name, domain);
Mitchel Humpherys54379212015-08-26 11:52:57 -0700252 if (is_dynamic)
253 kfree(attach_name);
Mitchel Humpherysa05b2522015-07-14 14:30:33 -0700254 return -EIO;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700255 }
Mitchel Humpherys54379212015-08-26 11:52:57 -0700256
257 if (is_dynamic)
258 kfree(attach_name);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700259
Mitchel Humpherys088cc582015-07-09 15:02:03 -0700260 if (!debugfs_create_file(
261 "info", S_IRUSR, attach->dentry, attach,
262 &iommu_debug_attachment_info_fops)) {
263 pr_err("Couldn't create iommu/attachments/%s/info debugfs file for domain 0x%p\n",
264 dev_name(dev), domain);
Mitchel Humpherysa05b2522015-07-14 14:30:33 -0700265 goto err_rmdir;
Mitchel Humpherys088cc582015-07-09 15:02:03 -0700266 }
267
Mitchel Humpherys288086e2015-07-09 16:55:08 -0700268 if (!debugfs_create_file(
269 "trigger_fault", S_IRUSR, attach->dentry, attach,
270 &iommu_debug_attachment_trigger_fault_fops)) {
271 pr_err("Couldn't create iommu/attachments/%s/trigger_fault debugfs file for domain 0x%p\n",
272 dev_name(dev), domain);
273 goto err_rmdir;
Mitchel Humpherys0dc04de2015-08-21 14:08:40 -0700274 }
275
276 if (!debugfs_create_file(
277 "reg_offset", S_IRUSR, attach->dentry, attach,
278 &iommu_debug_attachment_reg_offset_fops)) {
279 pr_err("Couldn't create iommu/attachments/%s/reg_offset debugfs file for domain 0x%p\n",
280 dev_name(dev), domain);
281 goto err_rmdir;
282 }
283
284 if (!debugfs_create_file(
285 "reg_read", S_IRUSR, attach->dentry, attach,
286 &iommu_debug_attachment_reg_read_fops)) {
287 pr_err("Couldn't create iommu/attachments/%s/reg_read debugfs file for domain 0x%p\n",
288 dev_name(dev), domain);
289 goto err_rmdir;
290 }
291
292 if (!debugfs_create_file(
293 "reg_write", S_IRUSR, attach->dentry, attach,
294 &iommu_debug_attachment_reg_write_fops)) {
295 pr_err("Couldn't create iommu/attachments/%s/reg_write debugfs file for domain 0x%p\n",
296 dev_name(dev), domain);
297 goto err_rmdir;
298 }
299
Mitchel Humpherysa05b2522015-07-14 14:30:33 -0700300 return 0;
301
302err_rmdir:
303 debugfs_remove_recursive(attach->dentry);
304 return -EIO;
305}
306
Susheel Khianie66aa5b2015-08-25 17:25:42 +0530307void iommu_debug_domain_add(struct iommu_domain *domain)
Mitchel Humpherysa05b2522015-07-14 14:30:33 -0700308{
309 struct iommu_debug_attachment *attach;
310
311 mutex_lock(&iommu_debug_attachments_lock);
312
313 attach = kmalloc(sizeof(*attach), GFP_KERNEL);
314 if (!attach)
315 goto out_unlock;
316
317 attach->domain = domain;
Susheel Khianie66aa5b2015-08-25 17:25:42 +0530318 attach->dev = NULL;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700319 list_add(&attach->list, &iommu_debug_attachments);
Susheel Khianie66aa5b2015-08-25 17:25:42 +0530320
Mitchel Humpherysa05b2522015-07-14 14:30:33 -0700321out_unlock:
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700322 mutex_unlock(&iommu_debug_attachments_lock);
323}
324
Susheel Khianie66aa5b2015-08-25 17:25:42 +0530325void iommu_debug_domain_remove(struct iommu_domain *domain)
326{
327 struct iommu_debug_attachment *it;
328
329 mutex_lock(&iommu_debug_attachments_lock);
330 list_for_each_entry(it, &iommu_debug_attachments, list)
331 if (it->domain == domain && it->dev == NULL)
332 break;
333
334 if (&it->list == &iommu_debug_attachments) {
335 WARN(1, "Couldn't find debug attachment for domain=0x%p",
336 domain);
337 } else {
338 list_del(&it->list);
339 kfree(it);
340 }
341 mutex_unlock(&iommu_debug_attachments_lock);
342}
343
344void iommu_debug_attach_device(struct iommu_domain *domain,
345 struct device *dev)
346{
347 struct iommu_debug_attachment *attach;
348
349 mutex_lock(&iommu_debug_attachments_lock);
350
351 list_for_each_entry(attach, &iommu_debug_attachments, list)
352 if (attach->domain == domain && attach->dev == NULL)
353 break;
354
355 if (&attach->list == &iommu_debug_attachments) {
356 WARN(1, "Couldn't find debug attachment for domain=0x%p dev=%s",
357 domain, dev_name(dev));
358 } else {
359 attach->dev = dev;
360
361 /*
362 * we might not init until after other drivers start calling
363 * iommu_attach_device. Only set up the debugfs nodes if we've
364 * already init'd to avoid polluting the top-level debugfs
365 * directory (by calling debugfs_create_dir with a NULL
366 * parent). These will be flushed out later once we init.
367 */
368
369 if (debugfs_attachments_dir)
370 iommu_debug_attach_add_debugfs(attach);
371 }
372
373 mutex_unlock(&iommu_debug_attachments_lock);
374}
375
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700376void iommu_debug_detach_device(struct iommu_domain *domain,
377 struct device *dev)
378{
379 struct iommu_debug_attachment *it;
380
381 mutex_lock(&iommu_debug_attachments_lock);
382 list_for_each_entry(it, &iommu_debug_attachments, list)
383 if (it->domain == domain && it->dev == dev)
384 break;
385
386 if (&it->list == &iommu_debug_attachments) {
387 WARN(1, "Couldn't find debug attachment for domain=0x%p dev=%s",
388 domain, dev_name(dev));
389 } else {
Susheel Khianie66aa5b2015-08-25 17:25:42 +0530390 /*
391 * Just remove debugfs entry and mark dev as NULL on
392 * iommu_detach call. We would remove the actual
393 * attachment entry from the list only on domain_free call.
394 * This is to ensure we keep track of unattached domains too.
395 */
396
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700397 debugfs_remove_recursive(it->dentry);
Susheel Khianie66aa5b2015-08-25 17:25:42 +0530398 it->dev = NULL;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700399 }
400 mutex_unlock(&iommu_debug_attachments_lock);
401}
402
403static int iommu_debug_init_tracking(void)
404{
Mitchel Humpherysa05b2522015-07-14 14:30:33 -0700405 int ret = 0;
406 struct iommu_debug_attachment *attach;
407
408 mutex_lock(&iommu_debug_attachments_lock);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700409 debugfs_attachments_dir = debugfs_create_dir("attachments",
Mitchel Humpherysc75ae492015-07-15 18:27:36 -0700410 iommu_debugfs_top);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700411 if (!debugfs_attachments_dir) {
412 pr_err("Couldn't create iommu/attachments debugfs directory\n");
Mitchel Humpherysa05b2522015-07-14 14:30:33 -0700413 ret = -ENODEV;
414 goto out_unlock;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700415 }
416
Mitchel Humpherysa05b2522015-07-14 14:30:33 -0700417 /* set up debugfs entries for attachments made during early boot */
418 list_for_each_entry(attach, &iommu_debug_attachments, list)
Susheel Khianie66aa5b2015-08-25 17:25:42 +0530419 if (attach->dev)
420 iommu_debug_attach_add_debugfs(attach);
Mitchel Humpherysa05b2522015-07-14 14:30:33 -0700421
422out_unlock:
423 mutex_unlock(&iommu_debug_attachments_lock);
424 return ret;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700425}
Mitchel Humpherysc75ae492015-07-15 18:27:36 -0700426
/* Tear down the debugfs tree created by iommu_debug_init_tracking(). */
static void iommu_debug_destroy_tracking(void)
{
	debugfs_remove_recursive(debugfs_attachments_dir);
}
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700431#else
/* No-op stubs when CONFIG_IOMMU_DEBUG_TRACKING is disabled. */
static inline int iommu_debug_init_tracking(void) { return 0; }
static inline void iommu_debug_destroy_tracking(void) { }
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700434#endif
435
436#ifdef CONFIG_IOMMU_TESTS
437
438static LIST_HEAD(iommu_debug_devices);
439static struct dentry *debugfs_tests_dir;
Patrick Dalye4e39862015-11-20 20:00:50 -0800440static u32 iters_per_op = 1;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700441
/*
 * Per-device state for the IOMMU test (CONFIG_IOMMU_TESTS) debugfs
 * files; entries live on the iommu_debug_devices list.
 */
struct iommu_debug_device {
	struct device *dev;
	struct iommu_domain *domain;
	/*
	 * iova/phys/len appear to cache the current test mapping — used by
	 * test files outside this view; confirm against the rest of the file.
	 */
	u64 iova;
	u64 phys;
	size_t len;
	struct list_head list;
};
450
451static int iommu_debug_build_phoney_sg_table(struct device *dev,
452 struct sg_table *table,
453 unsigned long total_size,
454 unsigned long chunk_size)
455{
456 unsigned long nents = total_size / chunk_size;
457 struct scatterlist *sg;
458 int i;
459 struct page *page;
460
461 if (!IS_ALIGNED(total_size, PAGE_SIZE))
462 return -EINVAL;
463 if (!IS_ALIGNED(total_size, chunk_size))
464 return -EINVAL;
465 if (sg_alloc_table(table, nents, GFP_KERNEL))
466 return -EINVAL;
467 page = alloc_pages(GFP_KERNEL, get_order(chunk_size));
468 if (!page)
469 goto free_table;
470
471 /* all the same page... why not. */
472 for_each_sg(table->sgl, sg, table->nents, i)
473 sg_set_page(sg, page, chunk_size, 0);
474
475 return 0;
476
477free_table:
478 sg_free_table(table);
479 return -ENOMEM;
480}
481
/*
 * Free the shared page and scatterlist created by
 * iommu_debug_build_phoney_sg_table().
 */
static void iommu_debug_destroy_phoney_sg_table(struct device *dev,
						struct sg_table *table,
						unsigned long chunk_size)
{
	__free_pages(sg_page(table->sgl), get_order(chunk_size));
	sg_free_table(table);
}
489
490static const char * const _size_to_string(unsigned long size)
491{
492 switch (size) {
493 case SZ_4K:
494 return "4K";
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700495 case SZ_8K:
496 return "8K";
497 case SZ_16K:
498 return "16K";
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700499 case SZ_64K:
500 return "64K";
501 case SZ_2M:
502 return "2M";
503 case SZ_1M * 12:
504 return "12M";
505 case SZ_1M * 20:
506 return "20M";
507 }
508 return "unknown size, please add to _size_to_string";
509}
510
Patrick Dalye4e39862015-11-20 20:00:50 -0800511static int nr_iters_set(void *data, u64 val)
512{
513 if (!val)
514 val = 1;
515 if (val > 10000)
516 val = 10000;
517 *(u32 *)data = val;
518 return 0;
519}
520
521static int nr_iters_get(void *data, u64 *val)
522{
523 *val = *(u32 *)data;
524 return 0;
525}
526
527DEFINE_SIMPLE_ATTRIBUTE(iommu_debug_nr_iters_ops,
528 nr_iters_get, nr_iters_set, "%llu\n");
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700529
Mitchel Humpherys020f90f2015-10-02 16:02:31 -0700530static void iommu_debug_device_profiling(struct seq_file *s, struct device *dev,
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700531 enum iommu_attr attrs[],
532 void *attr_values[], int nattrs,
533 const unsigned long sizes[])
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700534{
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700535 int i;
536 const unsigned long *sz;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700537 struct iommu_domain *domain;
538 unsigned long iova = 0x10000;
539 phys_addr_t paddr = 0xa000;
540
541 domain = iommu_domain_alloc(&platform_bus_type);
542 if (!domain) {
543 seq_puts(s, "Couldn't allocate domain\n");
544 return;
545 }
546
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700547 seq_puts(s, "Domain attributes: [ ");
548 for (i = 0; i < nattrs; ++i) {
549 /* not all attrs are ints, but this will get us by for now */
550 seq_printf(s, "%s=%d%s", iommu_debug_attr_to_string(attrs[i]),
551 *((int *)attr_values[i]),
552 i < nattrs ? " " : "");
Mitchel Humpherys679567c2015-08-28 10:51:24 -0700553 }
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700554 seq_puts(s, "]\n");
555 for (i = 0; i < nattrs; ++i) {
556 if (iommu_domain_set_attr(domain, attrs[i], attr_values[i])) {
557 seq_printf(s, "Couldn't set %d to the value at %p\n",
558 attrs[i], attr_values[i]);
Mitchel Humpherys020f90f2015-10-02 16:02:31 -0700559 goto out_domain_free;
560 }
561 }
562
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700563 if (iommu_attach_device(domain, dev)) {
564 seq_puts(s,
565 "Couldn't attach new domain to device. Is it already attached?\n");
566 goto out_domain_free;
567 }
568
Patrick Dalye4e39862015-11-20 20:00:50 -0800569 seq_printf(s, "(average over %d iterations)\n", iters_per_op);
Patrick Daly3ca31e32015-11-20 20:33:04 -0800570 seq_printf(s, "%8s %19s %16s\n", "size", "iommu_map", "iommu_unmap");
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700571 for (sz = sizes; *sz; ++sz) {
572 unsigned long size = *sz;
573 size_t unmapped;
Patrick Daly3ca31e32015-11-20 20:33:04 -0800574 u64 map_elapsed_ns = 0, unmap_elapsed_ns = 0;
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700575 u64 map_elapsed_us = 0, unmap_elapsed_us = 0;
Patrick Daly3ca31e32015-11-20 20:33:04 -0800576 u32 map_elapsed_rem = 0, unmap_elapsed_rem = 0;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700577 struct timespec tbefore, tafter, diff;
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700578 int i;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700579
Patrick Dalye4e39862015-11-20 20:00:50 -0800580 for (i = 0; i < iters_per_op; ++i) {
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700581 getnstimeofday(&tbefore);
582 if (iommu_map(domain, iova, paddr, size,
583 IOMMU_READ | IOMMU_WRITE)) {
584 seq_puts(s, "Failed to map\n");
585 continue;
586 }
587 getnstimeofday(&tafter);
588 diff = timespec_sub(tafter, tbefore);
Patrick Daly3ca31e32015-11-20 20:33:04 -0800589 map_elapsed_ns += timespec_to_ns(&diff);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700590
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700591 getnstimeofday(&tbefore);
592 unmapped = iommu_unmap(domain, iova, size);
593 if (unmapped != size) {
594 seq_printf(s,
595 "Only unmapped %zx instead of %zx\n",
596 unmapped, size);
597 continue;
598 }
599 getnstimeofday(&tafter);
600 diff = timespec_sub(tafter, tbefore);
Patrick Daly3ca31e32015-11-20 20:33:04 -0800601 unmap_elapsed_ns += timespec_to_ns(&diff);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700602 }
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700603
Patrick Daly3ca31e32015-11-20 20:33:04 -0800604 map_elapsed_ns /= iters_per_op;
605 unmap_elapsed_ns /= iters_per_op;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700606
Patrick Daly3ca31e32015-11-20 20:33:04 -0800607 map_elapsed_us = div_u64_rem(map_elapsed_ns, 1000,
608 &map_elapsed_rem);
609 unmap_elapsed_us = div_u64_rem(unmap_elapsed_ns, 1000,
610 &unmap_elapsed_rem);
611
612 seq_printf(s, "%8s %12lld.%03d us %9lld.%03d us\n",
613 _size_to_string(size),
614 map_elapsed_us, map_elapsed_rem,
615 unmap_elapsed_us, unmap_elapsed_rem);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700616 }
617
618 seq_putc(s, '\n');
Patrick Daly3ca31e32015-11-20 20:33:04 -0800619 seq_printf(s, "%8s %19s %16s\n", "size", "iommu_map_sg", "iommu_unmap");
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700620 for (sz = sizes; *sz; ++sz) {
621 unsigned long size = *sz;
622 size_t unmapped;
Patrick Daly3ca31e32015-11-20 20:33:04 -0800623 u64 map_elapsed_ns = 0, unmap_elapsed_ns = 0;
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700624 u64 map_elapsed_us = 0, unmap_elapsed_us = 0;
Patrick Daly3ca31e32015-11-20 20:33:04 -0800625 u32 map_elapsed_rem = 0, unmap_elapsed_rem = 0;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700626 struct timespec tbefore, tafter, diff;
627 struct sg_table table;
628 unsigned long chunk_size = SZ_4K;
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700629 int i;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700630
631 if (iommu_debug_build_phoney_sg_table(dev, &table, size,
632 chunk_size)) {
633 seq_puts(s,
634 "couldn't build phoney sg table! bailing...\n");
635 goto out_detach;
636 }
637
Patrick Dalye4e39862015-11-20 20:00:50 -0800638 for (i = 0; i < iters_per_op; ++i) {
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700639 getnstimeofday(&tbefore);
640 if (iommu_map_sg(domain, iova, table.sgl, table.nents,
641 IOMMU_READ | IOMMU_WRITE) != size) {
642 seq_puts(s, "Failed to map_sg\n");
643 goto next;
644 }
645 getnstimeofday(&tafter);
646 diff = timespec_sub(tafter, tbefore);
Patrick Daly3ca31e32015-11-20 20:33:04 -0800647 map_elapsed_ns += timespec_to_ns(&diff);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700648
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700649 getnstimeofday(&tbefore);
650 unmapped = iommu_unmap(domain, iova, size);
651 if (unmapped != size) {
652 seq_printf(s,
653 "Only unmapped %zx instead of %zx\n",
654 unmapped, size);
655 goto next;
656 }
657 getnstimeofday(&tafter);
658 diff = timespec_sub(tafter, tbefore);
Patrick Daly3ca31e32015-11-20 20:33:04 -0800659 unmap_elapsed_ns += timespec_to_ns(&diff);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700660 }
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700661
Patrick Daly3ca31e32015-11-20 20:33:04 -0800662 map_elapsed_ns /= iters_per_op;
663 unmap_elapsed_ns /= iters_per_op;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700664
Patrick Daly3ca31e32015-11-20 20:33:04 -0800665 map_elapsed_us = div_u64_rem(map_elapsed_ns, 1000,
666 &map_elapsed_rem);
667 unmap_elapsed_us = div_u64_rem(unmap_elapsed_ns, 1000,
668 &unmap_elapsed_rem);
669
670 seq_printf(s, "%8s %12lld.%03d us %9lld.%03d us\n",
671 _size_to_string(size),
672 map_elapsed_us, map_elapsed_rem,
673 unmap_elapsed_us, unmap_elapsed_rem);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700674
675next:
676 iommu_debug_destroy_phoney_sg_table(dev, &table, chunk_size);
677 }
678
679out_detach:
680 iommu_detach_device(domain, dev);
681out_domain_free:
682 iommu_domain_free(domain);
683}
684
Mitchel Humpherys7cc56e42015-07-06 14:58:23 -0700685static int iommu_debug_profiling_show(struct seq_file *s, void *ignored)
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700686{
687 struct iommu_debug_device *ddev = s->private;
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700688 const unsigned long sizes[] = { SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12,
689 SZ_1M * 20, 0 };
690 enum iommu_attr attrs[] = {
691 DOMAIN_ATTR_ATOMIC,
692 };
693 int htw_disable = 1, atomic = 1;
694 void *attr_values[] = { &htw_disable, &atomic };
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700695
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700696 iommu_debug_device_profiling(s, ddev->dev, attrs, attr_values,
697 ARRAY_SIZE(attrs), sizes);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700698
699 return 0;
700}
701
Mitchel Humpherys7cc56e42015-07-06 14:58:23 -0700702static int iommu_debug_profiling_open(struct inode *inode, struct file *file)
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700703{
Mitchel Humpherys7cc56e42015-07-06 14:58:23 -0700704 return single_open(file, iommu_debug_profiling_show, inode->i_private);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700705}
706
Mitchel Humpherys7cc56e42015-07-06 14:58:23 -0700707static const struct file_operations iommu_debug_profiling_fops = {
708 .open = iommu_debug_profiling_open,
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700709 .read = seq_read,
710 .llseek = seq_lseek,
711 .release = single_release,
712};
713
/*
 * debugfs "secure_profiling" file: same benchmark as "profiling" but on
 * an atomic domain assigned to the CP_PIXEL secure VMID.
 */
static int iommu_debug_secure_profiling_show(struct seq_file *s, void *ignored)
{
	struct iommu_debug_device *ddev = s->private;
	const unsigned long sizes[] = { SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12,
					SZ_1M * 20, 0 };

	enum iommu_attr attrs[] = {
		DOMAIN_ATTR_ATOMIC,
		DOMAIN_ATTR_SECURE_VMID,
	};
	/* attr_values[i] pairs with attrs[i] */
	int one = 1, secure_vmid = VMID_CP_PIXEL;
	void *attr_values[] = { &one, &secure_vmid };

	iommu_debug_device_profiling(s, ddev->dev, attrs, attr_values,
				     ARRAY_SIZE(attrs), sizes);

	return 0;
}

static int iommu_debug_secure_profiling_open(struct inode *inode,
					     struct file *file)
{
	return single_open(file, iommu_debug_secure_profiling_show,
			   inode->i_private);
}

static const struct file_operations iommu_debug_secure_profiling_fops = {
	.open	 = iommu_debug_secure_profiling_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};
746
/*
 * debugfs "profiling_fast" file: benchmark map/unmap on a domain with
 * the FAST and ATOMIC attributes set, over small page-ish sizes.
 */
static int iommu_debug_profiling_fast_show(struct seq_file *s, void *ignored)
{
	struct iommu_debug_device *ddev = s->private;
	size_t sizes[] = {SZ_4K, SZ_8K, SZ_16K, SZ_64K, 0};
	enum iommu_attr attrs[] = {
		DOMAIN_ATTR_FAST,
		DOMAIN_ATTR_ATOMIC,
	};
	/* both attributes take the value 1 */
	int one = 1;
	void *attr_values[] = { &one, &one };

	iommu_debug_device_profiling(s, ddev->dev, attrs, attr_values,
				     ARRAY_SIZE(attrs), sizes);

	return 0;
}

static int iommu_debug_profiling_fast_open(struct inode *inode,
					   struct file *file)
{
	return single_open(file, iommu_debug_profiling_fast_show,
			   inode->i_private);
}

static const struct file_operations iommu_debug_profiling_fast_fops = {
	.open	 = iommu_debug_profiling_fast_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};
777
Mitchel Humpherysbe3060c2015-10-08 15:08:01 -0700778static int iommu_debug_profiling_fast_dma_api_show(struct seq_file *s,
779 void *ignored)
780{
781 int i, experiment;
782 struct iommu_debug_device *ddev = s->private;
783 struct device *dev = ddev->dev;
784 u64 map_elapsed_ns[10], unmap_elapsed_ns[10];
785 struct dma_iommu_mapping *mapping;
786 dma_addr_t dma_addr;
787 void *virt;
788 int fast = 1;
789 const char * const extra_labels[] = {
790 "not coherent",
791 "coherent",
792 };
793 unsigned long extra_attrs[] = {
794 0,
795 DMA_ATTR_SKIP_CPU_SYNC,
796 };
797
798 virt = kmalloc(1518, GFP_KERNEL);
799 if (!virt)
800 goto out;
801
802 mapping = arm_iommu_create_mapping(&platform_bus_type, 0, SZ_1G * 4ULL);
803 if (!mapping) {
804 seq_puts(s, "fast_smmu_create_mapping failed\n");
805 goto out_kfree;
806 }
807
808 if (iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_FAST, &fast)) {
809 seq_puts(s, "iommu_domain_set_attr failed\n");
810 goto out_release_mapping;
811 }
812
813 if (arm_iommu_attach_device(dev, mapping)) {
814 seq_puts(s, "fast_smmu_attach_device failed\n");
815 goto out_release_mapping;
816 }
817
818 if (iommu_enable_config_clocks(mapping->domain)) {
819 seq_puts(s, "Couldn't enable clocks\n");
820 goto out_detach;
821 }
822 for (experiment = 0; experiment < 2; ++experiment) {
823 u64 map_avg = 0, unmap_avg = 0;
824
825 for (i = 0; i < 10; ++i) {
826 struct timespec tbefore, tafter, diff;
827 u64 ns;
828
829 getnstimeofday(&tbefore);
830 dma_addr = dma_map_single_attrs(
831 dev, virt, SZ_4K, DMA_TO_DEVICE,
832 extra_attrs[experiment]);
833 getnstimeofday(&tafter);
834 diff = timespec_sub(tafter, tbefore);
835 ns = timespec_to_ns(&diff);
836 if (dma_mapping_error(dev, dma_addr)) {
837 seq_puts(s, "dma_map_single failed\n");
838 goto out_disable_config_clocks;
839 }
840 map_elapsed_ns[i] = ns;
841
842 getnstimeofday(&tbefore);
843 dma_unmap_single_attrs(
844 dev, dma_addr, SZ_4K, DMA_TO_DEVICE,
845 extra_attrs[experiment]);
846 getnstimeofday(&tafter);
847 diff = timespec_sub(tafter, tbefore);
848 ns = timespec_to_ns(&diff);
849 unmap_elapsed_ns[i] = ns;
850 }
851
852 seq_printf(s, "%13s %24s (ns): [", extra_labels[experiment],
853 "dma_map_single_attrs");
854 for (i = 0; i < 10; ++i) {
855 map_avg += map_elapsed_ns[i];
856 seq_printf(s, "%5llu%s", map_elapsed_ns[i],
857 i < 9 ? ", " : "");
858 }
859 map_avg /= 10;
860 seq_printf(s, "] (avg: %llu)\n", map_avg);
861
862 seq_printf(s, "%13s %24s (ns): [", extra_labels[experiment],
863 "dma_unmap_single_attrs");
864 for (i = 0; i < 10; ++i) {
865 unmap_avg += unmap_elapsed_ns[i];
866 seq_printf(s, "%5llu%s", unmap_elapsed_ns[i],
867 i < 9 ? ", " : "");
868 }
869 unmap_avg /= 10;
870 seq_printf(s, "] (avg: %llu)\n", unmap_avg);
871 }
872
873out_disable_config_clocks:
874 iommu_disable_config_clocks(mapping->domain);
875out_detach:
876 arm_iommu_detach_device(dev);
877out_release_mapping:
878 arm_iommu_release_mapping(mapping);
879out_kfree:
880 kfree(virt);
881out:
882 return 0;
883}
884
/*
 * debugfs open hook: bind the profiling show routine, forwarding the
 * iommu_debug_device stashed in i_private to seq_file->private.
 */
static int iommu_debug_profiling_fast_dma_api_open(struct inode *inode,
						   struct file *file)
{
	return single_open(file, iommu_debug_profiling_fast_dma_api_show,
			   inode->i_private);
}

/* Read-only seq_file interface for the profiling_fast_dma_api node. */
static const struct file_operations iommu_debug_profiling_fast_dma_api_fops = {
	.open = iommu_debug_profiling_fast_dma_api_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
898
Mitchel Humpherys45fc7122015-12-11 15:22:15 -0800899static int __tlb_stress_sweep(struct device *dev, struct seq_file *s)
900{
901 int i, ret = 0;
902 unsigned long iova;
903 const unsigned long max = SZ_1G * 4UL;
904 void *virt;
905 phys_addr_t phys;
906 dma_addr_t dma_addr;
907
908 /*
909 * we'll be doing 4K and 8K mappings. Need to own an entire 8K
910 * chunk that we can work with.
911 */
912 virt = (void *)__get_free_pages(GFP_KERNEL, get_order(SZ_8K));
913 phys = virt_to_phys(virt);
914
915 /* fill the whole 4GB space */
916 for (iova = 0, i = 0; iova < max; iova += SZ_8K, ++i) {
917 dma_addr = dma_map_single(dev, virt, SZ_8K, DMA_TO_DEVICE);
918 if (dma_addr == DMA_ERROR_CODE) {
919 dev_err(dev, "Failed map on iter %d\n", i);
920 ret = -EINVAL;
921 goto out;
922 }
923 }
924
925 if (dma_map_single(dev, virt, SZ_4K, DMA_TO_DEVICE) != DMA_ERROR_CODE) {
926 dev_err(dev,
927 "dma_map_single unexpectedly (VA should have been exhausted)\n");
928 ret = -EINVAL;
929 goto out;
930 }
931
932 /*
933 * free up 4K at the very beginning, then leave one 4K mapping,
934 * then free up 8K. This will result in the next 8K map to skip
935 * over the 4K hole and take the 8K one.
936 */
937 dma_unmap_single(dev, 0, SZ_4K, DMA_TO_DEVICE);
938 dma_unmap_single(dev, SZ_8K, SZ_4K, DMA_TO_DEVICE);
939 dma_unmap_single(dev, SZ_8K + SZ_4K, SZ_4K, DMA_TO_DEVICE);
940
941 /* remap 8K */
942 dma_addr = dma_map_single(dev, virt, SZ_8K, DMA_TO_DEVICE);
943 if (dma_addr != SZ_8K) {
944 dma_addr_t expected = SZ_8K;
945
946 dev_err(dev, "Unexpected dma_addr. got: %pa expected: %pa\n",
947 &dma_addr, &expected);
948 ret = -EINVAL;
949 goto out;
950 }
951
952 /*
953 * now remap 4K. We should get the first 4K chunk that was skipped
954 * over during the previous 8K map. If we missed a TLB invalidate
955 * at that point this should explode.
956 */
957 dma_addr = dma_map_single(dev, virt, SZ_4K, DMA_TO_DEVICE);
958 if (dma_addr != 0) {
959 dma_addr_t expected = 0;
960
961 dev_err(dev, "Unexpected dma_addr. got: %pa expected: %pa\n",
962 &dma_addr, &expected);
963 ret = -EINVAL;
964 goto out;
965 }
966
967 if (dma_map_single(dev, virt, SZ_4K, DMA_TO_DEVICE) != DMA_ERROR_CODE) {
968 dev_err(dev,
969 "dma_map_single unexpectedly after remaps (VA should have been exhausted)\n");
970 ret = -EINVAL;
971 goto out;
972 }
973
974 /* we're all full again. unmap everything. */
975 for (dma_addr = 0; dma_addr < max; dma_addr += SZ_8K)
976 dma_unmap_single(dev, dma_addr, SZ_8K, DMA_TO_DEVICE);
977
978out:
979 free_pages((unsigned long)virt, get_order(SZ_8K));
980 return ret;
981}
982
/*
 * Tiny Fibonacci generator used as a cheap, deterministic
 * "pseudo-random" index source by the VA sweep tests.
 */
struct fib_state {
	unsigned long cur;	/* most recent term */
	unsigned long prev;	/* term before that */
};

/* Reset the sequence to 1, 1. */
static void fib_init(struct fib_state *f)
{
	f->cur = f->prev = 1;
}
992
993static unsigned long get_next_fib(struct fib_state *f)
994{
995 int next = f->cur + f->prev;
996
997 f->prev = f->cur;
998 f->cur = next;
999 return next;
1000}
1001
1002/*
1003 * Not actually random. Just testing the fibs (and max - the fibs).
1004 */
1005static int __rand_va_sweep(struct device *dev, struct seq_file *s,
1006 const size_t size)
1007{
1008 u64 iova;
1009 const unsigned long max = SZ_1G * 4UL;
1010 int i, remapped, unmapped, ret = 0;
1011 void *virt;
1012 dma_addr_t dma_addr, dma_addr2;
1013 struct fib_state fib;
1014
1015 virt = (void *)__get_free_pages(GFP_KERNEL, get_order(size));
1016 if (!virt) {
1017 if (size > SZ_8K) {
1018 dev_err(dev,
1019 "Failed to allocate %s of memory, which is a lot. Skipping test for this size\n",
1020 _size_to_string(size));
1021 return 0;
1022 }
1023 return -ENOMEM;
1024 }
1025
1026 /* fill the whole 4GB space */
1027 for (iova = 0, i = 0; iova < max; iova += size, ++i) {
1028 dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
1029 if (dma_addr == DMA_ERROR_CODE) {
1030 dev_err(dev, "Failed map on iter %d\n", i);
1031 ret = -EINVAL;
1032 goto out;
1033 }
1034 }
1035
1036 /* now unmap "random" iovas */
1037 unmapped = 0;
1038 fib_init(&fib);
1039 for (iova = get_next_fib(&fib) * size;
1040 iova < max - size;
1041 iova = get_next_fib(&fib) * size) {
1042 dma_addr = iova;
1043 dma_addr2 = max - size - iova;
1044 if (dma_addr == dma_addr2) {
1045 WARN(1,
1046 "%s test needs update! The random number sequence is folding in on itself and should be changed.\n",
1047 __func__);
1048 return -EINVAL;
1049 }
1050 dma_unmap_single(dev, dma_addr, size, DMA_TO_DEVICE);
1051 dma_unmap_single(dev, dma_addr2, size, DMA_TO_DEVICE);
1052 unmapped += 2;
1053 }
1054
1055 /* and map until everything fills back up */
1056 for (remapped = 0; ; ++remapped) {
1057 dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
1058 if (dma_addr == DMA_ERROR_CODE)
1059 break;
1060 }
1061
1062 if (unmapped != remapped) {
1063 dev_err(dev,
1064 "Unexpected random remap count! Unmapped %d but remapped %d\n",
1065 unmapped, remapped);
1066 ret = -EINVAL;
1067 }
1068
1069 for (dma_addr = 0; dma_addr < max; dma_addr += size)
1070 dma_unmap_single(dev, dma_addr, size, DMA_TO_DEVICE);
1071
1072out:
1073 free_pages((unsigned long)virt, get_order(size));
1074 return ret;
1075}
1076
1077static int __check_mapping(struct device *dev, struct iommu_domain *domain,
1078 dma_addr_t iova, phys_addr_t expected)
1079{
1080 phys_addr_t res = iommu_iova_to_phys_hard(domain, iova);
1081 phys_addr_t res2 = iommu_iova_to_phys(domain, iova);
1082
1083 WARN(res != res2, "hard/soft iova_to_phys fns don't agree...");
1084
1085 if (res != expected) {
1086 dev_err_ratelimited(dev,
1087 "Bad translation for %pa! Expected: %pa Got: %pa\n",
1088 &iova, &expected, &res);
1089 return -EINVAL;
1090 }
1091
1092 return 0;
1093}
1094
/*
 * Sequentially map a single buffer into the entire 4GB IOVA space in
 * @size-byte steps, verifying the allocator hands out IOVAs in strictly
 * ascending order with no holes.  If @domain is non-NULL, spot-check
 * the translations of the first and last 6MB via __check_mapping().
 * Finally verify that one more map fails (the space must be exhausted).
 *
 * Returns 0 on success (or on an allocation "skip"), negative errno on
 * failure.
 */
static int __full_va_sweep(struct device *dev, struct seq_file *s,
			   const size_t size, struct iommu_domain *domain)
{
	unsigned long iova;
	dma_addr_t dma_addr;
	void *virt;
	phys_addr_t phys;
	int ret = 0, i;

	virt = (void *)__get_free_pages(GFP_KERNEL, get_order(size));
	if (!virt) {
		/* large-order allocations may legitimately fail; skip */
		if (size > SZ_8K) {
			dev_err(dev,
				"Failed to allocate %s of memory, which is a lot. Skipping test for this size\n",
				_size_to_string(size));
			return 0;
		}
		return -ENOMEM;
	}
	phys = virt_to_phys(virt);

	for (iova = 0, i = 0; iova < SZ_1G * 4UL; iova += size, ++i) {
		unsigned long expected = iova;

		dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
		if (dma_addr != expected) {
			dev_err_ratelimited(dev,
					    "Unexpected iova on iter %d (expected: 0x%lx got: 0x%lx)\n",
					    i, expected,
					    (unsigned long)dma_addr);
			ret = -EINVAL;
			goto out;
		}
	}

	if (domain) {
		/* check every mapping from 0..6M */
		for (iova = 0, i = 0; iova < SZ_2M * 3; iova += size, ++i) {
			phys_addr_t expected = phys;

			if (__check_mapping(dev, domain, iova, expected)) {
				dev_err(dev, "iter: %d\n", i);
				ret = -EINVAL;
				goto out;
			}
		}
		/* and from 4G..4G-6M */
		for (iova = 0, i = 0; iova < SZ_2M * 3; iova += size, ++i) {
			phys_addr_t expected = phys;
			unsigned long theiova = ((SZ_1G * 4ULL) - size) - iova;

			if (__check_mapping(dev, domain, theiova, expected)) {
				dev_err(dev, "iter: %d\n", i);
				ret = -EINVAL;
				goto out;
			}
		}
	}

	/* at this point, our VA space should be full */
	dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
	if (dma_addr != DMA_ERROR_CODE) {
		dev_err_ratelimited(dev,
				    "dma_map_single succeeded when it should have failed. Got iova: 0x%lx\n",
				    (unsigned long)dma_addr);
		ret = -EINVAL;
	}

out:
	/*
	 * Unconditionally unmap the whole range, even after an early
	 * failure; unmapping never-mapped IOVAs is presumably tolerated
	 * by the fast mapper here -- TODO confirm.
	 */
	for (dma_addr = 0; dma_addr < SZ_1G * 4UL; dma_addr += size)
		dma_unmap_single(dev, dma_addr, size, DMA_TO_DEVICE);

	free_pages((unsigned long)virt, get_order(size));
	return ret;
}
1170
/*
 * Print test progress to both the kernel log (dev_err) and the test's
 * seq_file so results show up in dmesg and in the debugfs read output.
 */
#define ds_printf(d, s, fmt, ...) ({				\
		dev_err(d, fmt, ##__VA_ARGS__);			\
		seq_printf(s, fmt, ##__VA_ARGS__);		\
	})
1175
1176static int __functional_dma_api_va_test(struct device *dev, struct seq_file *s,
1177 struct iommu_domain *domain, void *priv)
1178{
1179 int i, j, ret = 0;
1180 size_t *sz, *sizes = priv;
1181
1182 for (j = 0; j < 1; ++j) {
1183 for (sz = sizes; *sz; ++sz) {
1184 for (i = 0; i < 2; ++i) {
1185 ds_printf(dev, s, "Full VA sweep @%s %d",
1186 _size_to_string(*sz), i);
1187 if (__full_va_sweep(dev, s, *sz, domain)) {
1188 ds_printf(dev, s, " -> FAILED\n");
1189 ret = -EINVAL;
1190 } else {
1191 ds_printf(dev, s, " -> SUCCEEDED\n");
1192 }
1193 }
1194 }
1195 }
1196
1197 ds_printf(dev, s, "bonus map:");
1198 if (__full_va_sweep(dev, s, SZ_4K, domain)) {
1199 ds_printf(dev, s, " -> FAILED\n");
1200 ret = -EINVAL;
1201 } else {
1202 ds_printf(dev, s, " -> SUCCEEDED\n");
1203 }
1204
1205 for (sz = sizes; *sz; ++sz) {
1206 for (i = 0; i < 2; ++i) {
1207 ds_printf(dev, s, "Rand VA sweep @%s %d",
1208 _size_to_string(*sz), i);
1209 if (__rand_va_sweep(dev, s, *sz)) {
1210 ds_printf(dev, s, " -> FAILED\n");
1211 ret = -EINVAL;
1212 } else {
1213 ds_printf(dev, s, " -> SUCCEEDED\n");
1214 }
1215 }
1216 }
1217
1218 ds_printf(dev, s, "TLB stress sweep");
1219 if (__tlb_stress_sweep(dev, s)) {
1220 ds_printf(dev, s, " -> FAILED\n");
1221 ret = -EINVAL;
1222 } else {
1223 ds_printf(dev, s, " -> SUCCEEDED\n");
1224 }
1225
1226 ds_printf(dev, s, "second bonus map:");
1227 if (__full_va_sweep(dev, s, SZ_4K, domain)) {
1228 ds_printf(dev, s, " -> FAILED\n");
1229 ret = -EINVAL;
1230 } else {
1231 ds_printf(dev, s, " -> SUCCEEDED\n");
1232 }
1233
1234 return ret;
1235}
1236
1237static int __functional_dma_api_alloc_test(struct device *dev,
1238 struct seq_file *s,
1239 struct iommu_domain *domain,
1240 void *ignored)
1241{
1242 size_t size = SZ_1K * 742;
1243 int ret = 0;
1244 u8 *data;
1245 dma_addr_t iova;
1246
1247 /* Make sure we can allocate and use a buffer */
1248 ds_printf(dev, s, "Allocating coherent buffer");
1249 data = dma_alloc_coherent(dev, size, &iova, GFP_KERNEL);
1250 if (!data) {
1251 ds_printf(dev, s, " -> FAILED\n");
1252 ret = -EINVAL;
1253 } else {
1254 int i;
1255
1256 ds_printf(dev, s, " -> SUCCEEDED\n");
1257 ds_printf(dev, s, "Using coherent buffer");
1258 for (i = 0; i < 742; ++i) {
1259 int ind = SZ_1K * i;
1260 u8 *p = data + ind;
1261 u8 val = i % 255;
1262
1263 memset(data, 0xa5, size);
1264 *p = val;
1265 (*p)++;
1266 if ((*p) != val + 1) {
1267 ds_printf(dev, s,
1268 " -> FAILED on iter %d since %d != %d\n",
1269 i, *p, val + 1);
1270 ret = -EINVAL;
1271 }
1272 }
1273 if (!ret)
1274 ds_printf(dev, s, " -> SUCCEEDED\n");
1275 dma_free_coherent(dev, size, data, iova);
1276 }
1277
1278 return ret;
1279}
1280
1281static int __functional_dma_api_basic_test(struct device *dev,
1282 struct seq_file *s,
1283 struct iommu_domain *domain,
1284 void *ignored)
1285{
1286 size_t size = 1518;
1287 int i, j, ret = 0;
1288 u8 *data;
1289 dma_addr_t iova;
1290 phys_addr_t pa, pa2;
1291
1292 ds_printf(dev, s, "Basic DMA API test");
1293 /* Make sure we can allocate and use a buffer */
1294 for (i = 0; i < 1000; ++i) {
1295 data = kmalloc(size, GFP_KERNEL);
1296 if (!data) {
1297 ds_printf(dev, s, " -> FAILED\n");
1298 ret = -EINVAL;
1299 goto out;
1300 }
1301 memset(data, 0xa5, size);
1302 iova = dma_map_single(dev, data, size, DMA_TO_DEVICE);
1303 pa = iommu_iova_to_phys(domain, iova);
1304 pa2 = iommu_iova_to_phys_hard(domain, iova);
1305 if (pa != pa2) {
1306 dev_err(dev,
1307 "iova_to_phys doesn't match iova_to_phys_hard: %pa != %pa\n",
1308 &pa, &pa2);
1309 ret = -EINVAL;
1310 goto out;
1311 }
1312 pa2 = virt_to_phys(data);
1313 if (pa != pa2) {
1314 dev_err(dev,
1315 "iova_to_phys doesn't match virt_to_phys: %pa != %pa\n",
1316 &pa, &pa2);
1317 ret = -EINVAL;
1318 goto out;
1319 }
1320 dma_unmap_single(dev, iova, size, DMA_TO_DEVICE);
1321 for (j = 0; j < size; ++j) {
1322 if (data[j] != 0xa5) {
1323 dev_err(dev, "data[%d] != 0xa5\n", data[j]);
1324 ret = -EINVAL;
1325 goto out;
1326 }
1327 }
1328 kfree(data);
1329 }
1330
1331out:
1332 if (ret)
1333 ds_printf(dev, s, " -> FAILED\n");
1334 else
1335 ds_printf(dev, s, " -> SUCCEEDED\n");
1336
1337 return ret;
1338}
1339
1340/* Creates a fresh fast mapping and applies @fn to it */
1341static int __apply_to_new_mapping(struct seq_file *s,
1342 int (*fn)(struct device *dev,
1343 struct seq_file *s,
1344 struct iommu_domain *domain,
1345 void *priv),
1346 void *priv)
1347{
1348 struct dma_iommu_mapping *mapping;
1349 struct iommu_debug_device *ddev = s->private;
1350 struct device *dev = ddev->dev;
Shiraz Hashim3c28c962016-07-04 15:05:35 +05301351 int ret = -EINVAL, fast = 1;
Mitchel Humpherys45fc7122015-12-11 15:22:15 -08001352 phys_addr_t pt_phys;
1353
1354 mapping = arm_iommu_create_mapping(&platform_bus_type, 0, SZ_1G * 4ULL);
1355 if (!mapping)
1356 goto out;
1357
1358 if (iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_FAST, &fast)) {
1359 seq_puts(s, "iommu_domain_set_attr failed\n");
1360 goto out_release_mapping;
1361 }
1362
1363 if (arm_iommu_attach_device(dev, mapping))
1364 goto out_release_mapping;
1365
1366 if (iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_PT_BASE_ADDR,
1367 &pt_phys)) {
1368 ds_printf(dev, s, "Couldn't get page table base address\n");
1369 goto out_release_mapping;
1370 }
1371
1372 dev_err(dev, "testing with pgtables at %pa\n", &pt_phys);
1373 if (iommu_enable_config_clocks(mapping->domain)) {
1374 ds_printf(dev, s, "Couldn't enable clocks\n");
1375 goto out_release_mapping;
1376 }
1377 ret = fn(dev, s, mapping->domain, priv);
1378 iommu_disable_config_clocks(mapping->domain);
1379
1380 arm_iommu_detach_device(dev);
1381out_release_mapping:
1382 arm_iommu_release_mapping(mapping);
1383out:
1384 seq_printf(s, "%s\n", ret ? "FAIL" : "SUCCESS");
1385 return 0;
1386}
1387
/*
 * Top-level "functional_fast_dma_api" test: run the alloc, basic, and
 * VA-sweep suites, each against a freshly created fast-mapped domain.
 *
 * Note: __apply_to_new_mapping() always returns 0 and reports
 * FAIL/SUCCESS into the seq_file itself, so the |= accumulation below
 * effectively always yields 0.
 */
static int iommu_debug_functional_fast_dma_api_show(struct seq_file *s,
						    void *ignored)
{
	size_t sizes[] = {SZ_4K, SZ_8K, SZ_16K, SZ_64K, 0};
	int ret = 0;

	ret |= __apply_to_new_mapping(s, __functional_dma_api_alloc_test, NULL);
	ret |= __apply_to_new_mapping(s, __functional_dma_api_basic_test, NULL);
	ret |= __apply_to_new_mapping(s, __functional_dma_api_va_test, sizes);
	return ret;
}

/* debugfs open hook: bind the show routine with the device private. */
static int iommu_debug_functional_fast_dma_api_open(struct inode *inode,
						    struct file *file)
{
	return single_open(file, iommu_debug_functional_fast_dma_api_show,
			   inode->i_private);
}

/* Read-only seq_file interface for the functional_fast_dma_api node. */
static const struct file_operations iommu_debug_functional_fast_dma_api_fops = {
	.open = iommu_debug_functional_fast_dma_api_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
1413
/*
 * Top-level "functional_arm_dma_api" test: run the alloc and basic
 * suites against a standard (non-fast) ARM IOMMU DMA mapping.
 */
static int iommu_debug_functional_arm_dma_api_show(struct seq_file *s,
						   void *ignored)
{
	struct dma_iommu_mapping *mapping;
	struct iommu_debug_device *ddev = s->private;
	struct device *dev = ddev->dev;
	size_t sizes[] = {SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12, 0};
	int ret = -EINVAL;

	mapping = arm_iommu_create_mapping(&platform_bus_type, 0, SZ_1G * 4UL);
	if (!mapping)
		goto out;

	if (arm_iommu_attach_device(dev, mapping))
		goto out_release_mapping;

	/*
	 * NOTE(review): both helpers declare their last parameter
	 * "ignored", so passing @sizes here has no effect -- confirm
	 * whether a sized VA test was intended instead.
	 */
	ret = __functional_dma_api_alloc_test(dev, s, mapping->domain, sizes);
	ret |= __functional_dma_api_basic_test(dev, s, mapping->domain, sizes);

	arm_iommu_detach_device(dev);
out_release_mapping:
	arm_iommu_release_mapping(mapping);
out:
	seq_printf(s, "%s\n", ret ? "FAIL" : "SUCCESS");
	return 0;
}

/* debugfs open hook: bind the show routine with the device private. */
static int iommu_debug_functional_arm_dma_api_open(struct inode *inode,
						   struct file *file)
{
	return single_open(file, iommu_debug_functional_arm_dma_api_show,
			   inode->i_private);
}

/* Read-only seq_file interface for the functional_arm_dma_api node. */
static const struct file_operations iommu_debug_functional_arm_dma_api_fops = {
	.open = iommu_debug_functional_arm_dma_api_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
1454
Mitchel Humpherysac4c38b2015-07-30 19:40:21 -07001455static int iommu_debug_attach_do_attach(struct iommu_debug_device *ddev,
1456 int val, bool is_secure)
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001457{
1458 ddev->domain = iommu_domain_alloc(&platform_bus_type);
1459 if (!ddev->domain) {
1460 pr_err("Couldn't allocate domain\n");
1461 return -ENOMEM;
1462 }
1463
Mitchel Humpherysac4c38b2015-07-30 19:40:21 -07001464 if (is_secure && iommu_domain_set_attr(ddev->domain,
1465 DOMAIN_ATTR_SECURE_VMID,
1466 &val)) {
1467 pr_err("Couldn't set secure vmid to %d\n", val);
1468 goto out_domain_free;
1469 }
1470
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001471 if (iommu_attach_device(ddev->domain, ddev->dev)) {
1472 pr_err("Couldn't attach new domain to device. Is it already attached?\n");
1473 goto out_domain_free;
1474 }
1475
1476 return 0;
1477
1478out_domain_free:
1479 iommu_domain_free(ddev->domain);
1480 ddev->domain = NULL;
1481 return -EIO;
1482}
1483
/*
 * Common handler for the "attach" and "secure_attach" debugfs writes.
 * Writing a non-zero value allocates a fresh domain (with the value as
 * the secure VMID when @is_secure) and attaches it; writing zero
 * detaches and frees the current domain.
 *
 * Returns @count on success, negative errno on failure.
 */
static ssize_t __iommu_debug_attach_write(struct file *file,
					  const char __user *ubuf,
					  size_t count, loff_t *offset,
					  bool is_secure)
{
	struct iommu_debug_device *ddev = file->private_data;
	ssize_t retval;
	int val;

	if (kstrtoint_from_user(ubuf, count, 0, &val)) {
		pr_err("Invalid format. Expected a hex or decimal integer");
		/* NOTE(review): -EINVAL arguably fits a parse error better */
		retval = -EFAULT;
		goto out;
	}

	if (val) {
		if (ddev->domain) {
			pr_err("Already attached.\n");
			retval = -EINVAL;
			goto out;
		}
		/*
		 * archdata.iommu presumably mirrors the attach state --
		 * verify against the attach implementation.
		 */
		if (WARN(ddev->dev->archdata.iommu,
			 "Attachment tracking out of sync with device\n")) {
			retval = -EINVAL;
			goto out;
		}
		if (iommu_debug_attach_do_attach(ddev, val, is_secure)) {
			retval = -EIO;
			goto out;
		}
		pr_err("Attached\n");
	} else {
		if (!ddev->domain) {
			pr_err("No domain. Did you already attach?\n");
			retval = -EINVAL;
			goto out;
		}
		iommu_detach_device(ddev->domain, ddev->dev);
		iommu_domain_free(ddev->domain);
		ddev->domain = NULL;
		pr_err("Detached\n");
	}

	retval = count;
out:
	return retval;
}
1531
/* "attach" debugfs write: non-secure variant of the common handler. */
static ssize_t iommu_debug_attach_write(struct file *file,
					const char __user *ubuf,
					size_t count, loff_t *offset)
{
	return __iommu_debug_attach_write(file, ubuf, count, offset,
					  false);

}
1540
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001541static ssize_t iommu_debug_attach_read(struct file *file, char __user *ubuf,
1542 size_t count, loff_t *offset)
1543{
1544 struct iommu_debug_device *ddev = file->private_data;
1545 char c[2];
1546
1547 if (*offset)
1548 return 0;
1549
1550 c[0] = ddev->domain ? '1' : '0';
1551 c[1] = '\n';
1552 if (copy_to_user(ubuf, &c, 2)) {
1553 pr_err("copy_to_user failed\n");
1554 return -EFAULT;
1555 }
1556 *offset = 1; /* non-zero means we're done */
1557
1558 return 2;
1559}
1560
/* Attach/detach control plus current-state readback (non-secure). */
static const struct file_operations iommu_debug_attach_fops = {
	.open = simple_open,
	.write = iommu_debug_attach_write,
	.read = iommu_debug_attach_read,
};

/* "secure_attach" debugfs write: secure-VMID variant. */
static ssize_t iommu_debug_attach_write_secure(struct file *file,
					       const char __user *ubuf,
					       size_t count, loff_t *offset)
{
	return __iommu_debug_attach_write(file, ubuf, count, offset,
					  true);

}

/* Same interface as attach_fops, but attaches with a secure VMID. */
static const struct file_operations iommu_debug_secure_attach_fops = {
	.open = simple_open,
	.write = iommu_debug_attach_write_secure,
	.read = iommu_debug_attach_read,
};
1581
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001582static ssize_t iommu_debug_atos_write(struct file *file,
1583 const char __user *ubuf,
1584 size_t count, loff_t *offset)
1585{
1586 struct iommu_debug_device *ddev = file->private_data;
1587 dma_addr_t iova;
1588
1589 if (kstrtoll_from_user(ubuf, count, 0, &iova)) {
1590 pr_err("Invalid format for iova\n");
1591 ddev->iova = 0;
1592 return -EINVAL;
1593 }
1594
1595 ddev->iova = iova;
1596 pr_err("Saved iova=%pa for future ATOS commands\n", &iova);
1597 return count;
1598}
1599
1600static ssize_t iommu_debug_atos_read(struct file *file, char __user *ubuf,
1601 size_t count, loff_t *offset)
1602{
1603 struct iommu_debug_device *ddev = file->private_data;
1604 phys_addr_t phys;
1605 char buf[100];
1606 ssize_t retval;
1607 size_t buflen;
1608
1609 if (!ddev->domain) {
1610 pr_err("No domain. Did you already attach?\n");
1611 return -EINVAL;
1612 }
1613
1614 if (*offset)
1615 return 0;
1616
1617 memset(buf, 0, 100);
1618
1619 phys = iommu_iova_to_phys_hard(ddev->domain, ddev->iova);
1620 if (!phys)
1621 strlcpy(buf, "FAIL\n", 100);
1622 else
1623 snprintf(buf, 100, "%pa\n", &phys);
1624
1625 buflen = strlen(buf);
1626 if (copy_to_user(ubuf, buf, buflen)) {
1627 pr_err("Couldn't copy_to_user\n");
1628 retval = -EFAULT;
1629 } else {
1630 *offset = 1; /* non-zero means we're done */
1631 retval = buflen;
1632 }
1633
1634 return retval;
1635}
1636
/* Write an iova, then read back its hardware (ATOS) translation. */
static const struct file_operations iommu_debug_atos_fops = {
	.open = simple_open,
	.write = iommu_debug_atos_write,
	.read = iommu_debug_atos_read,
};
1642
1643static ssize_t iommu_debug_map_write(struct file *file, const char __user *ubuf,
1644 size_t count, loff_t *offset)
1645{
Shiraz Hashim3c28c962016-07-04 15:05:35 +05301646 ssize_t retval = -EINVAL;
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001647 int ret;
1648 char *comma1, *comma2, *comma3;
1649 char buf[100];
1650 dma_addr_t iova;
1651 phys_addr_t phys;
1652 size_t size;
1653 int prot;
1654 struct iommu_debug_device *ddev = file->private_data;
1655
1656 if (count >= 100) {
1657 pr_err("Value too large\n");
1658 return -EINVAL;
1659 }
1660
1661 if (!ddev->domain) {
1662 pr_err("No domain. Did you already attach?\n");
1663 return -EINVAL;
1664 }
1665
1666 memset(buf, 0, 100);
1667
1668 if (copy_from_user(buf, ubuf, count)) {
1669 pr_err("Couldn't copy from user\n");
1670 retval = -EFAULT;
1671 }
1672
1673 comma1 = strnchr(buf, count, ',');
1674 if (!comma1)
1675 goto invalid_format;
1676
1677 comma2 = strnchr(comma1 + 1, count, ',');
1678 if (!comma2)
1679 goto invalid_format;
1680
1681 comma3 = strnchr(comma2 + 1, count, ',');
1682 if (!comma3)
1683 goto invalid_format;
1684
1685 /* split up the words */
1686 *comma1 = *comma2 = *comma3 = '\0';
1687
1688 if (kstrtou64(buf, 0, &iova))
1689 goto invalid_format;
1690
1691 if (kstrtou64(comma1 + 1, 0, &phys))
1692 goto invalid_format;
1693
1694 if (kstrtoul(comma2 + 1, 0, &size))
1695 goto invalid_format;
1696
1697 if (kstrtoint(comma3 + 1, 0, &prot))
1698 goto invalid_format;
1699
1700 ret = iommu_map(ddev->domain, iova, phys, size, prot);
1701 if (ret) {
1702 pr_err("iommu_map failed with %d\n", ret);
1703 retval = -EIO;
1704 goto out;
1705 }
1706
1707 retval = count;
1708 pr_err("Mapped %pa to %pa (len=0x%zx, prot=0x%x)\n",
1709 &iova, &phys, size, prot);
1710out:
1711 return retval;
1712
1713invalid_format:
1714 pr_err("Invalid format. Expected: iova,phys,len,prot where `prot' is the bitwise OR of IOMMU_READ, IOMMU_WRITE, etc.\n");
1715 return -EINVAL;
1716}
1717
/* Write-only node: establish a mapping in the attached domain. */
static const struct file_operations iommu_debug_map_fops = {
	.open = simple_open,
	.write = iommu_debug_map_write,
};
1722
1723static ssize_t iommu_debug_unmap_write(struct file *file,
1724 const char __user *ubuf,
1725 size_t count, loff_t *offset)
1726{
1727 ssize_t retval = 0;
1728 char *comma1;
1729 char buf[100];
1730 dma_addr_t iova;
1731 size_t size;
1732 size_t unmapped;
1733 struct iommu_debug_device *ddev = file->private_data;
1734
1735 if (count >= 100) {
1736 pr_err("Value too large\n");
1737 return -EINVAL;
1738 }
1739
1740 if (!ddev->domain) {
1741 pr_err("No domain. Did you already attach?\n");
1742 return -EINVAL;
1743 }
1744
1745 memset(buf, 0, 100);
1746
1747 if (copy_from_user(buf, ubuf, count)) {
1748 pr_err("Couldn't copy from user\n");
1749 retval = -EFAULT;
1750 goto out;
1751 }
1752
1753 comma1 = strnchr(buf, count, ',');
1754 if (!comma1)
1755 goto invalid_format;
1756
1757 /* split up the words */
1758 *comma1 = '\0';
1759
1760 if (kstrtou64(buf, 0, &iova))
1761 goto invalid_format;
1762
Mitchel Humpherys70b75112015-07-29 12:45:29 -07001763 if (kstrtoul(comma1 + 1, 0, &size))
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001764 goto invalid_format;
1765
1766 unmapped = iommu_unmap(ddev->domain, iova, size);
1767 if (unmapped != size) {
1768 pr_err("iommu_unmap failed. Expected to unmap: 0x%zx, unmapped: 0x%zx",
1769 size, unmapped);
1770 return -EIO;
1771 }
1772
1773 retval = count;
1774 pr_err("Unmapped %pa (len=0x%zx)\n", &iova, size);
1775out:
1776 return retval;
1777
1778invalid_format:
1779 pr_err("Invalid format. Expected: iova,len\n");
1780 return retval;
1781}
1782
/* Write-only node: tear down a mapping in the attached domain. */
static const struct file_operations iommu_debug_unmap_fops = {
	.open = simple_open,
	.write = iommu_debug_unmap_write,
};
1787
Mitchel Humpherys0d1b8262016-02-01 16:53:39 -08001788static ssize_t iommu_debug_config_clocks_write(struct file *file,
1789 const char __user *ubuf,
1790 size_t count, loff_t *offset)
1791{
1792 char buf;
1793 struct iommu_debug_device *ddev = file->private_data;
1794 struct device *dev = ddev->dev;
1795
1796 /* we're expecting a single character plus (optionally) a newline */
1797 if (count > 2) {
1798 dev_err(dev, "Invalid value\n");
1799 return -EINVAL;
1800 }
1801
1802 if (!ddev->domain) {
1803 dev_err(dev, "No domain. Did you already attach?\n");
1804 return -EINVAL;
1805 }
1806
1807 if (copy_from_user(&buf, ubuf, 1)) {
1808 dev_err(dev, "Couldn't copy from user\n");
1809 return -EFAULT;
1810 }
1811
1812 switch (buf) {
1813 case '0':
1814 dev_err(dev, "Disabling config clocks\n");
1815 iommu_disable_config_clocks(ddev->domain);
1816 break;
1817 case '1':
1818 dev_err(dev, "Enabling config clocks\n");
1819 if (iommu_enable_config_clocks(ddev->domain))
1820 dev_err(dev, "Failed!\n");
1821 break;
1822 default:
1823 dev_err(dev, "Invalid value. Should be 0 or 1.\n");
1824 return -EINVAL;
1825 }
1826
1827 return count;
1828}
1829
/* Write-only node: '1'/'0' to enable/disable IOMMU config clocks. */
static const struct file_operations iommu_debug_config_clocks_fops = {
	.open = simple_open,
	.write = iommu_debug_config_clocks_write,
};
1834
/*
 * The following will only work for drivers that implement the generic
 * device tree bindings described in
 * Documentation/devicetree/bindings/iommu/iommu.txt
 *
 * bus_for_each_dev() callback: for every platform device that sits
 * behind an IOMMU (has an "iommus" DT property), create a debugfs
 * directory under iommu/tests/<dev> populated with the test nodes.
 *
 * Note: debugfs failures tear down this device's directory but return
 * 0 so the bus iteration continues; only the kzalloc failure aborts it
 * (with -ENODEV).
 */
static int snarf_iommu_devices(struct device *dev, void *ignored)
{
	struct iommu_debug_device *ddev;
	struct dentry *dir;

	if (!of_find_property(dev->of_node, "iommus", NULL))
		return 0;

	ddev = kzalloc(sizeof(*ddev), GFP_KERNEL);
	if (!ddev)
		return -ENODEV;
	ddev->dev = dev;
	dir = debugfs_create_dir(dev_name(dev), debugfs_tests_dir);
	if (!dir) {
		pr_err("Couldn't create iommu/devices/%s debugfs dir\n",
		       dev_name(dev));
		goto err;
	}

	/* nr_iters binds the global iters_per_op knob, not ddev */
	if (!debugfs_create_file("nr_iters", S_IRUSR, dir, &iters_per_op,
				 &iommu_debug_nr_iters_ops)) {
		pr_err("Couldn't create iommu/devices/%s/nr_iters debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("profiling", S_IRUSR, dir, ddev,
				 &iommu_debug_profiling_fops)) {
		pr_err("Couldn't create iommu/devices/%s/profiling debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("secure_profiling", S_IRUSR, dir, ddev,
				 &iommu_debug_secure_profiling_fops)) {
		pr_err("Couldn't create iommu/devices/%s/secure_profiling debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("profiling_fast", S_IRUSR, dir, ddev,
				 &iommu_debug_profiling_fast_fops)) {
		pr_err("Couldn't create iommu/devices/%s/profiling_fast debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("profiling_fast_dma_api", S_IRUSR, dir, ddev,
				 &iommu_debug_profiling_fast_dma_api_fops)) {
		pr_err("Couldn't create iommu/devices/%s/profiling_fast_dma_api debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("functional_fast_dma_api", S_IRUSR, dir, ddev,
				 &iommu_debug_functional_fast_dma_api_fops)) {
		pr_err("Couldn't create iommu/devices/%s/functional_fast_dma_api debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("functional_arm_dma_api", S_IRUSR, dir, ddev,
				 &iommu_debug_functional_arm_dma_api_fops)) {
		pr_err("Couldn't create iommu/devices/%s/functional_arm_dma_api debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("attach", S_IRUSR, dir, ddev,
				 &iommu_debug_attach_fops)) {
		pr_err("Couldn't create iommu/devices/%s/attach debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("secure_attach", S_IRUSR, dir, ddev,
				 &iommu_debug_secure_attach_fops)) {
		pr_err("Couldn't create iommu/devices/%s/secure_attach debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("atos", S_IWUSR, dir, ddev,
				 &iommu_debug_atos_fops)) {
		pr_err("Couldn't create iommu/devices/%s/atos debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("map", S_IWUSR, dir, ddev,
				 &iommu_debug_map_fops)) {
		pr_err("Couldn't create iommu/devices/%s/map debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("unmap", S_IWUSR, dir, ddev,
				 &iommu_debug_unmap_fops)) {
		pr_err("Couldn't create iommu/devices/%s/unmap debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("config_clocks", S_IWUSR, dir, ddev,
				 &iommu_debug_config_clocks_fops)) {
		pr_err("Couldn't create iommu/devices/%s/config_clocks debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	list_add(&ddev->list, &iommu_debug_devices);
	return 0;

err_rmdir:
	debugfs_remove_recursive(dir);
err:
	kfree(ddev);
	/* keep iterating over the remaining devices on the bus */
	return 0;
}
1959
1960static int iommu_debug_init_tests(void)
1961{
1962 debugfs_tests_dir = debugfs_create_dir("tests",
Mitchel Humpherysc75ae492015-07-15 18:27:36 -07001963 iommu_debugfs_top);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07001964 if (!debugfs_tests_dir) {
1965 pr_err("Couldn't create iommu/tests debugfs directory\n");
1966 return -ENODEV;
1967 }
1968
1969 return bus_for_each_dev(&platform_bus_type, NULL, NULL,
1970 snarf_iommu_devices);
1971}
Mitchel Humpherysc75ae492015-07-15 18:27:36 -07001972
/* Tear down everything created under the iommu/tests debugfs directory. */
static void iommu_debug_destroy_tests(void)
{
	/* no-op if the directory was never created (pointer is NULL) */
	debugfs_remove_recursive(debugfs_tests_dir);
}
#else
/* Test support compiled out: provide no-op stubs so callers need no #ifdefs. */
static inline int iommu_debug_init_tests(void) { return 0; }
static inline void iommu_debug_destroy_tests(void) { }
#endif
1981
/*
 * Stub probe/remove callback shared by the "driver" below. The driver
 * exists only so the device tree can carry `iommu-debug-test' nodes with
 * an `iommus' property for the tests to parse — no client functionality
 * is required, so probe and remove simply report success.
 */
static int iommu_debug_pass(struct platform_device *pdev)
{
	return 0;
}
1991
/* Matches the dummy test nodes ("iommu-debug-test") in the device tree. */
static const struct of_device_id iommu_debug_of_match[] = {
	{ .compatible = "iommu-debug-test" },
	{ },
};
1996
/*
 * Minimal platform driver whose probe and remove intentionally do
 * nothing (see iommu_debug_pass above); it only binds the test nodes.
 */
static struct platform_driver iommu_debug_driver = {
	.probe = iommu_debug_pass,
	.remove = iommu_debug_pass,
	.driver = {
		.name = "iommu-debug",
		.of_match_table = iommu_debug_of_match,
	},
};
2005
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002006static int iommu_debug_init(void)
2007{
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002008 if (iommu_debug_init_tracking())
Mitchel Humpherysc75ae492015-07-15 18:27:36 -07002009 return -ENODEV;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002010
2011 if (iommu_debug_init_tests())
Mitchel Humpherysc75ae492015-07-15 18:27:36 -07002012 return -ENODEV;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002013
Mitchel Humpherys93f7eef2016-04-13 17:08:49 -07002014 return platform_driver_register(&iommu_debug_driver);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002015}
2016
2017static void iommu_debug_exit(void)
2018{
Mitchel Humpherys93f7eef2016-04-13 17:08:49 -07002019 platform_driver_unregister(&iommu_debug_driver);
Mitchel Humpherysc75ae492015-07-15 18:27:36 -07002020 iommu_debug_destroy_tracking();
2021 iommu_debug_destroy_tests();
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002022}
2023
/* Module entry/exit hooks. */
module_init(iommu_debug_init);
module_exit(iommu_debug_exit);