blob: 181e8897a80f1dfe1dbc15ed440efc3d802faf5e [file] [log] [blame]
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07001/*
Charan Teja Reddy29f61402017-02-09 20:44:29 +05302 * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14
15#define pr_fmt(fmt) "iommu-debug: %s: " fmt, __func__
16
17#include <linux/debugfs.h>
18#include <linux/device.h>
19#include <linux/iommu.h>
20#include <linux/of.h>
21#include <linux/platform_device.h>
22#include <linux/slab.h>
23#include <linux/module.h>
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -070024#include <linux/uaccess.h>
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -070025#include <linux/dma-contiguous.h>
Mitchel Humpherys5e991f12015-07-30 19:25:54 -070026#include <soc/qcom/secure_buffer.h>
Mitchel Humpherysbe3060c2015-10-08 15:08:01 -070027#include <linux/dma-mapping.h>
28#include <asm/cacheflush.h>
29#include <asm/dma-iommu.h>
Mitchel Humpherys42296fb2015-06-23 16:29:16 -070030
Susheel Khiania4417e72016-07-12 11:28:32 +053031#if defined(CONFIG_IOMMU_DEBUG_TRACKING) || defined(CONFIG_IOMMU_TESTS)
32
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -070033static const char *iommu_debug_attr_to_string(enum iommu_attr attr)
34{
35 switch (attr) {
36 case DOMAIN_ATTR_GEOMETRY:
37 return "DOMAIN_ATTR_GEOMETRY";
38 case DOMAIN_ATTR_PAGING:
39 return "DOMAIN_ATTR_PAGING";
40 case DOMAIN_ATTR_WINDOWS:
41 return "DOMAIN_ATTR_WINDOWS";
42 case DOMAIN_ATTR_FSL_PAMU_STASH:
43 return "DOMAIN_ATTR_FSL_PAMU_STASH";
44 case DOMAIN_ATTR_FSL_PAMU_ENABLE:
45 return "DOMAIN_ATTR_FSL_PAMU_ENABLE";
46 case DOMAIN_ATTR_FSL_PAMUV1:
47 return "DOMAIN_ATTR_FSL_PAMUV1";
48 case DOMAIN_ATTR_NESTING:
49 return "DOMAIN_ATTR_NESTING";
50 case DOMAIN_ATTR_PT_BASE_ADDR:
51 return "DOMAIN_ATTR_PT_BASE_ADDR";
52 case DOMAIN_ATTR_SECURE_VMID:
53 return "DOMAIN_ATTR_SECURE_VMID";
54 case DOMAIN_ATTR_ATOMIC:
55 return "DOMAIN_ATTR_ATOMIC";
56 case DOMAIN_ATTR_CONTEXT_BANK:
57 return "DOMAIN_ATTR_CONTEXT_BANK";
58 case DOMAIN_ATTR_TTBR0:
59 return "DOMAIN_ATTR_TTBR0";
60 case DOMAIN_ATTR_CONTEXTIDR:
61 return "DOMAIN_ATTR_CONTEXTIDR";
62 case DOMAIN_ATTR_PROCID:
63 return "DOMAIN_ATTR_PROCID";
64 case DOMAIN_ATTR_DYNAMIC:
65 return "DOMAIN_ATTR_DYNAMIC";
66 case DOMAIN_ATTR_NON_FATAL_FAULTS:
67 return "DOMAIN_ATTR_NON_FATAL_FAULTS";
68 case DOMAIN_ATTR_S1_BYPASS:
69 return "DOMAIN_ATTR_S1_BYPASS";
70 case DOMAIN_ATTR_FAST:
71 return "DOMAIN_ATTR_FAST";
Patrick Dalyef6c1dc2016-11-16 14:35:23 -080072 case DOMAIN_ATTR_EARLY_MAP:
73 return "DOMAIN_ATTR_EARLY_MAP";
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -070074 default:
75 return "Unknown attr!";
76 }
77}
Susheel Khiania4417e72016-07-12 11:28:32 +053078#endif
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -070079
Mitchel Humpherys42296fb2015-06-23 16:29:16 -070080#ifdef CONFIG_IOMMU_DEBUG_TRACKING
81
/* Protects iommu_debug_attachments and every entry on it. */
static DEFINE_MUTEX(iommu_debug_attachments_lock);
static LIST_HEAD(iommu_debug_attachments);
/* "attachments" directory under the top-level iommu debugfs dir; NULL
 * until iommu_debug_init_tracking() runs.
 */
static struct dentry *debugfs_attachments_dir;

/*
 * One entry per tracked domain.  ->dev is non-NULL only while a device
 * is attached to the domain (see iommu_debug_attach_device() /
 * iommu_debug_detach_device()); ->dentry is the per-attachment debugfs
 * directory, created lazily once debugfs_attachments_dir exists.
 */
struct iommu_debug_attachment {
	struct iommu_domain *domain;
	struct device *dev;
	struct dentry *dentry;
	struct list_head list;
	unsigned long reg_offset;	/* offset used by reg_read/reg_write files */
};
93
Mitchel Humpherys088cc582015-07-09 15:02:03 -070094static int iommu_debug_attachment_info_show(struct seq_file *s, void *ignored)
Mitchel Humpherys42296fb2015-06-23 16:29:16 -070095{
96 struct iommu_debug_attachment *attach = s->private;
Mitchel Humpherys5e991f12015-07-30 19:25:54 -070097 int secure_vmid;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -070098
99 seq_printf(s, "Domain: 0x%p\n", attach->domain);
Mitchel Humpherys5e991f12015-07-30 19:25:54 -0700100
101 seq_puts(s, "SECURE_VMID: ");
102 if (iommu_domain_get_attr(attach->domain,
103 DOMAIN_ATTR_SECURE_VMID,
104 &secure_vmid))
105 seq_puts(s, "(Unknown)\n");
106 else
107 seq_printf(s, "%s (0x%x)\n",
108 msm_secure_vmid_to_string(secure_vmid), secure_vmid);
109
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700110 return 0;
111}
112
Mitchel Humpherys088cc582015-07-09 15:02:03 -0700113static int iommu_debug_attachment_info_open(struct inode *inode,
114 struct file *file)
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700115{
Mitchel Humpherys088cc582015-07-09 15:02:03 -0700116 return single_open(file, iommu_debug_attachment_info_show,
117 inode->i_private);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700118}
119
Mitchel Humpherys088cc582015-07-09 15:02:03 -0700120static const struct file_operations iommu_debug_attachment_info_fops = {
121 .open = iommu_debug_attachment_info_open,
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700122 .read = seq_read,
123 .llseek = seq_lseek,
124 .release = single_release,
125};
126
Mitchel Humpherys0dc04de2015-08-21 14:08:40 -0700127static ssize_t iommu_debug_attachment_reg_offset_write(
128 struct file *file, const char __user *ubuf, size_t count,
129 loff_t *offset)
130{
131 struct iommu_debug_attachment *attach = file->private_data;
132 unsigned long reg_offset;
133
134 if (kstrtoul_from_user(ubuf, count, 0, &reg_offset)) {
135 pr_err("Invalid reg_offset format\n");
136 return -EFAULT;
137 }
138
139 attach->reg_offset = reg_offset;
140
141 return count;
142}
143
/* write-only "reg_offset" debugfs file */
static const struct file_operations iommu_debug_attachment_reg_offset_fops = {
	.open	= simple_open,
	.write	= iommu_debug_attachment_reg_offset_write,
};
148
149static ssize_t iommu_debug_attachment_reg_read_read(
150 struct file *file, char __user *ubuf, size_t count, loff_t *offset)
151{
152 struct iommu_debug_attachment *attach = file->private_data;
153 unsigned long val;
154 char *val_str;
155 ssize_t val_str_len;
156
157 if (*offset)
158 return 0;
159
160 val = iommu_reg_read(attach->domain, attach->reg_offset);
161 val_str = kasprintf(GFP_KERNEL, "0x%lx\n", val);
162 if (!val_str)
163 return -ENOMEM;
164 val_str_len = strlen(val_str);
165
166 if (copy_to_user(ubuf, val_str, val_str_len)) {
167 pr_err("copy_to_user failed\n");
168 val_str_len = -EFAULT;
169 goto out;
170 }
171 *offset = 1; /* non-zero means we're done */
172
173out:
174 kfree(val_str);
175 return val_str_len;
176}
177
/* read-only "reg_read" debugfs file */
static const struct file_operations iommu_debug_attachment_reg_read_fops = {
	.open	= simple_open,
	.read	= iommu_debug_attachment_reg_read_read,
};
182
183static ssize_t iommu_debug_attachment_reg_write_write(
184 struct file *file, const char __user *ubuf, size_t count,
185 loff_t *offset)
186{
187 struct iommu_debug_attachment *attach = file->private_data;
188 unsigned long val;
189
190 if (kstrtoul_from_user(ubuf, count, 0, &val)) {
191 pr_err("Invalid val format\n");
192 return -EFAULT;
193 }
194
195 iommu_reg_write(attach->domain, attach->reg_offset, val);
196
197 return count;
198}
199
/* write-only "reg_write" debugfs file */
static const struct file_operations iommu_debug_attachment_reg_write_fops = {
	.open	= simple_open,
	.write	= iommu_debug_attachment_reg_write_write,
};
204
Mitchel Humpherysa05b2522015-07-14 14:30:33 -0700205/* should be called with iommu_debug_attachments_lock locked */
206static int iommu_debug_attach_add_debugfs(
207 struct iommu_debug_attachment *attach)
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700208{
Mitchel Humpherys54379212015-08-26 11:52:57 -0700209 const char *attach_name;
Mitchel Humpherysa05b2522015-07-14 14:30:33 -0700210 struct device *dev = attach->dev;
211 struct iommu_domain *domain = attach->domain;
Mitchel Humpherys54379212015-08-26 11:52:57 -0700212 int is_dynamic;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700213
Mitchel Humpherys54379212015-08-26 11:52:57 -0700214 if (iommu_domain_get_attr(domain, DOMAIN_ATTR_DYNAMIC, &is_dynamic))
215 is_dynamic = 0;
216
217 if (is_dynamic) {
218 uuid_le uuid;
219
220 uuid_le_gen(&uuid);
221 attach_name = kasprintf(GFP_KERNEL, "%s-%pUl", dev_name(dev),
222 uuid.b);
223 if (!attach_name)
224 return -ENOMEM;
225 } else {
226 attach_name = dev_name(dev);
227 }
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700228
Mitchel Humpherys088cc582015-07-09 15:02:03 -0700229 attach->dentry = debugfs_create_dir(attach_name,
230 debugfs_attachments_dir);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700231 if (!attach->dentry) {
Mitchel Humpherys088cc582015-07-09 15:02:03 -0700232 pr_err("Couldn't create iommu/attachments/%s debugfs directory for domain 0x%p\n",
Mitchel Humpherys876e2be2015-07-10 11:56:56 -0700233 attach_name, domain);
Mitchel Humpherys54379212015-08-26 11:52:57 -0700234 if (is_dynamic)
235 kfree(attach_name);
Mitchel Humpherysa05b2522015-07-14 14:30:33 -0700236 return -EIO;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700237 }
Mitchel Humpherys54379212015-08-26 11:52:57 -0700238
239 if (is_dynamic)
240 kfree(attach_name);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700241
Mitchel Humpherys088cc582015-07-09 15:02:03 -0700242 if (!debugfs_create_file(
243 "info", S_IRUSR, attach->dentry, attach,
244 &iommu_debug_attachment_info_fops)) {
245 pr_err("Couldn't create iommu/attachments/%s/info debugfs file for domain 0x%p\n",
246 dev_name(dev), domain);
Mitchel Humpherysa05b2522015-07-14 14:30:33 -0700247 goto err_rmdir;
Mitchel Humpherys088cc582015-07-09 15:02:03 -0700248 }
249
Mitchel Humpherys288086e2015-07-09 16:55:08 -0700250 if (!debugfs_create_file(
Mitchel Humpherys0dc04de2015-08-21 14:08:40 -0700251 "reg_offset", S_IRUSR, attach->dentry, attach,
252 &iommu_debug_attachment_reg_offset_fops)) {
253 pr_err("Couldn't create iommu/attachments/%s/reg_offset debugfs file for domain 0x%p\n",
254 dev_name(dev), domain);
255 goto err_rmdir;
256 }
257
258 if (!debugfs_create_file(
259 "reg_read", S_IRUSR, attach->dentry, attach,
260 &iommu_debug_attachment_reg_read_fops)) {
261 pr_err("Couldn't create iommu/attachments/%s/reg_read debugfs file for domain 0x%p\n",
262 dev_name(dev), domain);
263 goto err_rmdir;
264 }
265
266 if (!debugfs_create_file(
267 "reg_write", S_IRUSR, attach->dentry, attach,
268 &iommu_debug_attachment_reg_write_fops)) {
269 pr_err("Couldn't create iommu/attachments/%s/reg_write debugfs file for domain 0x%p\n",
270 dev_name(dev), domain);
271 goto err_rmdir;
272 }
273
Mitchel Humpherysa05b2522015-07-14 14:30:33 -0700274 return 0;
275
276err_rmdir:
277 debugfs_remove_recursive(attach->dentry);
278 return -EIO;
279}
280
Susheel Khianie66aa5b2015-08-25 17:25:42 +0530281void iommu_debug_domain_add(struct iommu_domain *domain)
Mitchel Humpherysa05b2522015-07-14 14:30:33 -0700282{
283 struct iommu_debug_attachment *attach;
284
285 mutex_lock(&iommu_debug_attachments_lock);
286
287 attach = kmalloc(sizeof(*attach), GFP_KERNEL);
288 if (!attach)
289 goto out_unlock;
290
291 attach->domain = domain;
Susheel Khianie66aa5b2015-08-25 17:25:42 +0530292 attach->dev = NULL;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700293 list_add(&attach->list, &iommu_debug_attachments);
Susheel Khianie66aa5b2015-08-25 17:25:42 +0530294
Mitchel Humpherysa05b2522015-07-14 14:30:33 -0700295out_unlock:
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700296 mutex_unlock(&iommu_debug_attachments_lock);
297}
298
Susheel Khianie66aa5b2015-08-25 17:25:42 +0530299void iommu_debug_domain_remove(struct iommu_domain *domain)
300{
301 struct iommu_debug_attachment *it;
302
303 mutex_lock(&iommu_debug_attachments_lock);
304 list_for_each_entry(it, &iommu_debug_attachments, list)
305 if (it->domain == domain && it->dev == NULL)
306 break;
307
308 if (&it->list == &iommu_debug_attachments) {
309 WARN(1, "Couldn't find debug attachment for domain=0x%p",
310 domain);
311 } else {
312 list_del(&it->list);
313 kfree(it);
314 }
315 mutex_unlock(&iommu_debug_attachments_lock);
316}
317
318void iommu_debug_attach_device(struct iommu_domain *domain,
319 struct device *dev)
320{
321 struct iommu_debug_attachment *attach;
322
323 mutex_lock(&iommu_debug_attachments_lock);
324
325 list_for_each_entry(attach, &iommu_debug_attachments, list)
326 if (attach->domain == domain && attach->dev == NULL)
327 break;
328
329 if (&attach->list == &iommu_debug_attachments) {
330 WARN(1, "Couldn't find debug attachment for domain=0x%p dev=%s",
331 domain, dev_name(dev));
332 } else {
333 attach->dev = dev;
334
335 /*
336 * we might not init until after other drivers start calling
337 * iommu_attach_device. Only set up the debugfs nodes if we've
338 * already init'd to avoid polluting the top-level debugfs
339 * directory (by calling debugfs_create_dir with a NULL
340 * parent). These will be flushed out later once we init.
341 */
342
343 if (debugfs_attachments_dir)
344 iommu_debug_attach_add_debugfs(attach);
345 }
346
347 mutex_unlock(&iommu_debug_attachments_lock);
348}
349
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700350void iommu_debug_detach_device(struct iommu_domain *domain,
351 struct device *dev)
352{
353 struct iommu_debug_attachment *it;
354
355 mutex_lock(&iommu_debug_attachments_lock);
356 list_for_each_entry(it, &iommu_debug_attachments, list)
357 if (it->domain == domain && it->dev == dev)
358 break;
359
360 if (&it->list == &iommu_debug_attachments) {
361 WARN(1, "Couldn't find debug attachment for domain=0x%p dev=%s",
362 domain, dev_name(dev));
363 } else {
Susheel Khianie66aa5b2015-08-25 17:25:42 +0530364 /*
365 * Just remove debugfs entry and mark dev as NULL on
366 * iommu_detach call. We would remove the actual
367 * attachment entry from the list only on domain_free call.
368 * This is to ensure we keep track of unattached domains too.
369 */
370
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700371 debugfs_remove_recursive(it->dentry);
Susheel Khianie66aa5b2015-08-25 17:25:42 +0530372 it->dev = NULL;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700373 }
374 mutex_unlock(&iommu_debug_attachments_lock);
375}
376
377static int iommu_debug_init_tracking(void)
378{
Mitchel Humpherysa05b2522015-07-14 14:30:33 -0700379 int ret = 0;
380 struct iommu_debug_attachment *attach;
381
382 mutex_lock(&iommu_debug_attachments_lock);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700383 debugfs_attachments_dir = debugfs_create_dir("attachments",
Mitchel Humpherysc75ae492015-07-15 18:27:36 -0700384 iommu_debugfs_top);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700385 if (!debugfs_attachments_dir) {
386 pr_err("Couldn't create iommu/attachments debugfs directory\n");
Mitchel Humpherysa05b2522015-07-14 14:30:33 -0700387 ret = -ENODEV;
388 goto out_unlock;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700389 }
390
Mitchel Humpherysa05b2522015-07-14 14:30:33 -0700391 /* set up debugfs entries for attachments made during early boot */
392 list_for_each_entry(attach, &iommu_debug_attachments, list)
Susheel Khianie66aa5b2015-08-25 17:25:42 +0530393 if (attach->dev)
394 iommu_debug_attach_add_debugfs(attach);
Mitchel Humpherysa05b2522015-07-14 14:30:33 -0700395
396out_unlock:
397 mutex_unlock(&iommu_debug_attachments_lock);
398 return ret;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700399}
Mitchel Humpherysc75ae492015-07-15 18:27:36 -0700400
/* Remove the attachments directory and everything beneath it. */
static void iommu_debug_destroy_tracking(void)
{
	debugfs_remove_recursive(debugfs_attachments_dir);
}
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700405#else
/* CONFIG_IOMMU_DEBUG_TRACKING disabled: no-op stubs */
static inline int iommu_debug_init_tracking(void) { return 0; }
static inline void iommu_debug_destroy_tracking(void) { }
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700408#endif
409
410#ifdef CONFIG_IOMMU_TESTS
411
/*
 * Width-appropriate string parsing helpers: the test files below parse
 * user input into pointer-sized/size_t values, whose width differs
 * between 32- and 64-bit builds.
 */
#ifdef CONFIG_64BIT

#define kstrtoux kstrtou64
#define kstrtox_from_user kstrtoull_from_user
#define kstrtosize_t kstrtoul

#else

#define kstrtoux kstrtou32
#define kstrtox_from_user kstrtouint_from_user
#define kstrtosize_t kstrtouint

#endif
425
static LIST_HEAD(iommu_debug_devices);
static struct dentry *debugfs_tests_dir;
/* Iterations averaged per profiled map/unmap op; settable via debugfs. */
static u32 iters_per_op = 1;

/*
 * Per-device state for the debugfs test files.  iova/phys/len appear to
 * be scratch parameters set by test files outside this chunk --
 * NOTE(review): confirm against the rest of the file.
 */
struct iommu_debug_device {
	struct device *dev;
	struct iommu_domain *domain;
	u64 iova;
	u64 phys;
	size_t len;
	struct list_head list;
};
438
439static int iommu_debug_build_phoney_sg_table(struct device *dev,
440 struct sg_table *table,
441 unsigned long total_size,
442 unsigned long chunk_size)
443{
444 unsigned long nents = total_size / chunk_size;
445 struct scatterlist *sg;
446 int i;
447 struct page *page;
448
449 if (!IS_ALIGNED(total_size, PAGE_SIZE))
450 return -EINVAL;
451 if (!IS_ALIGNED(total_size, chunk_size))
452 return -EINVAL;
453 if (sg_alloc_table(table, nents, GFP_KERNEL))
454 return -EINVAL;
455 page = alloc_pages(GFP_KERNEL, get_order(chunk_size));
456 if (!page)
457 goto free_table;
458
459 /* all the same page... why not. */
460 for_each_sg(table->sgl, sg, table->nents, i)
461 sg_set_page(sg, page, chunk_size, 0);
462
463 return 0;
464
465free_table:
466 sg_free_table(table);
467 return -ENOMEM;
468}
469
/*
 * Free the single backing page and the table built by
 * iommu_debug_build_phoney_sg_table().  chunk_size must match the value
 * passed at build time so the correct allocation order is freed.
 */
static void iommu_debug_destroy_phoney_sg_table(struct device *dev,
						struct sg_table *table,
						unsigned long chunk_size)
{
	__free_pages(sg_page(table->sgl), get_order(chunk_size));
	sg_free_table(table);
}
477
478static const char * const _size_to_string(unsigned long size)
479{
480 switch (size) {
481 case SZ_4K:
482 return "4K";
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700483 case SZ_8K:
484 return "8K";
485 case SZ_16K:
486 return "16K";
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700487 case SZ_64K:
488 return "64K";
489 case SZ_2M:
490 return "2M";
491 case SZ_1M * 12:
492 return "12M";
493 case SZ_1M * 20:
494 return "20M";
495 }
496 return "unknown size, please add to _size_to_string";
497}
498
Patrick Dalye4e39862015-11-20 20:00:50 -0800499static int nr_iters_set(void *data, u64 val)
500{
501 if (!val)
502 val = 1;
503 if (val > 10000)
504 val = 10000;
505 *(u32 *)data = val;
506 return 0;
507}
508
509static int nr_iters_get(void *data, u64 *val)
510{
511 *val = *(u32 *)data;
512 return 0;
513}
514
515DEFINE_SIMPLE_ATTRIBUTE(iommu_debug_nr_iters_ops,
516 nr_iters_get, nr_iters_set, "%llu\n");
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700517
Mitchel Humpherys020f90f2015-10-02 16:02:31 -0700518static void iommu_debug_device_profiling(struct seq_file *s, struct device *dev,
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700519 enum iommu_attr attrs[],
520 void *attr_values[], int nattrs,
Susheel Khiania4417e72016-07-12 11:28:32 +0530521 const size_t sizes[])
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700522{
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700523 int i;
Susheel Khiania4417e72016-07-12 11:28:32 +0530524 const size_t *sz;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700525 struct iommu_domain *domain;
526 unsigned long iova = 0x10000;
527 phys_addr_t paddr = 0xa000;
528
529 domain = iommu_domain_alloc(&platform_bus_type);
530 if (!domain) {
531 seq_puts(s, "Couldn't allocate domain\n");
532 return;
533 }
534
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700535 seq_puts(s, "Domain attributes: [ ");
536 for (i = 0; i < nattrs; ++i) {
537 /* not all attrs are ints, but this will get us by for now */
538 seq_printf(s, "%s=%d%s", iommu_debug_attr_to_string(attrs[i]),
539 *((int *)attr_values[i]),
540 i < nattrs ? " " : "");
Mitchel Humpherys679567c2015-08-28 10:51:24 -0700541 }
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700542 seq_puts(s, "]\n");
543 for (i = 0; i < nattrs; ++i) {
544 if (iommu_domain_set_attr(domain, attrs[i], attr_values[i])) {
545 seq_printf(s, "Couldn't set %d to the value at %p\n",
546 attrs[i], attr_values[i]);
Mitchel Humpherys020f90f2015-10-02 16:02:31 -0700547 goto out_domain_free;
548 }
549 }
550
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700551 if (iommu_attach_device(domain, dev)) {
552 seq_puts(s,
553 "Couldn't attach new domain to device. Is it already attached?\n");
554 goto out_domain_free;
555 }
556
Patrick Dalye4e39862015-11-20 20:00:50 -0800557 seq_printf(s, "(average over %d iterations)\n", iters_per_op);
Patrick Daly3ca31e32015-11-20 20:33:04 -0800558 seq_printf(s, "%8s %19s %16s\n", "size", "iommu_map", "iommu_unmap");
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700559 for (sz = sizes; *sz; ++sz) {
Susheel Khiania4417e72016-07-12 11:28:32 +0530560 size_t size = *sz;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700561 size_t unmapped;
Patrick Daly3ca31e32015-11-20 20:33:04 -0800562 u64 map_elapsed_ns = 0, unmap_elapsed_ns = 0;
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700563 u64 map_elapsed_us = 0, unmap_elapsed_us = 0;
Patrick Daly3ca31e32015-11-20 20:33:04 -0800564 u32 map_elapsed_rem = 0, unmap_elapsed_rem = 0;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700565 struct timespec tbefore, tafter, diff;
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700566 int i;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700567
Patrick Dalye4e39862015-11-20 20:00:50 -0800568 for (i = 0; i < iters_per_op; ++i) {
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700569 getnstimeofday(&tbefore);
570 if (iommu_map(domain, iova, paddr, size,
571 IOMMU_READ | IOMMU_WRITE)) {
572 seq_puts(s, "Failed to map\n");
573 continue;
574 }
575 getnstimeofday(&tafter);
576 diff = timespec_sub(tafter, tbefore);
Patrick Daly3ca31e32015-11-20 20:33:04 -0800577 map_elapsed_ns += timespec_to_ns(&diff);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700578
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700579 getnstimeofday(&tbefore);
580 unmapped = iommu_unmap(domain, iova, size);
581 if (unmapped != size) {
582 seq_printf(s,
583 "Only unmapped %zx instead of %zx\n",
584 unmapped, size);
585 continue;
586 }
587 getnstimeofday(&tafter);
588 diff = timespec_sub(tafter, tbefore);
Patrick Daly3ca31e32015-11-20 20:33:04 -0800589 unmap_elapsed_ns += timespec_to_ns(&diff);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700590 }
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700591
Susheel Khiania4417e72016-07-12 11:28:32 +0530592 map_elapsed_ns = div_u64_rem(map_elapsed_ns, iters_per_op,
593 &map_elapsed_rem);
594 unmap_elapsed_ns = div_u64_rem(unmap_elapsed_ns, iters_per_op,
595 &unmap_elapsed_rem);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700596
Patrick Daly3ca31e32015-11-20 20:33:04 -0800597 map_elapsed_us = div_u64_rem(map_elapsed_ns, 1000,
598 &map_elapsed_rem);
599 unmap_elapsed_us = div_u64_rem(unmap_elapsed_ns, 1000,
600 &unmap_elapsed_rem);
601
602 seq_printf(s, "%8s %12lld.%03d us %9lld.%03d us\n",
603 _size_to_string(size),
604 map_elapsed_us, map_elapsed_rem,
605 unmap_elapsed_us, unmap_elapsed_rem);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700606 }
607
608 seq_putc(s, '\n');
Patrick Daly3ca31e32015-11-20 20:33:04 -0800609 seq_printf(s, "%8s %19s %16s\n", "size", "iommu_map_sg", "iommu_unmap");
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700610 for (sz = sizes; *sz; ++sz) {
Susheel Khiania4417e72016-07-12 11:28:32 +0530611 size_t size = *sz;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700612 size_t unmapped;
Patrick Daly3ca31e32015-11-20 20:33:04 -0800613 u64 map_elapsed_ns = 0, unmap_elapsed_ns = 0;
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700614 u64 map_elapsed_us = 0, unmap_elapsed_us = 0;
Patrick Daly3ca31e32015-11-20 20:33:04 -0800615 u32 map_elapsed_rem = 0, unmap_elapsed_rem = 0;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700616 struct timespec tbefore, tafter, diff;
617 struct sg_table table;
618 unsigned long chunk_size = SZ_4K;
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700619 int i;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700620
621 if (iommu_debug_build_phoney_sg_table(dev, &table, size,
622 chunk_size)) {
623 seq_puts(s,
624 "couldn't build phoney sg table! bailing...\n");
625 goto out_detach;
626 }
627
Patrick Dalye4e39862015-11-20 20:00:50 -0800628 for (i = 0; i < iters_per_op; ++i) {
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700629 getnstimeofday(&tbefore);
630 if (iommu_map_sg(domain, iova, table.sgl, table.nents,
631 IOMMU_READ | IOMMU_WRITE) != size) {
632 seq_puts(s, "Failed to map_sg\n");
633 goto next;
634 }
635 getnstimeofday(&tafter);
636 diff = timespec_sub(tafter, tbefore);
Patrick Daly3ca31e32015-11-20 20:33:04 -0800637 map_elapsed_ns += timespec_to_ns(&diff);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700638
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700639 getnstimeofday(&tbefore);
640 unmapped = iommu_unmap(domain, iova, size);
641 if (unmapped != size) {
642 seq_printf(s,
643 "Only unmapped %zx instead of %zx\n",
644 unmapped, size);
645 goto next;
646 }
647 getnstimeofday(&tafter);
648 diff = timespec_sub(tafter, tbefore);
Patrick Daly3ca31e32015-11-20 20:33:04 -0800649 unmap_elapsed_ns += timespec_to_ns(&diff);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700650 }
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700651
Susheel Khiania4417e72016-07-12 11:28:32 +0530652 map_elapsed_ns = div_u64_rem(map_elapsed_ns, iters_per_op,
653 &map_elapsed_rem);
654 unmap_elapsed_ns = div_u64_rem(unmap_elapsed_ns, iters_per_op,
655 &unmap_elapsed_rem);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700656
Patrick Daly3ca31e32015-11-20 20:33:04 -0800657 map_elapsed_us = div_u64_rem(map_elapsed_ns, 1000,
658 &map_elapsed_rem);
659 unmap_elapsed_us = div_u64_rem(unmap_elapsed_ns, 1000,
660 &unmap_elapsed_rem);
661
662 seq_printf(s, "%8s %12lld.%03d us %9lld.%03d us\n",
663 _size_to_string(size),
664 map_elapsed_us, map_elapsed_rem,
665 unmap_elapsed_us, unmap_elapsed_rem);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700666
667next:
668 iommu_debug_destroy_phoney_sg_table(dev, &table, chunk_size);
669 }
670
671out_detach:
672 iommu_detach_device(domain, dev);
673out_domain_free:
674 iommu_domain_free(domain);
675}
676
Mitchel Humpherys7cc56e42015-07-06 14:58:23 -0700677static int iommu_debug_profiling_show(struct seq_file *s, void *ignored)
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700678{
679 struct iommu_debug_device *ddev = s->private;
Susheel Khiania4417e72016-07-12 11:28:32 +0530680 const size_t sizes[] = { SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12,
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700681 SZ_1M * 20, 0 };
682 enum iommu_attr attrs[] = {
683 DOMAIN_ATTR_ATOMIC,
684 };
685 int htw_disable = 1, atomic = 1;
686 void *attr_values[] = { &htw_disable, &atomic };
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700687
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700688 iommu_debug_device_profiling(s, ddev->dev, attrs, attr_values,
689 ARRAY_SIZE(attrs), sizes);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700690
691 return 0;
692}
693
Mitchel Humpherys7cc56e42015-07-06 14:58:23 -0700694static int iommu_debug_profiling_open(struct inode *inode, struct file *file)
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700695{
Mitchel Humpherys7cc56e42015-07-06 14:58:23 -0700696 return single_open(file, iommu_debug_profiling_show, inode->i_private);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700697}
698
Mitchel Humpherys7cc56e42015-07-06 14:58:23 -0700699static const struct file_operations iommu_debug_profiling_fops = {
700 .open = iommu_debug_profiling_open,
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700701 .read = seq_read,
702 .llseek = seq_lseek,
703 .release = single_release,
704};
705
Mitchel Humpherys020f90f2015-10-02 16:02:31 -0700706static int iommu_debug_secure_profiling_show(struct seq_file *s, void *ignored)
707{
708 struct iommu_debug_device *ddev = s->private;
Susheel Khiania4417e72016-07-12 11:28:32 +0530709 const size_t sizes[] = { SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12,
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700710 SZ_1M * 20, 0 };
Mitchel Humpherys020f90f2015-10-02 16:02:31 -0700711
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700712 enum iommu_attr attrs[] = {
713 DOMAIN_ATTR_ATOMIC,
714 DOMAIN_ATTR_SECURE_VMID,
715 };
716 int one = 1, secure_vmid = VMID_CP_PIXEL;
717 void *attr_values[] = { &one, &secure_vmid };
718
719 iommu_debug_device_profiling(s, ddev->dev, attrs, attr_values,
720 ARRAY_SIZE(attrs), sizes);
Mitchel Humpherys020f90f2015-10-02 16:02:31 -0700721
722 return 0;
723}
724
725static int iommu_debug_secure_profiling_open(struct inode *inode,
726 struct file *file)
727{
728 return single_open(file, iommu_debug_secure_profiling_show,
729 inode->i_private);
730}
731
732static const struct file_operations iommu_debug_secure_profiling_fops = {
733 .open = iommu_debug_secure_profiling_open,
734 .read = seq_read,
735 .llseek = seq_lseek,
736 .release = single_release,
737};
738
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700739static int iommu_debug_profiling_fast_show(struct seq_file *s, void *ignored)
740{
741 struct iommu_debug_device *ddev = s->private;
742 size_t sizes[] = {SZ_4K, SZ_8K, SZ_16K, SZ_64K, 0};
743 enum iommu_attr attrs[] = {
744 DOMAIN_ATTR_FAST,
745 DOMAIN_ATTR_ATOMIC,
746 };
747 int one = 1;
748 void *attr_values[] = { &one, &one };
749
750 iommu_debug_device_profiling(s, ddev->dev, attrs, attr_values,
751 ARRAY_SIZE(attrs), sizes);
752
753 return 0;
754}
755
756static int iommu_debug_profiling_fast_open(struct inode *inode,
757 struct file *file)
758{
759 return single_open(file, iommu_debug_profiling_fast_show,
760 inode->i_private);
761}
762
763static const struct file_operations iommu_debug_profiling_fast_fops = {
764 .open = iommu_debug_profiling_fast_open,
765 .read = seq_read,
766 .llseek = seq_lseek,
767 .release = single_release,
768};
769
/*
 * "profiling_fast_dma_api" file: time 10 dma_map_single_attrs /
 * dma_unmap_single_attrs pairs through an arm_iommu mapping with
 * DOMAIN_ATTR_FAST set, once with full CPU cache maintenance and once
 * with DMA_ATTR_SKIP_CPU_SYNC, and print each sample plus the average.
 * All resources are released through the goto cleanup chain; always
 * returns 0 so the seq_file still shows any error text.
 */
static int iommu_debug_profiling_fast_dma_api_show(struct seq_file *s,
						 void *ignored)
{
	int i, experiment;
	struct iommu_debug_device *ddev = s->private;
	struct device *dev = ddev->dev;
	u64 map_elapsed_ns[10], unmap_elapsed_ns[10];
	struct dma_iommu_mapping *mapping;
	dma_addr_t dma_addr;
	void *virt;
	int fast = 1;
	/*
	 * Labels pair with extra_attrs below: index 0 = normal (cache
	 * maintenance performed), index 1 = DMA_ATTR_SKIP_CPU_SYNC.
	 */
	const char * const extra_labels[] = {
		"not coherent",
		"coherent",
	};
	unsigned long extra_attrs[] = {
		0,
		DMA_ATTR_SKIP_CPU_SYNC,
	};

	/* 1518 bytes: presumably an Ethernet-frame-sized buffer -- TODO confirm */
	virt = kmalloc(1518, GFP_KERNEL);
	if (!virt)
		goto out;

	mapping = arm_iommu_create_mapping(&platform_bus_type, 0, SZ_1G * 4ULL);
	if (!mapping) {
		seq_puts(s, "fast_smmu_create_mapping failed\n");
		goto out_kfree;
	}

	/* must be set before attach so the fast-path allocator is used */
	if (iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_FAST, &fast)) {
		seq_puts(s, "iommu_domain_set_attr failed\n");
		goto out_release_mapping;
	}

	if (arm_iommu_attach_device(dev, mapping)) {
		seq_puts(s, "fast_smmu_attach_device failed\n");
		goto out_release_mapping;
	}

	/* keep config clocks on so register access doesn't skew timings */
	if (iommu_enable_config_clocks(mapping->domain)) {
		seq_puts(s, "Couldn't enable clocks\n");
		goto out_detach;
	}
	for (experiment = 0; experiment < 2; ++experiment) {
		size_t map_avg = 0, unmap_avg = 0;

		for (i = 0; i < 10; ++i) {
			struct timespec tbefore, tafter, diff;
			u64 ns;

			getnstimeofday(&tbefore);
			dma_addr = dma_map_single_attrs(
				dev, virt, SZ_4K, DMA_TO_DEVICE,
				extra_attrs[experiment]);
			getnstimeofday(&tafter);
			diff = timespec_sub(tafter, tbefore);
			ns = timespec_to_ns(&diff);
			if (dma_mapping_error(dev, dma_addr)) {
				seq_puts(s, "dma_map_single failed\n");
				goto out_disable_config_clocks;
			}
			map_elapsed_ns[i] = ns;

			getnstimeofday(&tbefore);
			dma_unmap_single_attrs(
				dev, dma_addr, SZ_4K, DMA_TO_DEVICE,
				extra_attrs[experiment]);
			getnstimeofday(&tafter);
			diff = timespec_sub(tafter, tbefore);
			ns = timespec_to_ns(&diff);
			unmap_elapsed_ns[i] = ns;
		}

		seq_printf(s, "%13s %24s (ns): [", extra_labels[experiment],
			   "dma_map_single_attrs");
		for (i = 0; i < 10; ++i) {
			map_avg += map_elapsed_ns[i];
			seq_printf(s, "%5llu%s", map_elapsed_ns[i],
				   i < 9 ? ", " : "");
		}
		map_avg /= 10;
		seq_printf(s, "] (avg: %zu)\n", map_avg);

		seq_printf(s, "%13s %24s (ns): [", extra_labels[experiment],
			   "dma_unmap_single_attrs");
		for (i = 0; i < 10; ++i) {
			unmap_avg += unmap_elapsed_ns[i];
			seq_printf(s, "%5llu%s", unmap_elapsed_ns[i],
				   i < 9 ? ", " : "");
		}
		unmap_avg /= 10;
		seq_printf(s, "] (avg: %zu)\n", unmap_avg);
	}

out_disable_config_clocks:
	iommu_disable_config_clocks(mapping->domain);
out_detach:
	arm_iommu_detach_device(dev);
out_release_mapping:
	arm_iommu_release_mapping(mapping);
out_kfree:
	kfree(virt);
out:
	return 0;
}
876
877static int iommu_debug_profiling_fast_dma_api_open(struct inode *inode,
878 struct file *file)
879{
880 return single_open(file, iommu_debug_profiling_fast_dma_api_show,
881 inode->i_private);
882}
883
/* seq_file-backed fops for the "profiling_fast_dma_api" debugfs file */
static const struct file_operations iommu_debug_profiling_fast_dma_api_fops = {
	.open = iommu_debug_profiling_fast_dma_api_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
890
Mitchel Humpherys45fc7122015-12-11 15:22:15 -0800891static int __tlb_stress_sweep(struct device *dev, struct seq_file *s)
892{
893 int i, ret = 0;
Charan Teja Reddy29f61402017-02-09 20:44:29 +0530894 u64 iova;
895 const u64 max = SZ_1G * 4ULL - 1;
Mitchel Humpherys45fc7122015-12-11 15:22:15 -0800896 void *virt;
897 phys_addr_t phys;
898 dma_addr_t dma_addr;
899
900 /*
901 * we'll be doing 4K and 8K mappings. Need to own an entire 8K
902 * chunk that we can work with.
903 */
904 virt = (void *)__get_free_pages(GFP_KERNEL, get_order(SZ_8K));
905 phys = virt_to_phys(virt);
906
907 /* fill the whole 4GB space */
908 for (iova = 0, i = 0; iova < max; iova += SZ_8K, ++i) {
909 dma_addr = dma_map_single(dev, virt, SZ_8K, DMA_TO_DEVICE);
910 if (dma_addr == DMA_ERROR_CODE) {
911 dev_err(dev, "Failed map on iter %d\n", i);
912 ret = -EINVAL;
913 goto out;
914 }
915 }
916
917 if (dma_map_single(dev, virt, SZ_4K, DMA_TO_DEVICE) != DMA_ERROR_CODE) {
918 dev_err(dev,
919 "dma_map_single unexpectedly (VA should have been exhausted)\n");
920 ret = -EINVAL;
921 goto out;
922 }
923
924 /*
925 * free up 4K at the very beginning, then leave one 4K mapping,
926 * then free up 8K. This will result in the next 8K map to skip
927 * over the 4K hole and take the 8K one.
928 */
929 dma_unmap_single(dev, 0, SZ_4K, DMA_TO_DEVICE);
930 dma_unmap_single(dev, SZ_8K, SZ_4K, DMA_TO_DEVICE);
931 dma_unmap_single(dev, SZ_8K + SZ_4K, SZ_4K, DMA_TO_DEVICE);
932
933 /* remap 8K */
934 dma_addr = dma_map_single(dev, virt, SZ_8K, DMA_TO_DEVICE);
935 if (dma_addr != SZ_8K) {
936 dma_addr_t expected = SZ_8K;
937
938 dev_err(dev, "Unexpected dma_addr. got: %pa expected: %pa\n",
939 &dma_addr, &expected);
940 ret = -EINVAL;
941 goto out;
942 }
943
944 /*
945 * now remap 4K. We should get the first 4K chunk that was skipped
946 * over during the previous 8K map. If we missed a TLB invalidate
947 * at that point this should explode.
948 */
949 dma_addr = dma_map_single(dev, virt, SZ_4K, DMA_TO_DEVICE);
950 if (dma_addr != 0) {
951 dma_addr_t expected = 0;
952
953 dev_err(dev, "Unexpected dma_addr. got: %pa expected: %pa\n",
954 &dma_addr, &expected);
955 ret = -EINVAL;
956 goto out;
957 }
958
959 if (dma_map_single(dev, virt, SZ_4K, DMA_TO_DEVICE) != DMA_ERROR_CODE) {
960 dev_err(dev,
961 "dma_map_single unexpectedly after remaps (VA should have been exhausted)\n");
962 ret = -EINVAL;
963 goto out;
964 }
965
966 /* we're all full again. unmap everything. */
Charan Teja Reddy29f61402017-02-09 20:44:29 +0530967 for (iova = 0; iova < max; iova += SZ_8K)
968 dma_unmap_single(dev, (dma_addr_t)iova, SZ_8K, DMA_TO_DEVICE);
Mitchel Humpherys45fc7122015-12-11 15:22:15 -0800969
970out:
971 free_pages((unsigned long)virt, get_order(SZ_8K));
972 return ret;
973}
974
/* Simple fibonacci generator used to pick pseudo-random IOVA indices. */
struct fib_state {
	unsigned long cur;	/* most recent fibonacci number */
	unsigned long prev;	/* the one before it */
};

/* Reset the sequence to 1, 1, ... */
static void fib_init(struct fib_state *f)
{
	f->cur = f->prev = 1;
}

/*
 * Advance the sequence and return the next fibonacci number.
 * The sum is kept in unsigned long: the old code funnelled it through a
 * plain int, truncating large values and risking signed-overflow UB.
 */
static unsigned long get_next_fib(struct fib_state *f)
{
	unsigned long next = f->cur + f->prev;

	f->prev = f->cur;
	f->cur = next;
	return next;
}
993
994/*
995 * Not actually random. Just testing the fibs (and max - the fibs).
996 */
997static int __rand_va_sweep(struct device *dev, struct seq_file *s,
998 const size_t size)
999{
1000 u64 iova;
Charan Teja Reddy29f61402017-02-09 20:44:29 +05301001 const u64 max = SZ_1G * 4ULL - 1;
Mitchel Humpherys45fc7122015-12-11 15:22:15 -08001002 int i, remapped, unmapped, ret = 0;
1003 void *virt;
1004 dma_addr_t dma_addr, dma_addr2;
1005 struct fib_state fib;
1006
1007 virt = (void *)__get_free_pages(GFP_KERNEL, get_order(size));
1008 if (!virt) {
1009 if (size > SZ_8K) {
1010 dev_err(dev,
1011 "Failed to allocate %s of memory, which is a lot. Skipping test for this size\n",
1012 _size_to_string(size));
1013 return 0;
1014 }
1015 return -ENOMEM;
1016 }
1017
1018 /* fill the whole 4GB space */
1019 for (iova = 0, i = 0; iova < max; iova += size, ++i) {
1020 dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
1021 if (dma_addr == DMA_ERROR_CODE) {
1022 dev_err(dev, "Failed map on iter %d\n", i);
1023 ret = -EINVAL;
1024 goto out;
1025 }
1026 }
1027
1028 /* now unmap "random" iovas */
1029 unmapped = 0;
1030 fib_init(&fib);
1031 for (iova = get_next_fib(&fib) * size;
1032 iova < max - size;
Charan Teja Reddy29f61402017-02-09 20:44:29 +05301033 iova = (u64)get_next_fib(&fib) * size) {
1034 dma_addr = (dma_addr_t)(iova);
1035 dma_addr2 = (dma_addr_t)((max + 1) - size - iova);
Mitchel Humpherys45fc7122015-12-11 15:22:15 -08001036 if (dma_addr == dma_addr2) {
1037 WARN(1,
1038 "%s test needs update! The random number sequence is folding in on itself and should be changed.\n",
1039 __func__);
1040 return -EINVAL;
1041 }
1042 dma_unmap_single(dev, dma_addr, size, DMA_TO_DEVICE);
1043 dma_unmap_single(dev, dma_addr2, size, DMA_TO_DEVICE);
1044 unmapped += 2;
1045 }
1046
1047 /* and map until everything fills back up */
1048 for (remapped = 0; ; ++remapped) {
1049 dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
1050 if (dma_addr == DMA_ERROR_CODE)
1051 break;
1052 }
1053
1054 if (unmapped != remapped) {
1055 dev_err(dev,
1056 "Unexpected random remap count! Unmapped %d but remapped %d\n",
1057 unmapped, remapped);
1058 ret = -EINVAL;
1059 }
1060
Charan Teja Reddy29f61402017-02-09 20:44:29 +05301061 for (iova = 0; iova < max; iova += size)
1062 dma_unmap_single(dev, (dma_addr_t)iova, size, DMA_TO_DEVICE);
Mitchel Humpherys45fc7122015-12-11 15:22:15 -08001063
1064out:
1065 free_pages((unsigned long)virt, get_order(size));
1066 return ret;
1067}
1068
1069static int __check_mapping(struct device *dev, struct iommu_domain *domain,
1070 dma_addr_t iova, phys_addr_t expected)
1071{
1072 phys_addr_t res = iommu_iova_to_phys_hard(domain, iova);
1073 phys_addr_t res2 = iommu_iova_to_phys(domain, iova);
1074
1075 WARN(res != res2, "hard/soft iova_to_phys fns don't agree...");
1076
1077 if (res != expected) {
1078 dev_err_ratelimited(dev,
1079 "Bad translation for %pa! Expected: %pa Got: %pa\n",
1080 &iova, &expected, &res);
1081 return -EINVAL;
1082 }
1083
1084 return 0;
1085}
1086
/*
 * Map @size-sized chunks until the whole 4GB IOVA space is full,
 * asserting that the allocator hands out sequentially increasing iovas
 * starting at 0.  When @domain is given, spot-check translations for
 * the first and last 6MB of the space, then confirm one further map
 * fails (space exhausted).  All mappings are torn down before return.
 *
 * Returns 0 on success (or when a huge allocation is skipped),
 * -EINVAL/-ENOMEM on failure.
 */
static int __full_va_sweep(struct device *dev, struct seq_file *s,
			   const size_t size, struct iommu_domain *domain)
{
	u64 iova;
	dma_addr_t dma_addr;
	void *virt;
	phys_addr_t phys;
	const u64 max = SZ_1G * 4ULL - 1;
	int ret = 0, i;

	virt = (void *)__get_free_pages(GFP_KERNEL, get_order(size));
	if (!virt) {
		/* best-effort for big sizes: skip rather than fail */
		if (size > SZ_8K) {
			dev_err(dev,
				"Failed to allocate %s of memory, which is a lot. Skipping test for this size\n",
				_size_to_string(size));
			return 0;
		}
		return -ENOMEM;
	}
	phys = virt_to_phys(virt);

	for (iova = 0, i = 0; iova < max; iova += size, ++i) {
		/* allocator is expected to hand out 0, size, 2*size, ... */
		unsigned long expected = iova;

		dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
		if (dma_addr != expected) {
			dev_err_ratelimited(dev,
					    "Unexpected iova on iter %d (expected: 0x%lx got: 0x%lx)\n",
					    i, expected,
					    (unsigned long)dma_addr);
			ret = -EINVAL;
			goto out;
		}
	}

	if (domain) {
		/* check every mapping from 0..6M */
		for (iova = 0, i = 0; iova < SZ_2M * 3; iova += size, ++i) {
			phys_addr_t expected = phys;

			if (__check_mapping(dev, domain, iova, expected)) {
				dev_err(dev, "iter: %d\n", i);
				ret = -EINVAL;
				goto out;
			}
		}
		/* and from 4G..4G-6M */
		for (iova = 0, i = 0; iova < SZ_2M * 3; iova += size, ++i) {
			phys_addr_t expected = phys;
			/*
			 * NOTE(review): unsigned long holds values up to
			 * 4G-size here; fits in 32 bits, but confirm on
			 * any target where dma_addr_t is wider than long.
			 */
			unsigned long theiova = ((SZ_1G * 4ULL) - size) - iova;

			if (__check_mapping(dev, domain, theiova, expected)) {
				dev_err(dev, "iter: %d\n", i);
				ret = -EINVAL;
				goto out;
			}
		}
	}

	/* at this point, our VA space should be full */
	dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
	if (dma_addr != DMA_ERROR_CODE) {
		dev_err_ratelimited(dev,
				    "dma_map_single succeeded when it should have failed. Got iova: 0x%lx\n",
				    (unsigned long)dma_addr);
		ret = -EINVAL;
	}

out:
	for (iova = 0; iova < max; iova += size)
		dma_unmap_single(dev, (dma_addr_t)iova, size, DMA_TO_DEVICE);

	free_pages((unsigned long)virt, get_order(size));
	return ret;
}
1163
/*
 * Print to both the kernel log (dev_err) and the seq_file, so results
 * show up in dmesg as well as in the process reading the debugfs file.
 */
#define ds_printf(d, s, fmt, ...) ({ \
		dev_err(d, fmt, ##__VA_ARGS__); \
		seq_printf(s, fmt, ##__VA_ARGS__); \
	})
1168
1169static int __functional_dma_api_va_test(struct device *dev, struct seq_file *s,
1170 struct iommu_domain *domain, void *priv)
1171{
1172 int i, j, ret = 0;
1173 size_t *sz, *sizes = priv;
1174
1175 for (j = 0; j < 1; ++j) {
1176 for (sz = sizes; *sz; ++sz) {
1177 for (i = 0; i < 2; ++i) {
1178 ds_printf(dev, s, "Full VA sweep @%s %d",
1179 _size_to_string(*sz), i);
1180 if (__full_va_sweep(dev, s, *sz, domain)) {
1181 ds_printf(dev, s, " -> FAILED\n");
1182 ret = -EINVAL;
1183 } else {
1184 ds_printf(dev, s, " -> SUCCEEDED\n");
1185 }
1186 }
1187 }
1188 }
1189
1190 ds_printf(dev, s, "bonus map:");
1191 if (__full_va_sweep(dev, s, SZ_4K, domain)) {
1192 ds_printf(dev, s, " -> FAILED\n");
1193 ret = -EINVAL;
1194 } else {
1195 ds_printf(dev, s, " -> SUCCEEDED\n");
1196 }
1197
1198 for (sz = sizes; *sz; ++sz) {
1199 for (i = 0; i < 2; ++i) {
1200 ds_printf(dev, s, "Rand VA sweep @%s %d",
1201 _size_to_string(*sz), i);
1202 if (__rand_va_sweep(dev, s, *sz)) {
1203 ds_printf(dev, s, " -> FAILED\n");
1204 ret = -EINVAL;
1205 } else {
1206 ds_printf(dev, s, " -> SUCCEEDED\n");
1207 }
1208 }
1209 }
1210
1211 ds_printf(dev, s, "TLB stress sweep");
1212 if (__tlb_stress_sweep(dev, s)) {
1213 ds_printf(dev, s, " -> FAILED\n");
1214 ret = -EINVAL;
1215 } else {
1216 ds_printf(dev, s, " -> SUCCEEDED\n");
1217 }
1218
1219 ds_printf(dev, s, "second bonus map:");
1220 if (__full_va_sweep(dev, s, SZ_4K, domain)) {
1221 ds_printf(dev, s, " -> FAILED\n");
1222 ret = -EINVAL;
1223 } else {
1224 ds_printf(dev, s, " -> SUCCEEDED\n");
1225 }
1226
1227 return ret;
1228}
1229
/*
 * Allocate a 742KB coherent buffer and verify it is usable: at each
 * 1KB stride, refill the whole buffer with 0xa5, write a known byte,
 * increment it in place and read it back.
 *
 * @domain and @ignored are unused; the signature matches the
 * __apply_to_new_mapping() callback type.  Returns 0 on success,
 * -EINVAL on any failure.
 */
static int __functional_dma_api_alloc_test(struct device *dev,
					   struct seq_file *s,
					   struct iommu_domain *domain,
					   void *ignored)
{
	size_t size = SZ_1K * 742;
	int ret = 0;
	u8 *data;
	dma_addr_t iova;

	/* Make sure we can allocate and use a buffer */
	ds_printf(dev, s, "Allocating coherent buffer");
	data = dma_alloc_coherent(dev, size, &iova, GFP_KERNEL);
	if (!data) {
		ds_printf(dev, s, " -> FAILED\n");
		ret = -EINVAL;
	} else {
		int i;

		ds_printf(dev, s, " -> SUCCEEDED\n");
		ds_printf(dev, s, "Using coherent buffer");
		for (i = 0; i < 742; ++i) {
			int ind = SZ_1K * i;
			u8 *p = data + ind;
			u8 val = i % 255;

			/* wipe the whole buffer each pass, then test one byte */
			memset(data, 0xa5, size);
			*p = val;
			(*p)++;
			if ((*p) != val + 1) {
				ds_printf(dev, s,
					  " -> FAILED on iter %d since %d != %d\n",
					  i, *p, val + 1);
				ret = -EINVAL;
			}
		}
		if (!ret)
			ds_printf(dev, s, " -> SUCCEEDED\n");
		dma_free_coherent(dev, size, data, iova);
	}

	return ret;
}
1273
1274static int __functional_dma_api_basic_test(struct device *dev,
1275 struct seq_file *s,
1276 struct iommu_domain *domain,
1277 void *ignored)
1278{
1279 size_t size = 1518;
1280 int i, j, ret = 0;
1281 u8 *data;
1282 dma_addr_t iova;
1283 phys_addr_t pa, pa2;
1284
1285 ds_printf(dev, s, "Basic DMA API test");
1286 /* Make sure we can allocate and use a buffer */
1287 for (i = 0; i < 1000; ++i) {
1288 data = kmalloc(size, GFP_KERNEL);
1289 if (!data) {
1290 ds_printf(dev, s, " -> FAILED\n");
1291 ret = -EINVAL;
1292 goto out;
1293 }
1294 memset(data, 0xa5, size);
1295 iova = dma_map_single(dev, data, size, DMA_TO_DEVICE);
1296 pa = iommu_iova_to_phys(domain, iova);
1297 pa2 = iommu_iova_to_phys_hard(domain, iova);
1298 if (pa != pa2) {
1299 dev_err(dev,
1300 "iova_to_phys doesn't match iova_to_phys_hard: %pa != %pa\n",
1301 &pa, &pa2);
1302 ret = -EINVAL;
1303 goto out;
1304 }
1305 pa2 = virt_to_phys(data);
1306 if (pa != pa2) {
1307 dev_err(dev,
1308 "iova_to_phys doesn't match virt_to_phys: %pa != %pa\n",
1309 &pa, &pa2);
1310 ret = -EINVAL;
1311 goto out;
1312 }
1313 dma_unmap_single(dev, iova, size, DMA_TO_DEVICE);
1314 for (j = 0; j < size; ++j) {
1315 if (data[j] != 0xa5) {
1316 dev_err(dev, "data[%d] != 0xa5\n", data[j]);
1317 ret = -EINVAL;
1318 goto out;
1319 }
1320 }
1321 kfree(data);
1322 }
1323
1324out:
1325 if (ret)
1326 ds_printf(dev, s, " -> FAILED\n");
1327 else
1328 ds_printf(dev, s, " -> SUCCEEDED\n");
1329
1330 return ret;
1331}
1332
/* Creates a fresh fast mapping and applies @fn to it */
/*
 * Creates a 4GB fast-mapped domain, attaches it to the test device,
 * enables config clocks, runs @fn(dev, s, domain, priv), then tears
 * everything down again.  Prints FAIL/SUCCESS to the seq_file.
 *
 * NOTE(review): always returns 0 (seq_file convention); @fn's result
 * is only reflected in the printed FAIL/SUCCESS line, so callers that
 * OR the return value always see 0.
 */
static int __apply_to_new_mapping(struct seq_file *s,
				    int (*fn)(struct device *dev,
					      struct seq_file *s,
					      struct iommu_domain *domain,
					      void *priv),
				    void *priv)
{
	struct dma_iommu_mapping *mapping;
	struct iommu_debug_device *ddev = s->private;
	struct device *dev = ddev->dev;
	int ret = -EINVAL, fast = 1;
	phys_addr_t pt_phys;

	mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
						(SZ_1G * 4ULL));
	if (!mapping)
		goto out;

	if (iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_FAST, &fast)) {
		seq_puts(s, "iommu_domain_set_attr failed\n");
		goto out_release_mapping;
	}

	if (arm_iommu_attach_device(dev, mapping))
		goto out_release_mapping;

	if (iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_PT_BASE_ADDR,
				  &pt_phys)) {
		ds_printf(dev, s, "Couldn't get page table base address\n");
		goto out_release_mapping;
	}

	dev_err(dev, "testing with pgtables at %pa\n", &pt_phys);
	if (iommu_enable_config_clocks(mapping->domain)) {
		ds_printf(dev, s, "Couldn't enable clocks\n");
		goto out_release_mapping;
	}
	ret = fn(dev, s, mapping->domain, priv);
	iommu_disable_config_clocks(mapping->domain);

	arm_iommu_detach_device(dev);
out_release_mapping:
	arm_iommu_release_mapping(mapping);
out:
	seq_printf(s, "%s\n", ret ? "FAIL" : "SUCCESS");
	return 0;
}
1381
/*
 * Runs the fast-DMA-API functional tests (alloc, basic, VA sweeps),
 * each on a freshly created fast mapping.
 *
 * NOTE(review): __apply_to_new_mapping() always returns 0, so ret here
 * stays 0; failures are reported only in the seq output.
 */
static int iommu_debug_functional_fast_dma_api_show(struct seq_file *s,
						    void *ignored)
{
	/* zero-terminated list of sweep sizes for the VA test */
	size_t sizes[] = {SZ_4K, SZ_8K, SZ_16K, SZ_64K, 0};
	int ret = 0;

	ret |= __apply_to_new_mapping(s, __functional_dma_api_alloc_test, NULL);
	ret |= __apply_to_new_mapping(s, __functional_dma_api_basic_test, NULL);
	ret |= __apply_to_new_mapping(s, __functional_dma_api_va_test, sizes);
	return ret;
}
1393
1394static int iommu_debug_functional_fast_dma_api_open(struct inode *inode,
1395 struct file *file)
1396{
1397 return single_open(file, iommu_debug_functional_fast_dma_api_show,
1398 inode->i_private);
1399}
1400
/* seq_file-backed fops for the "functional_fast_dma_api" debugfs file */
static const struct file_operations iommu_debug_functional_fast_dma_api_fops = {
	.open = iommu_debug_functional_fast_dma_api_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
1407
/*
 * Functional test of the regular (non-fast) ARM DMA API: create a
 * 4GB-1 mapping, attach, and run the alloc + basic tests.  Prints
 * FAIL/SUCCESS; always returns 0 (seq_file convention).
 */
static int iommu_debug_functional_arm_dma_api_show(struct seq_file *s,
						   void *ignored)
{
	struct dma_iommu_mapping *mapping;
	struct iommu_debug_device *ddev = s->private;
	struct device *dev = ddev->dev;
	/* passed as priv, but both tests ignore their last parameter */
	size_t sizes[] = {SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12, 0};
	int ret = -EINVAL;

	/* Make the size equal to MAX_ULONG */
	mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
						(SZ_1G * 4ULL - 1));
	if (!mapping)
		goto out;

	if (arm_iommu_attach_device(dev, mapping))
		goto out_release_mapping;

	ret = __functional_dma_api_alloc_test(dev, s, mapping->domain, sizes);
	ret |= __functional_dma_api_basic_test(dev, s, mapping->domain, sizes);

	arm_iommu_detach_device(dev);
out_release_mapping:
	arm_iommu_release_mapping(mapping);
out:
	seq_printf(s, "%s\n", ret ? "FAIL" : "SUCCESS");
	return 0;
}
1436
1437static int iommu_debug_functional_arm_dma_api_open(struct inode *inode,
1438 struct file *file)
1439{
1440 return single_open(file, iommu_debug_functional_arm_dma_api_show,
1441 inode->i_private);
1442}
1443
/* seq_file-backed fops for the "functional_arm_dma_api" debugfs file */
static const struct file_operations iommu_debug_functional_arm_dma_api_fops = {
	.open = iommu_debug_functional_arm_dma_api_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
1450
Mitchel Humpherysac4c38b2015-07-30 19:40:21 -07001451static int iommu_debug_attach_do_attach(struct iommu_debug_device *ddev,
1452 int val, bool is_secure)
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001453{
1454 ddev->domain = iommu_domain_alloc(&platform_bus_type);
1455 if (!ddev->domain) {
1456 pr_err("Couldn't allocate domain\n");
1457 return -ENOMEM;
1458 }
1459
Mitchel Humpherysac4c38b2015-07-30 19:40:21 -07001460 if (is_secure && iommu_domain_set_attr(ddev->domain,
1461 DOMAIN_ATTR_SECURE_VMID,
1462 &val)) {
1463 pr_err("Couldn't set secure vmid to %d\n", val);
1464 goto out_domain_free;
1465 }
1466
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001467 if (iommu_attach_device(ddev->domain, ddev->dev)) {
1468 pr_err("Couldn't attach new domain to device. Is it already attached?\n");
1469 goto out_domain_free;
1470 }
1471
1472 return 0;
1473
1474out_domain_free:
1475 iommu_domain_free(ddev->domain);
1476 ddev->domain = NULL;
1477 return -EIO;
1478}
1479
/*
 * Common write handler for the "attach"/"secure_attach" debugfs files.
 * Writing a non-zero integer allocates a fresh domain (using the value
 * as the secure VMID when @is_secure) and attaches it; writing zero
 * detaches and frees the current domain.
 *
 * Messages use pr_err() so they are visible at default loglevel; they
 * are informational, not all errors.
 */
static ssize_t __iommu_debug_attach_write(struct file *file,
					  const char __user *ubuf,
					  size_t count, loff_t *offset,
					  bool is_secure)
{
	struct iommu_debug_device *ddev = file->private_data;
	ssize_t retval;
	int val;

	if (kstrtoint_from_user(ubuf, count, 0, &val)) {
		pr_err("Invalid format. Expected a hex or decimal integer");
		retval = -EFAULT;
		goto out;
	}

	if (val) {
		if (ddev->domain) {
			pr_err("Already attached.\n");
			retval = -EINVAL;
			goto out;
		}
		/* archdata.iommu should be NULL whenever we hold no domain */
		if (WARN(ddev->dev->archdata.iommu,
			 "Attachment tracking out of sync with device\n")) {
			retval = -EINVAL;
			goto out;
		}
		if (iommu_debug_attach_do_attach(ddev, val, is_secure)) {
			retval = -EIO;
			goto out;
		}
		pr_err("Attached\n");
	} else {
		if (!ddev->domain) {
			pr_err("No domain. Did you already attach?\n");
			retval = -EINVAL;
			goto out;
		}
		iommu_detach_device(ddev->domain, ddev->dev);
		iommu_domain_free(ddev->domain);
		ddev->domain = NULL;
		pr_err("Detached\n");
	}

	retval = count;
out:
	return retval;
}
1527
Mitchel Humpherysac4c38b2015-07-30 19:40:21 -07001528static ssize_t iommu_debug_attach_write(struct file *file,
1529 const char __user *ubuf,
1530 size_t count, loff_t *offset)
1531{
1532 return __iommu_debug_attach_write(file, ubuf, count, offset,
1533 false);
1534
1535}
1536
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001537static ssize_t iommu_debug_attach_read(struct file *file, char __user *ubuf,
1538 size_t count, loff_t *offset)
1539{
1540 struct iommu_debug_device *ddev = file->private_data;
1541 char c[2];
1542
1543 if (*offset)
1544 return 0;
1545
1546 c[0] = ddev->domain ? '1' : '0';
1547 c[1] = '\n';
1548 if (copy_to_user(ubuf, &c, 2)) {
1549 pr_err("copy_to_user failed\n");
1550 return -EFAULT;
1551 }
1552 *offset = 1; /* non-zero means we're done */
1553
1554 return 2;
1555}
1556
/* fops for the non-secure "attach" debugfs file */
static const struct file_operations iommu_debug_attach_fops = {
	.open = simple_open,
	.write = iommu_debug_attach_write,
	.read = iommu_debug_attach_read,
};
1562
Mitchel Humpherysac4c38b2015-07-30 19:40:21 -07001563static ssize_t iommu_debug_attach_write_secure(struct file *file,
1564 const char __user *ubuf,
1565 size_t count, loff_t *offset)
1566{
1567 return __iommu_debug_attach_write(file, ubuf, count, offset,
1568 true);
1569
1570}
1571
/* fops for the "secure_attach" debugfs file */
static const struct file_operations iommu_debug_secure_attach_fops = {
	.open = simple_open,
	.write = iommu_debug_attach_write_secure,
	.read = iommu_debug_attach_read,
};
1577
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001578static ssize_t iommu_debug_atos_write(struct file *file,
1579 const char __user *ubuf,
1580 size_t count, loff_t *offset)
1581{
1582 struct iommu_debug_device *ddev = file->private_data;
1583 dma_addr_t iova;
1584
Susheel Khiania4417e72016-07-12 11:28:32 +05301585 if (kstrtox_from_user(ubuf, count, 0, &iova)) {
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001586 pr_err("Invalid format for iova\n");
1587 ddev->iova = 0;
1588 return -EINVAL;
1589 }
1590
1591 ddev->iova = iova;
1592 pr_err("Saved iova=%pa for future ATOS commands\n", &iova);
1593 return count;
1594}
1595
/*
 * Read handler for the "atos" debugfs file: perform a hardware (ATOS)
 * translation of the iova saved by iommu_debug_atos_write() and return
 * the physical address as text, or "FAIL" if the walk missed.
 */
static ssize_t iommu_debug_atos_read(struct file *file, char __user *ubuf,
				     size_t count, loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	phys_addr_t phys;
	char buf[100];
	ssize_t retval;
	size_t buflen;

	if (!ddev->domain) {
		pr_err("No domain. Did you already attach?\n");
		return -EINVAL;
	}

	/* non-zero offset means the result was already returned */
	if (*offset)
		return 0;

	memset(buf, 0, 100);

	phys = iommu_iova_to_phys_hard(ddev->domain, ddev->iova);
	if (!phys)
		strlcpy(buf, "FAIL\n", 100);
	else
		snprintf(buf, 100, "%pa\n", &phys);

	buflen = strlen(buf);
	if (copy_to_user(ubuf, buf, buflen)) {
		pr_err("Couldn't copy_to_user\n");
		retval = -EFAULT;
	} else {
		*offset = 1; /* non-zero means we're done */
		retval = buflen;
	}

	return retval;
}
1632
/* fops for the "atos" debugfs file */
static const struct file_operations iommu_debug_atos_fops = {
	.open = simple_open,
	.write = iommu_debug_atos_write,
	.read = iommu_debug_atos_read,
};
1638
1639static ssize_t iommu_debug_map_write(struct file *file, const char __user *ubuf,
1640 size_t count, loff_t *offset)
1641{
Shiraz Hashim3c28c962016-07-04 15:05:35 +05301642 ssize_t retval = -EINVAL;
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001643 int ret;
1644 char *comma1, *comma2, *comma3;
1645 char buf[100];
1646 dma_addr_t iova;
1647 phys_addr_t phys;
1648 size_t size;
1649 int prot;
1650 struct iommu_debug_device *ddev = file->private_data;
1651
1652 if (count >= 100) {
1653 pr_err("Value too large\n");
1654 return -EINVAL;
1655 }
1656
1657 if (!ddev->domain) {
1658 pr_err("No domain. Did you already attach?\n");
1659 return -EINVAL;
1660 }
1661
1662 memset(buf, 0, 100);
1663
1664 if (copy_from_user(buf, ubuf, count)) {
1665 pr_err("Couldn't copy from user\n");
1666 retval = -EFAULT;
1667 }
1668
1669 comma1 = strnchr(buf, count, ',');
1670 if (!comma1)
1671 goto invalid_format;
1672
1673 comma2 = strnchr(comma1 + 1, count, ',');
1674 if (!comma2)
1675 goto invalid_format;
1676
1677 comma3 = strnchr(comma2 + 1, count, ',');
1678 if (!comma3)
1679 goto invalid_format;
1680
1681 /* split up the words */
1682 *comma1 = *comma2 = *comma3 = '\0';
1683
Susheel Khiania4417e72016-07-12 11:28:32 +05301684 if (kstrtoux(buf, 0, &iova))
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001685 goto invalid_format;
1686
Susheel Khiania4417e72016-07-12 11:28:32 +05301687 if (kstrtoux(comma1 + 1, 0, &phys))
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001688 goto invalid_format;
1689
Susheel Khiania4417e72016-07-12 11:28:32 +05301690 if (kstrtosize_t(comma2 + 1, 0, &size))
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001691 goto invalid_format;
1692
1693 if (kstrtoint(comma3 + 1, 0, &prot))
1694 goto invalid_format;
1695
1696 ret = iommu_map(ddev->domain, iova, phys, size, prot);
1697 if (ret) {
1698 pr_err("iommu_map failed with %d\n", ret);
1699 retval = -EIO;
1700 goto out;
1701 }
1702
1703 retval = count;
1704 pr_err("Mapped %pa to %pa (len=0x%zx, prot=0x%x)\n",
1705 &iova, &phys, size, prot);
1706out:
1707 return retval;
1708
1709invalid_format:
1710 pr_err("Invalid format. Expected: iova,phys,len,prot where `prot' is the bitwise OR of IOMMU_READ, IOMMU_WRITE, etc.\n");
1711 return -EINVAL;
1712}
1713
/* fops for the write-only "map" debugfs file */
static const struct file_operations iommu_debug_map_fops = {
	.open = simple_open,
	.write = iommu_debug_map_write,
};
1718
/*
 * Write handler for the "unmap" debugfs file.  Expected input:
 * "iova,len".  Calls iommu_unmap() on the currently-attached test
 * domain and requires the full length to be unmapped.
 */
static ssize_t iommu_debug_unmap_write(struct file *file,
				       const char __user *ubuf,
				       size_t count, loff_t *offset)
{
	ssize_t retval = 0;
	char *comma1;
	char buf[100];
	dma_addr_t iova;
	size_t size;
	size_t unmapped;
	struct iommu_debug_device *ddev = file->private_data;

	if (count >= 100) {
		pr_err("Value too large\n");
		return -EINVAL;
	}

	if (!ddev->domain) {
		pr_err("No domain. Did you already attach?\n");
		return -EINVAL;
	}

	memset(buf, 0, 100);

	if (copy_from_user(buf, ubuf, count)) {
		pr_err("Couldn't copy from user\n");
		retval = -EFAULT;
		goto out;
	}

	comma1 = strnchr(buf, count, ',');
	if (!comma1)
		goto invalid_format;

	/* split up the words */
	*comma1 = '\0';

	if (kstrtoux(buf, 0, &iova))
		goto invalid_format;

	if (kstrtosize_t(comma1 + 1, 0, &size))
		goto invalid_format;

	unmapped = iommu_unmap(ddev->domain, iova, size);
	/* partial unmap counts as failure */
	if (unmapped != size) {
		pr_err("iommu_unmap failed. Expected to unmap: 0x%zx, unmapped: 0x%zx",
		       size, unmapped);
		return -EIO;
	}

	retval = count;
	pr_err("Unmapped %pa (len=0x%zx)\n", &iova, size);
out:
	return retval;

invalid_format:
	pr_err("Invalid format. Expected: iova,len\n");
	return -EINVAL;
}
1778
/* fops for the write-only "unmap" debugfs file */
static const struct file_operations iommu_debug_unmap_fops = {
	.open = simple_open,
	.write = iommu_debug_unmap_write,
};
1783
Mitchel Humpherys0d1b8262016-02-01 16:53:39 -08001784static ssize_t iommu_debug_config_clocks_write(struct file *file,
1785 const char __user *ubuf,
1786 size_t count, loff_t *offset)
1787{
1788 char buf;
1789 struct iommu_debug_device *ddev = file->private_data;
1790 struct device *dev = ddev->dev;
1791
1792 /* we're expecting a single character plus (optionally) a newline */
1793 if (count > 2) {
1794 dev_err(dev, "Invalid value\n");
1795 return -EINVAL;
1796 }
1797
1798 if (!ddev->domain) {
1799 dev_err(dev, "No domain. Did you already attach?\n");
1800 return -EINVAL;
1801 }
1802
1803 if (copy_from_user(&buf, ubuf, 1)) {
1804 dev_err(dev, "Couldn't copy from user\n");
1805 return -EFAULT;
1806 }
1807
1808 switch (buf) {
1809 case '0':
1810 dev_err(dev, "Disabling config clocks\n");
1811 iommu_disable_config_clocks(ddev->domain);
1812 break;
1813 case '1':
1814 dev_err(dev, "Enabling config clocks\n");
1815 if (iommu_enable_config_clocks(ddev->domain))
1816 dev_err(dev, "Failed!\n");
1817 break;
1818 default:
1819 dev_err(dev, "Invalid value. Should be 0 or 1.\n");
1820 return -EINVAL;
1821 }
1822
1823 return count;
1824}
1825
/* fops for the write-only "config_clocks" debugfs file */
static const struct file_operations iommu_debug_config_clocks_fops = {
	.open = simple_open,
	.write = iommu_debug_config_clocks_write,
};
1830
Patrick Daly9438f322017-04-05 18:03:19 -07001831static ssize_t iommu_debug_trigger_fault_write(
1832 struct file *file, const char __user *ubuf, size_t count,
1833 loff_t *offset)
1834{
1835 struct iommu_debug_device *ddev = file->private_data;
1836 unsigned long flags;
1837
1838 if (!ddev->domain) {
1839 pr_err("No domain. Did you already attach?\n");
1840 return -EINVAL;
1841 }
1842
1843 if (kstrtoul_from_user(ubuf, count, 0, &flags)) {
1844 pr_err("Invalid flags format\n");
1845 return -EFAULT;
1846 }
1847
1848 iommu_trigger_fault(ddev->domain, flags);
1849
1850 return count;
1851}
1852
/* fops for the write-only "trigger_fault" debugfs file */
static const struct file_operations iommu_debug_trigger_fault_fops = {
	.open = simple_open,
	.write = iommu_debug_trigger_fault_write,
};
1857
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07001858/*
1859 * The following will only work for drivers that implement the generic
1860 * device tree bindings described in
1861 * Documentation/devicetree/bindings/iommu/iommu.txt
1862 */
1863static int snarf_iommu_devices(struct device *dev, void *ignored)
1864{
1865 struct iommu_debug_device *ddev;
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001866 struct dentry *dir;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07001867
1868 if (!of_find_property(dev->of_node, "iommus", NULL))
1869 return 0;
1870
Mitchel Humpherys89924fd2015-07-09 14:50:22 -07001871 ddev = kzalloc(sizeof(*ddev), GFP_KERNEL);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07001872 if (!ddev)
1873 return -ENODEV;
1874 ddev->dev = dev;
1875 dir = debugfs_create_dir(dev_name(dev), debugfs_tests_dir);
1876 if (!dir) {
1877 pr_err("Couldn't create iommu/devices/%s debugfs dir\n",
1878 dev_name(dev));
1879 goto err;
1880 }
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001881
Patrick Dalye4e39862015-11-20 20:00:50 -08001882 if (!debugfs_create_file("nr_iters", S_IRUSR, dir, &iters_per_op,
1883 &iommu_debug_nr_iters_ops)) {
1884 pr_err("Couldn't create iommu/devices/%s/nr_iters debugfs file\n",
1885 dev_name(dev));
1886 goto err_rmdir;
1887 }
1888
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001889 if (!debugfs_create_file("profiling", S_IRUSR, dir, ddev,
1890 &iommu_debug_profiling_fops)) {
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07001891 pr_err("Couldn't create iommu/devices/%s/profiling debugfs file\n",
1892 dev_name(dev));
1893 goto err_rmdir;
1894 }
1895
Mitchel Humpherys020f90f2015-10-02 16:02:31 -07001896 if (!debugfs_create_file("secure_profiling", S_IRUSR, dir, ddev,
1897 &iommu_debug_secure_profiling_fops)) {
1898 pr_err("Couldn't create iommu/devices/%s/secure_profiling debugfs file\n",
1899 dev_name(dev));
1900 goto err_rmdir;
1901 }
1902
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -07001903 if (!debugfs_create_file("profiling_fast", S_IRUSR, dir, ddev,
1904 &iommu_debug_profiling_fast_fops)) {
1905 pr_err("Couldn't create iommu/devices/%s/profiling_fast debugfs file\n",
1906 dev_name(dev));
1907 goto err_rmdir;
1908 }
1909
Mitchel Humpherysbe3060c2015-10-08 15:08:01 -07001910 if (!debugfs_create_file("profiling_fast_dma_api", S_IRUSR, dir, ddev,
1911 &iommu_debug_profiling_fast_dma_api_fops)) {
1912 pr_err("Couldn't create iommu/devices/%s/profiling_fast_dma_api debugfs file\n",
1913 dev_name(dev));
1914 goto err_rmdir;
1915 }
1916
Mitchel Humpherys45fc7122015-12-11 15:22:15 -08001917 if (!debugfs_create_file("functional_fast_dma_api", S_IRUSR, dir, ddev,
1918 &iommu_debug_functional_fast_dma_api_fops)) {
1919 pr_err("Couldn't create iommu/devices/%s/functional_fast_dma_api debugfs file\n",
1920 dev_name(dev));
1921 goto err_rmdir;
1922 }
1923
Mitchel Humpherys10215fd2015-12-15 18:45:57 -08001924 if (!debugfs_create_file("functional_arm_dma_api", S_IRUSR, dir, ddev,
1925 &iommu_debug_functional_arm_dma_api_fops)) {
1926 pr_err("Couldn't create iommu/devices/%s/functional_arm_dma_api debugfs file\n",
1927 dev_name(dev));
1928 goto err_rmdir;
1929 }
1930
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001931 if (!debugfs_create_file("attach", S_IRUSR, dir, ddev,
1932 &iommu_debug_attach_fops)) {
1933 pr_err("Couldn't create iommu/devices/%s/attach debugfs file\n",
1934 dev_name(dev));
1935 goto err_rmdir;
1936 }
1937
Mitchel Humpherysac4c38b2015-07-30 19:40:21 -07001938 if (!debugfs_create_file("secure_attach", S_IRUSR, dir, ddev,
1939 &iommu_debug_secure_attach_fops)) {
1940 pr_err("Couldn't create iommu/devices/%s/secure_attach debugfs file\n",
1941 dev_name(dev));
1942 goto err_rmdir;
1943 }
1944
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001945 if (!debugfs_create_file("atos", S_IWUSR, dir, ddev,
1946 &iommu_debug_atos_fops)) {
1947 pr_err("Couldn't create iommu/devices/%s/atos debugfs file\n",
1948 dev_name(dev));
1949 goto err_rmdir;
1950 }
1951
1952 if (!debugfs_create_file("map", S_IWUSR, dir, ddev,
1953 &iommu_debug_map_fops)) {
1954 pr_err("Couldn't create iommu/devices/%s/map debugfs file\n",
1955 dev_name(dev));
1956 goto err_rmdir;
1957 }
1958
1959 if (!debugfs_create_file("unmap", S_IWUSR, dir, ddev,
1960 &iommu_debug_unmap_fops)) {
1961 pr_err("Couldn't create iommu/devices/%s/unmap debugfs file\n",
1962 dev_name(dev));
1963 goto err_rmdir;
1964 }
1965
Mitchel Humpherys0d1b8262016-02-01 16:53:39 -08001966 if (!debugfs_create_file("config_clocks", S_IWUSR, dir, ddev,
1967 &iommu_debug_config_clocks_fops)) {
1968 pr_err("Couldn't create iommu/devices/%s/config_clocks debugfs file\n",
1969 dev_name(dev));
1970 goto err_rmdir;
1971 }
1972
Patrick Daly9438f322017-04-05 18:03:19 -07001973 if (!debugfs_create_file("trigger-fault", 0200, dir, ddev,
1974 &iommu_debug_trigger_fault_fops)) {
1975 pr_err("Couldn't create iommu/devices/%s/trigger-fault debugfs file\n",
1976 dev_name(dev));
1977 goto err_rmdir;
1978 }
1979
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07001980 list_add(&ddev->list, &iommu_debug_devices);
1981 return 0;
1982
1983err_rmdir:
1984 debugfs_remove_recursive(dir);
1985err:
1986 kfree(ddev);
1987 return 0;
1988}
1989
1990static int iommu_debug_init_tests(void)
1991{
1992 debugfs_tests_dir = debugfs_create_dir("tests",
Mitchel Humpherysc75ae492015-07-15 18:27:36 -07001993 iommu_debugfs_top);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07001994 if (!debugfs_tests_dir) {
1995 pr_err("Couldn't create iommu/tests debugfs directory\n");
1996 return -ENODEV;
1997 }
1998
1999 return bus_for_each_dev(&platform_bus_type, NULL, NULL,
2000 snarf_iommu_devices);
2001}
Mitchel Humpherysc75ae492015-07-15 18:27:36 -07002002
/* Tear down the entire iommu/tests debugfs subtree, including all
 * per-device directories created by snarf_iommu_devices().
 */
static void iommu_debug_destroy_tests(void)
{
	debugfs_remove_recursive(debugfs_tests_dir);
}
#else
/* Test support compiled out: no-op stubs so callers need no ifdefs. */
static inline int iommu_debug_init_tests(void) { return 0; }
static inline void iommu_debug_destroy_tests(void) { }
#endif
2011
Mitchel Humpherys93f7eef2016-04-13 17:08:49 -07002012/*
2013 * This isn't really a "driver", we just need something in the device tree
2014 * so that our tests can run without any client drivers, and our tests rely
2015 * on parsing the device tree for nodes with the `iommus' property.
2016 */
/* No-op probe/remove callback; the driver only needs to bind the DT node. */
static int iommu_debug_pass(struct platform_device *pdev)
{
	return 0;
}
2021
/* Matches the dummy "iommu-debug-test" device-tree node used by the tests. */
static const struct of_device_id iommu_debug_of_match[] = {
	{ .compatible = "iommu-debug-test" },
	{ },
};
2026
/* Stub platform driver (no-op probe/remove); see the comment above. */
static struct platform_driver iommu_debug_driver = {
	.probe = iommu_debug_pass,
	.remove = iommu_debug_pass,
	.driver = {
		.name = "iommu-debug",
		.of_match_table = iommu_debug_of_match,
	},
};
2035
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002036static int iommu_debug_init(void)
2037{
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002038 if (iommu_debug_init_tracking())
Mitchel Humpherysc75ae492015-07-15 18:27:36 -07002039 return -ENODEV;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002040
2041 if (iommu_debug_init_tests())
Mitchel Humpherysc75ae492015-07-15 18:27:36 -07002042 return -ENODEV;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002043
Mitchel Humpherys93f7eef2016-04-13 17:08:49 -07002044 return platform_driver_register(&iommu_debug_driver);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002045}
2046
/* Module exit: unregister the stub driver, then remove all debugfs state. */
static void iommu_debug_exit(void)
{
	platform_driver_unregister(&iommu_debug_driver);
	iommu_debug_destroy_tracking();
	iommu_debug_destroy_tests();
}
2053
/* Module entry/exit registration. */
module_init(iommu_debug_init);
module_exit(iommu_debug_exit);