/*
 * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) "iommu-debug: %s: " fmt, __func__

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/dma-contiguous.h>
#include <soc/qcom/secure_buffer.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
#include <asm/dma-iommu.h>

#if defined(CONFIG_IOMMU_DEBUG_TRACKING) || defined(CONFIG_IOMMU_TESTS)

static const char *iommu_debug_attr_to_string(enum iommu_attr attr)
{
	switch (attr) {
	case DOMAIN_ATTR_GEOMETRY:
		return "DOMAIN_ATTR_GEOMETRY";
	case DOMAIN_ATTR_PAGING:
		return "DOMAIN_ATTR_PAGING";
	case DOMAIN_ATTR_WINDOWS:
		return "DOMAIN_ATTR_WINDOWS";
	case DOMAIN_ATTR_FSL_PAMU_STASH:
		return "DOMAIN_ATTR_FSL_PAMU_STASH";
	case DOMAIN_ATTR_FSL_PAMU_ENABLE:
		return "DOMAIN_ATTR_FSL_PAMU_ENABLE";
	case DOMAIN_ATTR_FSL_PAMUV1:
		return "DOMAIN_ATTR_FSL_PAMUV1";
	case DOMAIN_ATTR_NESTING:
		return "DOMAIN_ATTR_NESTING";
	case DOMAIN_ATTR_PT_BASE_ADDR:
		return "DOMAIN_ATTR_PT_BASE_ADDR";
	case DOMAIN_ATTR_SECURE_VMID:
		return "DOMAIN_ATTR_SECURE_VMID";
	case DOMAIN_ATTR_ATOMIC:
		return "DOMAIN_ATTR_ATOMIC";
	case DOMAIN_ATTR_CONTEXT_BANK:
		return "DOMAIN_ATTR_CONTEXT_BANK";
	case DOMAIN_ATTR_TTBR0:
		return "DOMAIN_ATTR_TTBR0";
	case DOMAIN_ATTR_CONTEXTIDR:
		return "DOMAIN_ATTR_CONTEXTIDR";
	case DOMAIN_ATTR_PROCID:
		return "DOMAIN_ATTR_PROCID";
	case DOMAIN_ATTR_DYNAMIC:
		return "DOMAIN_ATTR_DYNAMIC";
	case DOMAIN_ATTR_NON_FATAL_FAULTS:
		return "DOMAIN_ATTR_NON_FATAL_FAULTS";
	case DOMAIN_ATTR_S1_BYPASS:
		return "DOMAIN_ATTR_S1_BYPASS";
	case DOMAIN_ATTR_FAST:
		return "DOMAIN_ATTR_FAST";
	default:
		return "Unknown attr!";
	}
}
#endif

#ifdef CONFIG_IOMMU_DEBUG_TRACKING

static DEFINE_MUTEX(iommu_debug_attachments_lock);
static LIST_HEAD(iommu_debug_attachments);
static struct dentry *debugfs_attachments_dir;

struct iommu_debug_attachment {
	struct iommu_domain *domain;
	struct device *dev;
	struct dentry *dentry;
	struct list_head list;
	unsigned long reg_offset;
};

static int iommu_debug_attachment_info_show(struct seq_file *s, void *ignored)
{
	struct iommu_debug_attachment *attach = s->private;
	int secure_vmid;

	seq_printf(s, "Domain: 0x%p\n", attach->domain);

	seq_puts(s, "SECURE_VMID: ");
	if (iommu_domain_get_attr(attach->domain,
				  DOMAIN_ATTR_SECURE_VMID,
				  &secure_vmid))
		seq_puts(s, "(Unknown)\n");
	else
		seq_printf(s, "%s (0x%x)\n",
			   msm_secure_vmid_to_string(secure_vmid), secure_vmid);

	return 0;
}

static int iommu_debug_attachment_info_open(struct inode *inode,
					    struct file *file)
{
	return single_open(file, iommu_debug_attachment_info_show,
			   inode->i_private);
}

static const struct file_operations iommu_debug_attachment_info_fops = {
	.open = iommu_debug_attachment_info_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

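/*
 * Writing to the trigger_fault file deliberately provokes a fault on the
 * attached domain; the written value is passed through as flags to
 * iommu_trigger_fault(). A hedged usage sketch (assuming debugfs is
 * mounted at /sys/kernel/debug; the per-attachment directory name depends
 * on the device):
 *
 *   echo 1 > /sys/kernel/debug/iommu/attachments/<dev>/trigger_fault
 */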
static ssize_t iommu_debug_attachment_trigger_fault_write(
	struct file *file, const char __user *ubuf, size_t count,
	loff_t *offset)
{
	struct iommu_debug_attachment *attach = file->private_data;
	unsigned long flags;

	if (kstrtoul_from_user(ubuf, count, 0, &flags)) {
		pr_err("Invalid flags format\n");
		return -EFAULT;
	}

	iommu_trigger_fault(attach->domain, flags);

	return count;
}

static const struct file_operations
iommu_debug_attachment_trigger_fault_fops = {
	.open = simple_open,
	.write = iommu_debug_attachment_trigger_fault_write,
};

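/*
 * The reg_offset/reg_read/reg_write files form a small register-poking
 * interface: write a byte offset to reg_offset first, then read reg_read
 * or write reg_write to access the register at that offset via
 * iommu_reg_read()/iommu_reg_write(). A hedged sketch of the flow
 * (assuming debugfs at /sys/kernel/debug):
 *
 *   echo 0x20 > /sys/kernel/debug/iommu/attachments/<dev>/reg_offset
 *   cat /sys/kernel/debug/iommu/attachments/<dev>/reg_read
 *   echo 0x1 > /sys/kernel/debug/iommu/attachments/<dev>/reg_write
 */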
static ssize_t iommu_debug_attachment_reg_offset_write(
	struct file *file, const char __user *ubuf, size_t count,
	loff_t *offset)
{
	struct iommu_debug_attachment *attach = file->private_data;
	unsigned long reg_offset;

	if (kstrtoul_from_user(ubuf, count, 0, &reg_offset)) {
		pr_err("Invalid reg_offset format\n");
		return -EFAULT;
	}

	attach->reg_offset = reg_offset;

	return count;
}

static const struct file_operations iommu_debug_attachment_reg_offset_fops = {
	.open = simple_open,
	.write = iommu_debug_attachment_reg_offset_write,
};

static ssize_t iommu_debug_attachment_reg_read_read(
	struct file *file, char __user *ubuf, size_t count, loff_t *offset)
{
	struct iommu_debug_attachment *attach = file->private_data;
	unsigned long val;
	char *val_str;
	ssize_t val_str_len;

	if (*offset)
		return 0;

	val = iommu_reg_read(attach->domain, attach->reg_offset);
	val_str = kasprintf(GFP_KERNEL, "0x%lx\n", val);
	if (!val_str)
		return -ENOMEM;
	val_str_len = strlen(val_str);

	if (copy_to_user(ubuf, val_str, val_str_len)) {
		pr_err("copy_to_user failed\n");
		val_str_len = -EFAULT;
		goto out;
	}
	*offset = 1; /* non-zero means we're done */

out:
	kfree(val_str);
	return val_str_len;
}

static const struct file_operations iommu_debug_attachment_reg_read_fops = {
	.open = simple_open,
	.read = iommu_debug_attachment_reg_read_read,
};

static ssize_t iommu_debug_attachment_reg_write_write(
	struct file *file, const char __user *ubuf, size_t count,
	loff_t *offset)
{
	struct iommu_debug_attachment *attach = file->private_data;
	unsigned long val;

	if (kstrtoul_from_user(ubuf, count, 0, &val)) {
		pr_err("Invalid val format\n");
		return -EFAULT;
	}

	iommu_reg_write(attach->domain, attach->reg_offset, val);

	return count;
}

static const struct file_operations iommu_debug_attachment_reg_write_fops = {
	.open = simple_open,
	.write = iommu_debug_attachment_reg_write_write,
};

/* should be called with iommu_debug_attachments_lock locked */
static int iommu_debug_attach_add_debugfs(
	struct iommu_debug_attachment *attach)
{
	const char *attach_name;
	struct device *dev = attach->dev;
	struct iommu_domain *domain = attach->domain;
	int is_dynamic;

	if (iommu_domain_get_attr(domain, DOMAIN_ATTR_DYNAMIC, &is_dynamic))
		is_dynamic = 0;

	if (is_dynamic) {
		uuid_le uuid;

		uuid_le_gen(&uuid);
		attach_name = kasprintf(GFP_KERNEL, "%s-%pUl", dev_name(dev),
					uuid.b);
		if (!attach_name)
			return -ENOMEM;
	} else {
		attach_name = dev_name(dev);
	}

	attach->dentry = debugfs_create_dir(attach_name,
					    debugfs_attachments_dir);
	if (!attach->dentry) {
		pr_err("Couldn't create iommu/attachments/%s debugfs directory for domain 0x%p\n",
		       attach_name, domain);
		if (is_dynamic)
			kfree(attach_name);
		return -EIO;
	}

	if (is_dynamic)
		kfree(attach_name);

	if (!debugfs_create_file(
		    "info", S_IRUSR, attach->dentry, attach,
		    &iommu_debug_attachment_info_fops)) {
		pr_err("Couldn't create iommu/attachments/%s/info debugfs file for domain 0x%p\n",
		       dev_name(dev), domain);
		goto err_rmdir;
	}

	if (!debugfs_create_file(
		    "trigger_fault", S_IRUSR, attach->dentry, attach,
		    &iommu_debug_attachment_trigger_fault_fops)) {
		pr_err("Couldn't create iommu/attachments/%s/trigger_fault debugfs file for domain 0x%p\n",
		       dev_name(dev), domain);
		goto err_rmdir;
	}

	if (!debugfs_create_file(
		    "reg_offset", S_IRUSR, attach->dentry, attach,
		    &iommu_debug_attachment_reg_offset_fops)) {
		pr_err("Couldn't create iommu/attachments/%s/reg_offset debugfs file for domain 0x%p\n",
		       dev_name(dev), domain);
		goto err_rmdir;
	}

	if (!debugfs_create_file(
		    "reg_read", S_IRUSR, attach->dentry, attach,
		    &iommu_debug_attachment_reg_read_fops)) {
		pr_err("Couldn't create iommu/attachments/%s/reg_read debugfs file for domain 0x%p\n",
		       dev_name(dev), domain);
		goto err_rmdir;
	}

	if (!debugfs_create_file(
		    "reg_write", S_IRUSR, attach->dentry, attach,
		    &iommu_debug_attachment_reg_write_fops)) {
		pr_err("Couldn't create iommu/attachments/%s/reg_write debugfs file for domain 0x%p\n",
		       dev_name(dev), domain);
		goto err_rmdir;
	}

	return 0;

err_rmdir:
	debugfs_remove_recursive(attach->dentry);
	return -EIO;
}

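/*
 * Attachment-tracking lifecycle: iommu_debug_domain_add() records a newly
 * allocated domain (dev still NULL), iommu_debug_attach_device() fills in
 * the device and creates the debugfs nodes, iommu_debug_detach_device()
 * removes the nodes but keeps the list entry so unattached domains stay
 * tracked, and iommu_debug_domain_remove() drops the entry on domain free.
 */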
void iommu_debug_domain_add(struct iommu_domain *domain)
{
	struct iommu_debug_attachment *attach;

	mutex_lock(&iommu_debug_attachments_lock);

	attach = kmalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		goto out_unlock;

	attach->domain = domain;
	attach->dev = NULL;
	list_add(&attach->list, &iommu_debug_attachments);

out_unlock:
	mutex_unlock(&iommu_debug_attachments_lock);
}

void iommu_debug_domain_remove(struct iommu_domain *domain)
{
	struct iommu_debug_attachment *it;

	mutex_lock(&iommu_debug_attachments_lock);
	list_for_each_entry(it, &iommu_debug_attachments, list)
		if (it->domain == domain && it->dev == NULL)
			break;

	if (&it->list == &iommu_debug_attachments) {
		WARN(1, "Couldn't find debug attachment for domain=0x%p",
		     domain);
	} else {
		list_del(&it->list);
		kfree(it);
	}
	mutex_unlock(&iommu_debug_attachments_lock);
}

void iommu_debug_attach_device(struct iommu_domain *domain,
			       struct device *dev)
{
	struct iommu_debug_attachment *attach;

	mutex_lock(&iommu_debug_attachments_lock);

	list_for_each_entry(attach, &iommu_debug_attachments, list)
		if (attach->domain == domain && attach->dev == NULL)
			break;

	if (&attach->list == &iommu_debug_attachments) {
		WARN(1, "Couldn't find debug attachment for domain=0x%p dev=%s",
		     domain, dev_name(dev));
	} else {
		attach->dev = dev;

		/*
		 * we might not init until after other drivers start calling
		 * iommu_attach_device. Only set up the debugfs nodes if we've
		 * already init'd to avoid polluting the top-level debugfs
		 * directory (by calling debugfs_create_dir with a NULL
		 * parent). These will be flushed out later once we init.
		 */

		if (debugfs_attachments_dir)
			iommu_debug_attach_add_debugfs(attach);
	}

	mutex_unlock(&iommu_debug_attachments_lock);
}

void iommu_debug_detach_device(struct iommu_domain *domain,
			       struct device *dev)
{
	struct iommu_debug_attachment *it;

	mutex_lock(&iommu_debug_attachments_lock);
	list_for_each_entry(it, &iommu_debug_attachments, list)
		if (it->domain == domain && it->dev == dev)
			break;

	if (&it->list == &iommu_debug_attachments) {
		WARN(1, "Couldn't find debug attachment for domain=0x%p dev=%s",
		     domain, dev_name(dev));
	} else {
		/*
		 * Just remove debugfs entry and mark dev as NULL on
		 * iommu_detach call. We would remove the actual
		 * attachment entry from the list only on domain_free call.
		 * This is to ensure we keep track of unattached domains too.
		 */

		debugfs_remove_recursive(it->dentry);
		it->dev = NULL;
	}
	mutex_unlock(&iommu_debug_attachments_lock);
}

static int iommu_debug_init_tracking(void)
{
	int ret = 0;
	struct iommu_debug_attachment *attach;

	mutex_lock(&iommu_debug_attachments_lock);
	debugfs_attachments_dir = debugfs_create_dir("attachments",
						     iommu_debugfs_top);
	if (!debugfs_attachments_dir) {
		pr_err("Couldn't create iommu/attachments debugfs directory\n");
		ret = -ENODEV;
		goto out_unlock;
	}

	/* set up debugfs entries for attachments made during early boot */
	list_for_each_entry(attach, &iommu_debug_attachments, list)
		if (attach->dev)
			iommu_debug_attach_add_debugfs(attach);

out_unlock:
	mutex_unlock(&iommu_debug_attachments_lock);
	return ret;
}

static void iommu_debug_destroy_tracking(void)
{
	debugfs_remove_recursive(debugfs_attachments_dir);
}
#else
static inline int iommu_debug_init_tracking(void) { return 0; }
static inline void iommu_debug_destroy_tracking(void) { }
#endif

#ifdef CONFIG_IOMMU_TESTS

#ifdef CONFIG_64BIT

#define kstrtoux kstrtou64
#define kstrtox_from_user kstrtoll_from_user
#define kstrtosize_t kstrtoul

#else

#define kstrtoux kstrtou32
#define kstrtox_from_user kstrtoint_from_user
#define kstrtosize_t kstrtouint

#endif

static LIST_HEAD(iommu_debug_devices);
static struct dentry *debugfs_tests_dir;
static u32 iters_per_op = 1;

struct iommu_debug_device {
	struct device *dev;
	struct iommu_domain *domain;
	u64 iova;
	u64 phys;
	size_t len;
	struct list_head list;
};

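/*
 * Builds an sg_table of total_size bytes split into chunk_size entries,
 * with every entry backed by the same physical chunk. That is fine here:
 * the tests only exercise IOVA allocation and mapping, so the backing
 * memory never needs to be distinct.
 */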
static int iommu_debug_build_phoney_sg_table(struct device *dev,
					     struct sg_table *table,
					     unsigned long total_size,
					     unsigned long chunk_size)
{
	unsigned long nents = total_size / chunk_size;
	struct scatterlist *sg;
	int i;
	struct page *page;

	if (!IS_ALIGNED(total_size, PAGE_SIZE))
		return -EINVAL;
	if (!IS_ALIGNED(total_size, chunk_size))
		return -EINVAL;
	if (sg_alloc_table(table, nents, GFP_KERNEL))
		return -EINVAL;
	page = alloc_pages(GFP_KERNEL, get_order(chunk_size));
	if (!page)
		goto free_table;

	/* all the same page... why not. */
	for_each_sg(table->sgl, sg, table->nents, i)
		sg_set_page(sg, page, chunk_size, 0);

	return 0;

free_table:
	sg_free_table(table);
	return -ENOMEM;
}

static void iommu_debug_destroy_phoney_sg_table(struct device *dev,
						struct sg_table *table,
						unsigned long chunk_size)
{
	__free_pages(sg_page(table->sgl), get_order(chunk_size));
	sg_free_table(table);
}

static const char *_size_to_string(unsigned long size)
{
	switch (size) {
	case SZ_4K:
		return "4K";
	case SZ_8K:
		return "8K";
	case SZ_16K:
		return "16K";
	case SZ_64K:
		return "64K";
	case SZ_2M:
		return "2M";
	case SZ_1M * 12:
		return "12M";
	case SZ_1M * 20:
		return "20M";
	}
	return "unknown size, please add to _size_to_string";
}

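/*
 * iters_per_op controls how many times each map/unmap is repeated per
 * profiled size; nr_iters_set() clamps writes to the range 1..10000. The
 * debugfs node wired up to iommu_debug_nr_iters_ops is created elsewhere
 * (not shown in this excerpt).
 */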
static int nr_iters_set(void *data, u64 val)
{
	if (!val)
		val = 1;
	if (val > 10000)
		val = 10000;
	*(u32 *)data = val;
	return 0;
}

static int nr_iters_get(void *data, u64 *val)
{
	*val = *(u32 *)data;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(iommu_debug_nr_iters_ops,
			nr_iters_get, nr_iters_set, "%llu\n");

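/*
 * Profiles map/unmap latency: allocate a fresh domain, apply the requested
 * attributes, attach, then for each size in @sizes time iommu_map()/
 * iommu_unmap() (and, in the second table, iommu_map_sg()), averaged over
 * iters_per_op iterations.
 */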
static void iommu_debug_device_profiling(struct seq_file *s, struct device *dev,
					 enum iommu_attr attrs[],
					 void *attr_values[], int nattrs,
					 const size_t sizes[])
{
	int i;
	const size_t *sz;
	struct iommu_domain *domain;
	unsigned long iova = 0x10000;
	phys_addr_t paddr = 0xa000;

	domain = iommu_domain_alloc(&platform_bus_type);
	if (!domain) {
		seq_puts(s, "Couldn't allocate domain\n");
		return;
	}

	seq_puts(s, "Domain attributes: [ ");
	for (i = 0; i < nattrs; ++i) {
		/* not all attrs are ints, but this will get us by for now */
		seq_printf(s, "%s=%d ", iommu_debug_attr_to_string(attrs[i]),
			   *((int *)attr_values[i]));
	}
	seq_puts(s, "]\n");
	for (i = 0; i < nattrs; ++i) {
		if (iommu_domain_set_attr(domain, attrs[i], attr_values[i])) {
			seq_printf(s, "Couldn't set %d to the value at %p\n",
				   attrs[i], attr_values[i]);
			goto out_domain_free;
		}
	}

	if (iommu_attach_device(domain, dev)) {
		seq_puts(s,
			 "Couldn't attach new domain to device. Is it already attached?\n");
		goto out_domain_free;
	}

	seq_printf(s, "(average over %d iterations)\n", iters_per_op);
	seq_printf(s, "%8s %19s %16s\n", "size", "iommu_map", "iommu_unmap");
	for (sz = sizes; *sz; ++sz) {
		size_t size = *sz;
		size_t unmapped;
		u64 map_elapsed_ns = 0, unmap_elapsed_ns = 0;
		u64 map_elapsed_us = 0, unmap_elapsed_us = 0;
		u32 map_elapsed_rem = 0, unmap_elapsed_rem = 0;
		struct timespec tbefore, tafter, diff;
		int i;

		for (i = 0; i < iters_per_op; ++i) {
			getnstimeofday(&tbefore);
			if (iommu_map(domain, iova, paddr, size,
				      IOMMU_READ | IOMMU_WRITE)) {
				seq_puts(s, "Failed to map\n");
				continue;
			}
			getnstimeofday(&tafter);
			diff = timespec_sub(tafter, tbefore);
			map_elapsed_ns += timespec_to_ns(&diff);

			getnstimeofday(&tbefore);
			unmapped = iommu_unmap(domain, iova, size);
			if (unmapped != size) {
				seq_printf(s,
					   "Only unmapped %zx instead of %zx\n",
					   unmapped, size);
				continue;
			}
			getnstimeofday(&tafter);
			diff = timespec_sub(tafter, tbefore);
			unmap_elapsed_ns += timespec_to_ns(&diff);
		}

		map_elapsed_ns = div_u64_rem(map_elapsed_ns, iters_per_op,
					     &map_elapsed_rem);
		unmap_elapsed_ns = div_u64_rem(unmap_elapsed_ns, iters_per_op,
					       &unmap_elapsed_rem);

		map_elapsed_us = div_u64_rem(map_elapsed_ns, 1000,
					     &map_elapsed_rem);
		unmap_elapsed_us = div_u64_rem(unmap_elapsed_ns, 1000,
					       &unmap_elapsed_rem);

		seq_printf(s, "%8s %12lld.%03d us %9lld.%03d us\n",
			   _size_to_string(size),
			   map_elapsed_us, map_elapsed_rem,
			   unmap_elapsed_us, unmap_elapsed_rem);
	}

	seq_putc(s, '\n');
	seq_printf(s, "%8s %19s %16s\n", "size", "iommu_map_sg", "iommu_unmap");
	for (sz = sizes; *sz; ++sz) {
		size_t size = *sz;
		size_t unmapped;
		u64 map_elapsed_ns = 0, unmap_elapsed_ns = 0;
		u64 map_elapsed_us = 0, unmap_elapsed_us = 0;
		u32 map_elapsed_rem = 0, unmap_elapsed_rem = 0;
		struct timespec tbefore, tafter, diff;
		struct sg_table table;
		unsigned long chunk_size = SZ_4K;
		int i;

		if (iommu_debug_build_phoney_sg_table(dev, &table, size,
						      chunk_size)) {
			seq_puts(s,
				 "couldn't build phoney sg table! bailing...\n");
			goto out_detach;
		}

		for (i = 0; i < iters_per_op; ++i) {
			getnstimeofday(&tbefore);
			if (iommu_map_sg(domain, iova, table.sgl, table.nents,
					 IOMMU_READ | IOMMU_WRITE) != size) {
				seq_puts(s, "Failed to map_sg\n");
				goto next;
			}
			getnstimeofday(&tafter);
			diff = timespec_sub(tafter, tbefore);
			map_elapsed_ns += timespec_to_ns(&diff);

			getnstimeofday(&tbefore);
			unmapped = iommu_unmap(domain, iova, size);
			if (unmapped != size) {
				seq_printf(s,
					   "Only unmapped %zx instead of %zx\n",
					   unmapped, size);
				goto next;
			}
			getnstimeofday(&tafter);
			diff = timespec_sub(tafter, tbefore);
			unmap_elapsed_ns += timespec_to_ns(&diff);
		}

		map_elapsed_ns = div_u64_rem(map_elapsed_ns, iters_per_op,
					     &map_elapsed_rem);
		unmap_elapsed_ns = div_u64_rem(unmap_elapsed_ns, iters_per_op,
					       &unmap_elapsed_rem);

		map_elapsed_us = div_u64_rem(map_elapsed_ns, 1000,
					     &map_elapsed_rem);
		unmap_elapsed_us = div_u64_rem(unmap_elapsed_ns, 1000,
					       &unmap_elapsed_rem);

		seq_printf(s, "%8s %12lld.%03d us %9lld.%03d us\n",
			   _size_to_string(size),
			   map_elapsed_us, map_elapsed_rem,
			   unmap_elapsed_us, unmap_elapsed_rem);

next:
		iommu_debug_destroy_phoney_sg_table(dev, &table, chunk_size);
	}

out_detach:
	iommu_detach_device(domain, dev);
out_domain_free:
	iommu_domain_free(domain);
}

static int iommu_debug_profiling_show(struct seq_file *s, void *ignored)
{
	struct iommu_debug_device *ddev = s->private;
	const size_t sizes[] = { SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12,
				 SZ_1M * 20, 0 };
	enum iommu_attr attrs[] = {
		DOMAIN_ATTR_ATOMIC,
	};
	int atomic = 1;
	void *attr_values[] = { &atomic };

	iommu_debug_device_profiling(s, ddev->dev, attrs, attr_values,
				     ARRAY_SIZE(attrs), sizes);

	return 0;
}

static int iommu_debug_profiling_open(struct inode *inode, struct file *file)
{
	return single_open(file, iommu_debug_profiling_show, inode->i_private);
}

static const struct file_operations iommu_debug_profiling_fops = {
	.open = iommu_debug_profiling_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int iommu_debug_secure_profiling_show(struct seq_file *s, void *ignored)
{
	struct iommu_debug_device *ddev = s->private;
	const size_t sizes[] = { SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12,
				 SZ_1M * 20, 0 };

	enum iommu_attr attrs[] = {
		DOMAIN_ATTR_ATOMIC,
		DOMAIN_ATTR_SECURE_VMID,
	};
	int one = 1, secure_vmid = VMID_CP_PIXEL;
	void *attr_values[] = { &one, &secure_vmid };

	iommu_debug_device_profiling(s, ddev->dev, attrs, attr_values,
				     ARRAY_SIZE(attrs), sizes);

	return 0;
}

static int iommu_debug_secure_profiling_open(struct inode *inode,
					     struct file *file)
{
	return single_open(file, iommu_debug_secure_profiling_show,
			   inode->i_private);
}

static const struct file_operations iommu_debug_secure_profiling_fops = {
	.open = iommu_debug_secure_profiling_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int iommu_debug_profiling_fast_show(struct seq_file *s, void *ignored)
{
	struct iommu_debug_device *ddev = s->private;
	size_t sizes[] = { SZ_4K, SZ_8K, SZ_16K, SZ_64K, 0 };
	enum iommu_attr attrs[] = {
		DOMAIN_ATTR_FAST,
		DOMAIN_ATTR_ATOMIC,
	};
	int one = 1;
	void *attr_values[] = { &one, &one };

	iommu_debug_device_profiling(s, ddev->dev, attrs, attr_values,
				     ARRAY_SIZE(attrs), sizes);

	return 0;
}

static int iommu_debug_profiling_fast_open(struct inode *inode,
					   struct file *file)
{
	return single_open(file, iommu_debug_profiling_fast_show,
			   inode->i_private);
}

static const struct file_operations iommu_debug_profiling_fast_fops = {
	.open = iommu_debug_profiling_fast_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int iommu_debug_profiling_fast_dma_api_show(struct seq_file *s,
						   void *ignored)
{
	int i, experiment;
	struct iommu_debug_device *ddev = s->private;
	struct device *dev = ddev->dev;
	u64 map_elapsed_ns[10], unmap_elapsed_ns[10];
	struct dma_iommu_mapping *mapping;
	dma_addr_t dma_addr;
	void *virt;
	int fast = 1;
	const char * const extra_labels[] = {
		"not coherent",
		"coherent",
	};
	unsigned long extra_attrs[] = {
		0,
		DMA_ATTR_SKIP_CPU_SYNC,
	};

	virt = kmalloc(1518, GFP_KERNEL);
	if (!virt)
		goto out;

	mapping = arm_iommu_create_mapping(&platform_bus_type, 0, SZ_1G * 4UL);
	if (!mapping) {
		seq_puts(s, "fast_smmu_create_mapping failed\n");
		goto out_kfree;
	}

	if (iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_FAST, &fast)) {
		seq_puts(s, "iommu_domain_set_attr failed\n");
		goto out_release_mapping;
	}

	if (arm_iommu_attach_device(dev, mapping)) {
		seq_puts(s, "fast_smmu_attach_device failed\n");
		goto out_release_mapping;
	}

	if (iommu_enable_config_clocks(mapping->domain)) {
		seq_puts(s, "Couldn't enable clocks\n");
		goto out_detach;
	}
	for (experiment = 0; experiment < 2; ++experiment) {
		size_t map_avg = 0, unmap_avg = 0;

		for (i = 0; i < 10; ++i) {
			struct timespec tbefore, tafter, diff;
			u64 ns;

			getnstimeofday(&tbefore);
			dma_addr = dma_map_single_attrs(
				dev, virt, SZ_4K, DMA_TO_DEVICE,
				extra_attrs[experiment]);
			getnstimeofday(&tafter);
			diff = timespec_sub(tafter, tbefore);
			ns = timespec_to_ns(&diff);
			if (dma_mapping_error(dev, dma_addr)) {
				seq_puts(s, "dma_map_single failed\n");
				goto out_disable_config_clocks;
			}
			map_elapsed_ns[i] = ns;

			getnstimeofday(&tbefore);
			dma_unmap_single_attrs(
				dev, dma_addr, SZ_4K, DMA_TO_DEVICE,
				extra_attrs[experiment]);
			getnstimeofday(&tafter);
			diff = timespec_sub(tafter, tbefore);
			ns = timespec_to_ns(&diff);
			unmap_elapsed_ns[i] = ns;
		}

		seq_printf(s, "%13s %24s (ns): [", extra_labels[experiment],
			   "dma_map_single_attrs");
		for (i = 0; i < 10; ++i) {
			map_avg += map_elapsed_ns[i];
			seq_printf(s, "%5llu%s", map_elapsed_ns[i],
				   i < 9 ? ", " : "");
		}
		map_avg /= 10;
		seq_printf(s, "] (avg: %zu)\n", map_avg);

		seq_printf(s, "%13s %24s (ns): [", extra_labels[experiment],
			   "dma_unmap_single_attrs");
		for (i = 0; i < 10; ++i) {
			unmap_avg += unmap_elapsed_ns[i];
			seq_printf(s, "%5llu%s", unmap_elapsed_ns[i],
				   i < 9 ? ", " : "");
		}
		unmap_avg /= 10;
		seq_printf(s, "] (avg: %zu)\n", unmap_avg);
	}

out_disable_config_clocks:
	iommu_disable_config_clocks(mapping->domain);
out_detach:
	arm_iommu_detach_device(dev);
out_release_mapping:
	arm_iommu_release_mapping(mapping);
out_kfree:
	kfree(virt);
out:
	return 0;
}

static int iommu_debug_profiling_fast_dma_api_open(struct inode *inode,
						   struct file *file)
{
	return single_open(file, iommu_debug_profiling_fast_dma_api_show,
			   inode->i_private);
}

static const struct file_operations iommu_debug_profiling_fast_dma_api_fops = {
	.open = iommu_debug_profiling_fast_dma_api_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

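/*
 * Fills the whole 4GB IOVA space with 8K mappings, pokes holes in it, and
 * remaps so that a missed TLB invalidation at the reuse point should blow
 * up visibly.
 */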
static int __tlb_stress_sweep(struct device *dev, struct seq_file *s)
{
	int i, ret = 0;
	unsigned long iova;
	const unsigned long max = SZ_1G * 4UL;
	void *virt;
	dma_addr_t dma_addr;

	/*
	 * we'll be doing 4K and 8K mappings. Need to own an entire 8K
	 * chunk that we can work with.
	 */
	virt = (void *)__get_free_pages(GFP_KERNEL, get_order(SZ_8K));
	if (!virt)
		return -ENOMEM;

	/* fill the whole 4GB space */
	for (iova = 0, i = 0; iova < max; iova += SZ_8K, ++i) {
		dma_addr = dma_map_single(dev, virt, SZ_8K, DMA_TO_DEVICE);
		if (dma_addr == DMA_ERROR_CODE) {
			dev_err(dev, "Failed map on iter %d\n", i);
			ret = -EINVAL;
			goto out;
		}
	}

	if (dma_map_single(dev, virt, SZ_4K, DMA_TO_DEVICE) != DMA_ERROR_CODE) {
		dev_err(dev,
			"dma_map_single unexpectedly succeeded (VA should have been exhausted)\n");
		ret = -EINVAL;
		goto out;
	}

	/*
	 * free up 4K at the very beginning, then leave one 4K mapping,
	 * then free up 8K. This will result in the next 8K map skipping
	 * over the 4K hole and taking the 8K one.
	 */
	dma_unmap_single(dev, 0, SZ_4K, DMA_TO_DEVICE);
	dma_unmap_single(dev, SZ_8K, SZ_4K, DMA_TO_DEVICE);
	dma_unmap_single(dev, SZ_8K + SZ_4K, SZ_4K, DMA_TO_DEVICE);

	/* remap 8K */
	dma_addr = dma_map_single(dev, virt, SZ_8K, DMA_TO_DEVICE);
	if (dma_addr != SZ_8K) {
		dma_addr_t expected = SZ_8K;

		dev_err(dev, "Unexpected dma_addr. got: %pa expected: %pa\n",
			&dma_addr, &expected);
		ret = -EINVAL;
		goto out;
	}

	/*
	 * now remap 4K. We should get the first 4K chunk that was skipped
	 * over during the previous 8K map. If we missed a TLB invalidate
	 * at that point this should explode.
	 */
	dma_addr = dma_map_single(dev, virt, SZ_4K, DMA_TO_DEVICE);
	if (dma_addr != 0) {
		dma_addr_t expected = 0;

		dev_err(dev, "Unexpected dma_addr. got: %pa expected: %pa\n",
			&dma_addr, &expected);
		ret = -EINVAL;
		goto out;
	}

	if (dma_map_single(dev, virt, SZ_4K, DMA_TO_DEVICE) != DMA_ERROR_CODE) {
		dev_err(dev,
			"dma_map_single unexpectedly succeeded after remaps (VA should have been exhausted)\n");
		ret = -EINVAL;
		goto out;
	}

	/* we're all full again. unmap everything. */
	for (dma_addr = 0; dma_addr < max; dma_addr += SZ_8K)
		dma_unmap_single(dev, dma_addr, SZ_8K, DMA_TO_DEVICE);

out:
	free_pages((unsigned long)virt, get_order(SZ_8K));
	return ret;
}

struct fib_state {
	unsigned long cur;
	unsigned long prev;
};

static void fib_init(struct fib_state *f)
{
	f->cur = f->prev = 1;
}

static unsigned long get_next_fib(struct fib_state *f)
{
	unsigned long next = f->cur + f->prev;

	f->prev = f->cur;
	f->cur = next;
	return next;
}

/*
 * Not actually random. Just testing the fibs (and max - the fibs).
 */
static int __rand_va_sweep(struct device *dev, struct seq_file *s,
			   const size_t size)
{
	u64 iova;
	const unsigned long max = SZ_1G * 4UL;
	int i, remapped, unmapped, ret = 0;
	void *virt;
	dma_addr_t dma_addr, dma_addr2;
	struct fib_state fib;

	virt = (void *)__get_free_pages(GFP_KERNEL, get_order(size));
	if (!virt) {
		if (size > SZ_8K) {
			dev_err(dev,
				"Failed to allocate %s of memory, which is a lot. Skipping test for this size\n",
				_size_to_string(size));
			return 0;
		}
		return -ENOMEM;
	}

	/* fill the whole 4GB space */
	for (iova = 0, i = 0; iova < max; iova += size, ++i) {
		dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
		if (dma_addr == DMA_ERROR_CODE) {
			dev_err(dev, "Failed map on iter %d\n", i);
			ret = -EINVAL;
			goto out;
		}
	}

	/* now unmap "random" iovas */
	unmapped = 0;
	fib_init(&fib);
	for (iova = get_next_fib(&fib) * size;
	     iova < max - size;
	     iova = get_next_fib(&fib) * size) {
		dma_addr = iova;
		dma_addr2 = max - size - iova;
		if (dma_addr == dma_addr2) {
			WARN(1,
			     "%s test needs update! The random number sequence is folding in on itself and should be changed.\n",
			     __func__);
			ret = -EINVAL;
			goto out;
		}
		dma_unmap_single(dev, dma_addr, size, DMA_TO_DEVICE);
		dma_unmap_single(dev, dma_addr2, size, DMA_TO_DEVICE);
		unmapped += 2;
	}

	/* and map until everything fills back up */
	for (remapped = 0; ; ++remapped) {
		dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
		if (dma_addr == DMA_ERROR_CODE)
			break;
	}

	if (unmapped != remapped) {
		dev_err(dev,
			"Unexpected random remap count! Unmapped %d but remapped %d\n",
			unmapped, remapped);
		ret = -EINVAL;
	}

	for (dma_addr = 0; dma_addr < max; dma_addr += size)
		dma_unmap_single(dev, dma_addr, size, DMA_TO_DEVICE);

out:
	free_pages((unsigned long)virt, get_order(size));
	return ret;
}

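/*
 * Cross-checks the hardware translation (iommu_iova_to_phys_hard())
 * against the software page-table walk (iommu_iova_to_phys()) and against
 * the expected physical address.
 */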
static int __check_mapping(struct device *dev, struct iommu_domain *domain,
			   dma_addr_t iova, phys_addr_t expected)
{
	phys_addr_t res = iommu_iova_to_phys_hard(domain, iova);
	phys_addr_t res2 = iommu_iova_to_phys(domain, iova);

	WARN(res != res2, "hard/soft iova_to_phys fns don't agree...");

	if (res != expected) {
		dev_err_ratelimited(dev,
				    "Bad translation for %pa! Expected: %pa Got: %pa\n",
				    &iova, &expected, &res);
		return -EINVAL;
	}

	return 0;
}

static int __full_va_sweep(struct device *dev, struct seq_file *s,
			   const size_t size, struct iommu_domain *domain)
{
	unsigned long iova;
	dma_addr_t dma_addr;
	void *virt;
	phys_addr_t phys;
	int ret = 0, i;

	virt = (void *)__get_free_pages(GFP_KERNEL, get_order(size));
	if (!virt) {
		if (size > SZ_8K) {
			dev_err(dev,
				"Failed to allocate %s of memory, which is a lot. Skipping test for this size\n",
				_size_to_string(size));
			return 0;
		}
		return -ENOMEM;
	}
	phys = virt_to_phys(virt);

	for (iova = 0, i = 0; iova < SZ_1G * 4UL; iova += size, ++i) {
		unsigned long expected = iova;

		dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
		if (dma_addr != expected) {
			dev_err_ratelimited(dev,
					    "Unexpected iova on iter %d (expected: 0x%lx got: 0x%lx)\n",
					    i, expected,
					    (unsigned long)dma_addr);
			ret = -EINVAL;
			goto out;
		}
	}

	if (domain) {
		/* check every mapping from 0..6M */
		for (iova = 0, i = 0; iova < SZ_2M * 3; iova += size, ++i) {
			phys_addr_t expected = phys;

			if (__check_mapping(dev, domain, iova, expected)) {
				dev_err(dev, "iter: %d\n", i);
				ret = -EINVAL;
				goto out;
			}
		}
		/* and from 4G..4G-6M */
		for (iova = 0, i = 0; iova < SZ_2M * 3; iova += size, ++i) {
			phys_addr_t expected = phys;
			unsigned long theiova = ((SZ_1G * 4ULL) - size) - iova;

			if (__check_mapping(dev, domain, theiova, expected)) {
				dev_err(dev, "iter: %d\n", i);
				ret = -EINVAL;
				goto out;
			}
		}
	}

	/* at this point, our VA space should be full */
	dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
	if (dma_addr != DMA_ERROR_CODE) {
		dev_err_ratelimited(dev,
				    "dma_map_single succeeded when it should have failed. Got iova: 0x%lx\n",
				    (unsigned long)dma_addr);
		ret = -EINVAL;
	}

out:
	for (dma_addr = 0; dma_addr < SZ_1G * 4UL; dma_addr += size)
		dma_unmap_single(dev, dma_addr, size, DMA_TO_DEVICE);

	free_pages((unsigned long)virt, get_order(size));
	return ret;
}

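/* Log to both the kernel log and the seq_file so failures show up in dmesg. */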
#define ds_printf(d, s, fmt, ...) ({ \
	dev_err(d, fmt, ##__VA_ARGS__); \
	seq_printf(s, fmt, ##__VA_ARGS__); \
	})

static int __functional_dma_api_va_test(struct device *dev, struct seq_file *s,
					struct iommu_domain *domain, void *priv)
{
	int i, j, ret = 0;
	size_t *sz, *sizes = priv;

	for (j = 0; j < 1; ++j) {
		for (sz = sizes; *sz; ++sz) {
			for (i = 0; i < 2; ++i) {
				ds_printf(dev, s, "Full VA sweep @%s %d",
					  _size_to_string(*sz), i);
				if (__full_va_sweep(dev, s, *sz, domain)) {
					ds_printf(dev, s, " -> FAILED\n");
					ret = -EINVAL;
				} else {
					ds_printf(dev, s, " -> SUCCEEDED\n");
				}
			}
		}
	}

	ds_printf(dev, s, "bonus map:");
	if (__full_va_sweep(dev, s, SZ_4K, domain)) {
		ds_printf(dev, s, " -> FAILED\n");
		ret = -EINVAL;
	} else {
		ds_printf(dev, s, " -> SUCCEEDED\n");
	}

	for (sz = sizes; *sz; ++sz) {
		for (i = 0; i < 2; ++i) {
			ds_printf(dev, s, "Rand VA sweep @%s %d",
				  _size_to_string(*sz), i);
			if (__rand_va_sweep(dev, s, *sz)) {
				ds_printf(dev, s, " -> FAILED\n");
				ret = -EINVAL;
			} else {
				ds_printf(dev, s, " -> SUCCEEDED\n");
			}
		}
	}

	ds_printf(dev, s, "TLB stress sweep");
	if (__tlb_stress_sweep(dev, s)) {
		ds_printf(dev, s, " -> FAILED\n");
		ret = -EINVAL;
	} else {
		ds_printf(dev, s, " -> SUCCEEDED\n");
	}

	ds_printf(dev, s, "second bonus map:");
	if (__full_va_sweep(dev, s, SZ_4K, domain)) {
		ds_printf(dev, s, " -> FAILED\n");
		ret = -EINVAL;
	} else {
		ds_printf(dev, s, " -> SUCCEEDED\n");
	}

	return ret;
}

static int __functional_dma_api_alloc_test(struct device *dev,
					   struct seq_file *s,
					   struct iommu_domain *domain,
					   void *ignored)
{
	size_t size = SZ_1K * 742;
	int ret = 0;
	u8 *data;
	dma_addr_t iova;

	/* Make sure we can allocate and use a buffer */
	ds_printf(dev, s, "Allocating coherent buffer");
	data = dma_alloc_coherent(dev, size, &iova, GFP_KERNEL);
	if (!data) {
		ds_printf(dev, s, " -> FAILED\n");
		ret = -EINVAL;
	} else {
		int i;

		ds_printf(dev, s, " -> SUCCEEDED\n");
		ds_printf(dev, s, "Using coherent buffer");
		for (i = 0; i < 742; ++i) {
			int ind = SZ_1K * i;
			u8 *p = data + ind;
			u8 val = i % 255;

			memset(data, 0xa5, size);
			*p = val;
			(*p)++;
			if ((*p) != val + 1) {
				ds_printf(dev, s,
					  " -> FAILED on iter %d since %d != %d\n",
					  i, *p, val + 1);
				ret = -EINVAL;
			}
		}
		if (!ret)
			ds_printf(dev, s, " -> SUCCEEDED\n");
		dma_free_coherent(dev, size, data, iova);
	}

	return ret;
}

static int __functional_dma_api_basic_test(struct device *dev,
					   struct seq_file *s,
					   struct iommu_domain *domain,
					   void *ignored)
{
	size_t size = 1518;
	int i, j, ret = 0;
	u8 *data;
	dma_addr_t iova;
	phys_addr_t pa, pa2;

	ds_printf(dev, s, "Basic DMA API test");
	/* Make sure we can allocate and use a buffer */
	for (i = 0; i < 1000; ++i) {
		data = kmalloc(size, GFP_KERNEL);
		if (!data) {
			ds_printf(dev, s, " -> FAILED\n");
			ret = -EINVAL;
			goto out;
		}
		memset(data, 0xa5, size);
		iova = dma_map_single(dev, data, size, DMA_TO_DEVICE);
		pa = iommu_iova_to_phys(domain, iova);
		pa2 = iommu_iova_to_phys_hard(domain, iova);
		if (pa != pa2) {
			dev_err(dev,
				"iova_to_phys doesn't match iova_to_phys_hard: %pa != %pa\n",
				&pa, &pa2);
			ret = -EINVAL;
			goto out;
		}
		pa2 = virt_to_phys(data);
		if (pa != pa2) {
			dev_err(dev,
				"iova_to_phys doesn't match virt_to_phys: %pa != %pa\n",
				&pa, &pa2);
			ret = -EINVAL;
			goto out;
		}
		dma_unmap_single(dev, iova, size, DMA_TO_DEVICE);
		for (j = 0; j < size; ++j) {
			if (data[j] != 0xa5) {
				dev_err(dev, "data[%d] != 0xa5\n", j);
				ret = -EINVAL;
				goto out;
			}
		}
		kfree(data);
	}

out:
	if (ret)
		ds_printf(dev, s, " -> FAILED\n");
	else
		ds_printf(dev, s, " -> SUCCEEDED\n");

	return ret;
}

/* Creates a fresh fast mapping and applies @fn to it */
static int __apply_to_new_mapping(struct seq_file *s,
				  int (*fn)(struct device *dev,
					    struct seq_file *s,
					    struct iommu_domain *domain,
					    void *priv),
				  void *priv)
{
	struct dma_iommu_mapping *mapping;
	struct iommu_debug_device *ddev = s->private;
	struct device *dev = ddev->dev;
	int ret = -EINVAL, fast = 1;
	phys_addr_t pt_phys;

	mapping = arm_iommu_create_mapping(&platform_bus_type, 0, SZ_1G * 4UL);
	if (!mapping)
		goto out;

	if (iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_FAST, &fast)) {
		seq_puts(s, "iommu_domain_set_attr failed\n");
		goto out_release_mapping;
	}

	if (arm_iommu_attach_device(dev, mapping))
		goto out_release_mapping;

	if (iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_PT_BASE_ADDR,
				  &pt_phys)) {
		ds_printf(dev, s, "Couldn't get page table base address\n");
		goto out_detach;
	}

	dev_err(dev, "testing with pgtables at %pa\n", &pt_phys);
	if (iommu_enable_config_clocks(mapping->domain)) {
		ds_printf(dev, s, "Couldn't enable clocks\n");
		goto out_detach;
	}
	ret = fn(dev, s, mapping->domain, priv);
	iommu_disable_config_clocks(mapping->domain);

out_detach:
	arm_iommu_detach_device(dev);
out_release_mapping:
	arm_iommu_release_mapping(mapping);
out:
	seq_printf(s, "%s\n", ret ? "FAIL" : "SUCCESS");
	return 0;
}

static int iommu_debug_functional_fast_dma_api_show(struct seq_file *s,
						    void *ignored)
{
	size_t sizes[] = { SZ_4K, SZ_8K, SZ_16K, SZ_64K, 0 };
	int ret = 0;

	ret |= __apply_to_new_mapping(s, __functional_dma_api_alloc_test, NULL);
	ret |= __apply_to_new_mapping(s, __functional_dma_api_basic_test, NULL);
	ret |= __apply_to_new_mapping(s, __functional_dma_api_va_test, sizes);
	return ret;
}

static int iommu_debug_functional_fast_dma_api_open(struct inode *inode,
						    struct file *file)
{
	return single_open(file, iommu_debug_functional_fast_dma_api_show,
			   inode->i_private);
}

static const struct file_operations iommu_debug_functional_fast_dma_api_fops = {
	.open = iommu_debug_functional_fast_dma_api_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int iommu_debug_functional_arm_dma_api_show(struct seq_file *s,
						   void *ignored)
{
	struct dma_iommu_mapping *mapping;
	struct iommu_debug_device *ddev = s->private;
	struct device *dev = ddev->dev;
	size_t sizes[] = { SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12, 0 };
	int ret = -EINVAL;

	mapping = arm_iommu_create_mapping(&platform_bus_type, 0, SZ_1G * 4UL);
	if (!mapping)
		goto out;

	if (arm_iommu_attach_device(dev, mapping))
		goto out_release_mapping;

	ret = __functional_dma_api_alloc_test(dev, s, mapping->domain, sizes);
	ret |= __functional_dma_api_basic_test(dev, s, mapping->domain, sizes);

	arm_iommu_detach_device(dev);
out_release_mapping:
	arm_iommu_release_mapping(mapping);
out:
	seq_printf(s, "%s\n", ret ? "FAIL" : "SUCCESS");
	return 0;
}

static int iommu_debug_functional_arm_dma_api_open(struct inode *inode,
						   struct file *file)
{
	return single_open(file, iommu_debug_functional_arm_dma_api_show,
			   inode->i_private);
}

static const struct file_operations iommu_debug_functional_arm_dma_api_fops = {
	.open = iommu_debug_functional_arm_dma_api_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int iommu_debug_attach_do_attach(struct iommu_debug_device *ddev,
					int val, bool is_secure)
{
	ddev->domain = iommu_domain_alloc(&platform_bus_type);
	if (!ddev->domain) {
		pr_err("Couldn't allocate domain\n");
		return -ENOMEM;
	}

	if (is_secure && iommu_domain_set_attr(ddev->domain,
					       DOMAIN_ATTR_SECURE_VMID,
					       &val)) {
		pr_err("Couldn't set secure vmid to %d\n", val);
		goto out_domain_free;
	}

	if (iommu_attach_device(ddev->domain, ddev->dev)) {
		pr_err("Couldn't attach new domain to device. Is it already attached?\n");
		goto out_domain_free;
	}

	return 0;

out_domain_free:
	iommu_domain_free(ddev->domain);
	ddev->domain = NULL;
	return -EIO;
}

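/*
 * The attach/attach_secure files take an integer: non-zero allocates a
 * domain and attaches it to the device, zero detaches and frees it. For
 * the secure variant the written value also serves as the secure VMID.
 * A hedged sketch (paths assume debugfs at /sys/kernel/debug; the "tests"
 * directory name is an assumption based on debugfs_tests_dir, which is
 * populated elsewhere):
 *
 *   echo 1 > /sys/kernel/debug/iommu/tests/<dev>/attach
 *   cat /sys/kernel/debug/iommu/tests/<dev>/attach   # prints 1 if attached
 *   echo 0 > /sys/kernel/debug/iommu/tests/<dev>/attach
 */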
static ssize_t __iommu_debug_attach_write(struct file *file,
					  const char __user *ubuf,
					  size_t count, loff_t *offset,
					  bool is_secure)
{
	struct iommu_debug_device *ddev = file->private_data;
	ssize_t retval;
	int val;

	if (kstrtoint_from_user(ubuf, count, 0, &val)) {
		pr_err("Invalid format. Expected a hex or decimal integer\n");
		retval = -EFAULT;
		goto out;
	}

	if (val) {
		if (ddev->domain) {
			pr_err("Already attached.\n");
			retval = -EINVAL;
			goto out;
		}
		if (WARN(ddev->dev->archdata.iommu,
			 "Attachment tracking out of sync with device\n")) {
			retval = -EINVAL;
			goto out;
		}
		if (iommu_debug_attach_do_attach(ddev, val, is_secure)) {
			retval = -EIO;
			goto out;
		}
		pr_err("Attached\n");
	} else {
		if (!ddev->domain) {
			pr_err("No domain. Did you already attach?\n");
			retval = -EINVAL;
			goto out;
		}
		iommu_detach_device(ddev->domain, ddev->dev);
		iommu_domain_free(ddev->domain);
		ddev->domain = NULL;
		pr_err("Detached\n");
	}

	retval = count;
out:
	return retval;
}

static ssize_t iommu_debug_attach_write(struct file *file,
					const char __user *ubuf,
					size_t count, loff_t *offset)
{
	return __iommu_debug_attach_write(file, ubuf, count, offset,
					  false);
}

static ssize_t iommu_debug_attach_read(struct file *file, char __user *ubuf,
				       size_t count, loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	char c[2];

	if (*offset)
		return 0;

	c[0] = ddev->domain ? '1' : '0';
	c[1] = '\n';
	if (copy_to_user(ubuf, &c, 2)) {
		pr_err("copy_to_user failed\n");
		return -EFAULT;
	}
	*offset = 1; /* non-zero means we're done */

	return 2;
}

static const struct file_operations iommu_debug_attach_fops = {
	.open = simple_open,
	.write = iommu_debug_attach_write,
	.read = iommu_debug_attach_read,
};

static ssize_t iommu_debug_attach_write_secure(struct file *file,
					       const char __user *ubuf,
					       size_t count, loff_t *offset)
{
	return __iommu_debug_attach_write(file, ubuf, count, offset,
					  true);
}

static const struct file_operations iommu_debug_secure_attach_fops = {
	.open = simple_open,
	.write = iommu_debug_attach_write_secure,
	.read = iommu_debug_attach_read,
};

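/*
 * ATOS helper: write an IOVA to the atos file, then read it back to get
 * the physical address the hardware translates it to (or "FAIL"). A
 * hedged sketch, with the same path assumptions as above:
 *
 *   echo 0x1000 > /sys/kernel/debug/iommu/tests/<dev>/atos
 *   cat /sys/kernel/debug/iommu/tests/<dev>/atos
 */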
static ssize_t iommu_debug_atos_write(struct file *file,
				      const char __user *ubuf,
				      size_t count, loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	dma_addr_t iova;

	if (kstrtox_from_user(ubuf, count, 0, &iova)) {
		pr_err("Invalid format for iova\n");
		ddev->iova = 0;
		return -EINVAL;
	}

	ddev->iova = iova;
	pr_err("Saved iova=%pa for future ATOS commands\n", &iova);
	return count;
}

static ssize_t iommu_debug_atos_read(struct file *file, char __user *ubuf,
				     size_t count, loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	phys_addr_t phys;
	char buf[100];
	ssize_t retval;
	size_t buflen;

	if (!ddev->domain) {
		pr_err("No domain. Did you already attach?\n");
		return -EINVAL;
	}

	if (*offset)
		return 0;

	memset(buf, 0, 100);

	phys = iommu_iova_to_phys_hard(ddev->domain, ddev->iova);
	if (!phys)
		strlcpy(buf, "FAIL\n", 100);
	else
		snprintf(buf, 100, "%pa\n", &phys);

	buflen = strlen(buf);
	if (copy_to_user(ubuf, buf, buflen)) {
		pr_err("Couldn't copy_to_user\n");
		retval = -EFAULT;
	} else {
		*offset = 1; /* non-zero means we're done */
		retval = buflen;
	}

	return retval;
}

static const struct file_operations iommu_debug_atos_fops = {
	.open = simple_open,
	.write = iommu_debug_atos_write,
	.read = iommu_debug_atos_read,
};

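/*
 * The map file takes "iova,phys,len,prot" (prot being the bitwise OR of
 * IOMMU_READ, IOMMU_WRITE, etc.) and calls iommu_map() on the attached
 * domain. A hedged sketch mapping 4K at IOVA 0x1000 to PA 0xa000 with
 * read/write permissions (IOMMU_READ | IOMMU_WRITE == 0x3):
 *
 *   echo 0x1000,0xa000,0x1000,0x3 > /sys/kernel/debug/iommu/tests/<dev>/map
 */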
1664static ssize_t iommu_debug_map_write(struct file *file, const char __user *ubuf,
1665 size_t count, loff_t *offset)
1666{
Shiraz Hashim3c28c962016-07-04 15:05:35 +05301667 ssize_t retval = -EINVAL;
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001668 int ret;
1669 char *comma1, *comma2, *comma3;
1670 char buf[100];
1671 dma_addr_t iova;
1672 phys_addr_t phys;
1673 size_t size;
1674 int prot;
1675 struct iommu_debug_device *ddev = file->private_data;
1676
1677 if (count >= 100) {
1678 pr_err("Value too large\n");
1679 return -EINVAL;
1680 }
1681
1682 if (!ddev->domain) {
1683 pr_err("No domain. Did you already attach?\n");
1684 return -EINVAL;
1685 }
1686
1687 memset(buf, 0, 100);
1688
	if (copy_from_user(buf, ubuf, count)) {
		pr_err("Couldn't copy from user\n");
		retval = -EFAULT;
		/* don't parse a buffer we never filled */
		goto out;
	}

	comma1 = strnchr(buf, count, ',');
	if (!comma1)
		goto invalid_format;

	comma2 = strnchr(comma1 + 1, count, ',');
	if (!comma2)
		goto invalid_format;

	comma3 = strnchr(comma2 + 1, count, ',');
	if (!comma3)
		goto invalid_format;

	/* split up the words */
	*comma1 = *comma2 = *comma3 = '\0';

	if (kstrtoux(buf, 0, &iova))
		goto invalid_format;

	if (kstrtoux(comma1 + 1, 0, &phys))
		goto invalid_format;

	if (kstrtosize_t(comma2 + 1, 0, &size))
		goto invalid_format;

	if (kstrtoint(comma3 + 1, 0, &prot))
		goto invalid_format;

	ret = iommu_map(ddev->domain, iova, phys, size, prot);
	if (ret) {
		pr_err("iommu_map failed with %d\n", ret);
		retval = -EIO;
		goto out;
	}

	retval = count;
	pr_err("Mapped %pa to %pa (len=0x%zx, prot=0x%x)\n",
	       &iova, &phys, size, prot);
out:
	return retval;

invalid_format:
	pr_err("Invalid format. Expected: iova,phys,len,prot where `prot' is the bitwise OR of IOMMU_READ, IOMMU_WRITE, etc.\n");
	return -EINVAL;
}

static const struct file_operations iommu_debug_map_fops = {
	.open	= simple_open,
	.write	= iommu_debug_map_write,
};
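
/*
 * Example map usage, as a sketch. The iova/phys/len values below are
 * illustrative only; prot is the bitwise OR of the IOMMU_* flags from
 * include/linux/iommu.h (IOMMU_READ | IOMMU_WRITE == 0x3):
 *
 *	echo '0x10000,0x40000000,0x1000,0x3' > \
 *		/sys/kernel/debug/iommu/tests/<device>/map
 */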

static ssize_t iommu_debug_unmap_write(struct file *file,
				       const char __user *ubuf,
				       size_t count, loff_t *offset)
{
	ssize_t retval = 0;
	char *comma1;
	char buf[100];
	dma_addr_t iova;
	size_t size;
	size_t unmapped;
	struct iommu_debug_device *ddev = file->private_data;

	if (count >= 100) {
		pr_err("Value too large\n");
		return -EINVAL;
	}

	if (!ddev->domain) {
		pr_err("No domain. Did you already attach?\n");
		return -EINVAL;
	}

	memset(buf, 0, 100);

	if (copy_from_user(buf, ubuf, count)) {
		pr_err("Couldn't copy from user\n");
		retval = -EFAULT;
		goto out;
	}

	comma1 = strnchr(buf, count, ',');
	if (!comma1)
		goto invalid_format;

	/* split up the words */
	*comma1 = '\0';

	if (kstrtoux(buf, 0, &iova))
		goto invalid_format;

	if (kstrtosize_t(comma1 + 1, 0, &size))
		goto invalid_format;

	unmapped = iommu_unmap(ddev->domain, iova, size);
	if (unmapped != size) {
		pr_err("iommu_unmap failed. Expected to unmap: 0x%zx, unmapped: 0x%zx\n",
		       size, unmapped);
		return -EIO;
	}

	retval = count;
	pr_err("Unmapped %pa (len=0x%zx)\n", &iova, size);
out:
	return retval;

invalid_format:
	pr_err("Invalid format. Expected: iova,len\n");
	return -EINVAL;
}

static const struct file_operations iommu_debug_unmap_fops = {
	.open	= simple_open,
	.write	= iommu_debug_unmap_write,
};
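
/*
 * Example unmap usage, as a sketch; len must match exactly what was mapped
 * at the given iova, or the unmap is reported as failed (values below are
 * illustrative):
 *
 *	echo '0x10000,0x1000' > /sys/kernel/debug/iommu/tests/<device>/unmap
 */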

static ssize_t iommu_debug_config_clocks_write(struct file *file,
					       const char __user *ubuf,
					       size_t count, loff_t *offset)
{
	char buf;
	struct iommu_debug_device *ddev = file->private_data;
	struct device *dev = ddev->dev;

	/* we're expecting a single character plus (optionally) a newline */
	if (count > 2) {
		dev_err(dev, "Invalid value\n");
		return -EINVAL;
	}

	if (!ddev->domain) {
		dev_err(dev, "No domain. Did you already attach?\n");
		return -EINVAL;
	}

	if (copy_from_user(&buf, ubuf, 1)) {
		dev_err(dev, "Couldn't copy from user\n");
		return -EFAULT;
	}

	switch (buf) {
	case '0':
		dev_err(dev, "Disabling config clocks\n");
		iommu_disable_config_clocks(ddev->domain);
		break;
	case '1':
		dev_err(dev, "Enabling config clocks\n");
		if (iommu_enable_config_clocks(ddev->domain))
			dev_err(dev, "Failed!\n");
		break;
	default:
		dev_err(dev, "Invalid value. Should be 0 or 1.\n");
		return -EINVAL;
	}

	return count;
}

static const struct file_operations iommu_debug_config_clocks_fops = {
	.open	= simple_open,
	.write	= iommu_debug_config_clocks_write,
};
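
/*
 * Example config_clocks usage, as a sketch (requires an attached domain):
 *
 *	echo 1 > /sys/kernel/debug/iommu/tests/<device>/config_clocks  # enable
 *	echo 0 > /sys/kernel/debug/iommu/tests/<device>/config_clocks  # disable
 */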

/*
 * The following will only work for drivers that implement the generic
 * device tree bindings described in
 * Documentation/devicetree/bindings/iommu/iommu.txt
 */
static int snarf_iommu_devices(struct device *dev, void *ignored)
{
	struct iommu_debug_device *ddev;
	struct dentry *dir;

	if (!of_find_property(dev->of_node, "iommus", NULL))
		return 0;

	ddev = kzalloc(sizeof(*ddev), GFP_KERNEL);
	if (!ddev)
		return -ENOMEM;
	ddev->dev = dev;
	dir = debugfs_create_dir(dev_name(dev), debugfs_tests_dir);
	if (!dir) {
		pr_err("Couldn't create iommu/tests/%s debugfs dir\n",
		       dev_name(dev));
		goto err;
	}

	if (!debugfs_create_file("nr_iters", S_IRUSR, dir, &iters_per_op,
				 &iommu_debug_nr_iters_ops)) {
		pr_err("Couldn't create iommu/tests/%s/nr_iters debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("profiling", S_IRUSR, dir, ddev,
				 &iommu_debug_profiling_fops)) {
		pr_err("Couldn't create iommu/tests/%s/profiling debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("secure_profiling", S_IRUSR, dir, ddev,
				 &iommu_debug_secure_profiling_fops)) {
		pr_err("Couldn't create iommu/tests/%s/secure_profiling debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("profiling_fast", S_IRUSR, dir, ddev,
				 &iommu_debug_profiling_fast_fops)) {
		pr_err("Couldn't create iommu/tests/%s/profiling_fast debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("profiling_fast_dma_api", S_IRUSR, dir, ddev,
				 &iommu_debug_profiling_fast_dma_api_fops)) {
		pr_err("Couldn't create iommu/tests/%s/profiling_fast_dma_api debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("functional_fast_dma_api", S_IRUSR, dir, ddev,
				 &iommu_debug_functional_fast_dma_api_fops)) {
		pr_err("Couldn't create iommu/tests/%s/functional_fast_dma_api debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("functional_arm_dma_api", S_IRUSR, dir, ddev,
				 &iommu_debug_functional_arm_dma_api_fops)) {
		pr_err("Couldn't create iommu/tests/%s/functional_arm_dma_api debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("attach", S_IRUSR, dir, ddev,
				 &iommu_debug_attach_fops)) {
		pr_err("Couldn't create iommu/tests/%s/attach debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("secure_attach", S_IRUSR, dir, ddev,
				 &iommu_debug_secure_attach_fops)) {
		pr_err("Couldn't create iommu/tests/%s/secure_attach debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("atos", S_IWUSR, dir, ddev,
				 &iommu_debug_atos_fops)) {
		pr_err("Couldn't create iommu/tests/%s/atos debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("map", S_IWUSR, dir, ddev,
				 &iommu_debug_map_fops)) {
		pr_err("Couldn't create iommu/tests/%s/map debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("unmap", S_IWUSR, dir, ddev,
				 &iommu_debug_unmap_fops)) {
		pr_err("Couldn't create iommu/tests/%s/unmap debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("config_clocks", S_IWUSR, dir, ddev,
				 &iommu_debug_config_clocks_fops)) {
		pr_err("Couldn't create iommu/tests/%s/config_clocks debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	list_add(&ddev->list, &iommu_debug_devices);
	return 0;

err_rmdir:
	debugfs_remove_recursive(dir);
err:
	kfree(ddev);
	return 0;
}
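
/*
 * The resulting per-device directory looks roughly like this (a sketch,
 * assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	/sys/kernel/debug/iommu/tests/<device>/
 *		nr_iters		profiling
 *		secure_profiling	profiling_fast
 *		profiling_fast_dma_api	functional_fast_dma_api
 *		functional_arm_dma_api	attach
 *		secure_attach		atos
 *		map			unmap
 *		config_clocks
 */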

static int iommu_debug_init_tests(void)
{
	debugfs_tests_dir = debugfs_create_dir("tests",
					       iommu_debugfs_top);
	if (!debugfs_tests_dir) {
		pr_err("Couldn't create iommu/tests debugfs directory\n");
		return -ENODEV;
	}

	return bus_for_each_dev(&platform_bus_type, NULL, NULL,
				snarf_iommu_devices);
}

static void iommu_debug_destroy_tests(void)
{
	debugfs_remove_recursive(debugfs_tests_dir);
}
#else
static inline int iommu_debug_init_tests(void) { return 0; }
static inline void iommu_debug_destroy_tests(void) { }
#endif

/*
 * This isn't really a "driver"; we just need something in the device tree
 * so that our tests can run without any client drivers, since our tests
 * rely on parsing the device tree for nodes with the `iommus' property.
 */
static int iommu_debug_pass(struct platform_device *pdev)
{
	return 0;
}

static const struct of_device_id iommu_debug_of_match[] = {
	{ .compatible = "iommu-debug-test" },
	{ },
};
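
/*
 * A minimal sketch of a matching device tree node. The node name and the
 * SMMU phandle/SID below are illustrative assumptions, not taken from this
 * file or any particular platform:
 *
 *	iommu_test_device {
 *		compatible = "iommu-debug-test";
 *		iommus = <&smmu 0x42>;
 *	};
 */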

static struct platform_driver iommu_debug_driver = {
	.probe = iommu_debug_pass,
	.remove = iommu_debug_pass,
	.driver = {
		.name = "iommu-debug",
		.of_match_table = iommu_debug_of_match,
	},
};

static int iommu_debug_init(void)
{
	if (iommu_debug_init_tracking())
		return -ENODEV;

	if (iommu_debug_init_tests())
		return -ENODEV;

	return platform_driver_register(&iommu_debug_driver);
}

static void iommu_debug_exit(void)
{
	platform_driver_unregister(&iommu_debug_driver);
	iommu_debug_destroy_tracking();
	iommu_debug_destroy_tests();
}

module_init(iommu_debug_init);
module_exit(iommu_debug_exit);