blob: 5730126dbc753b5421e65643f78024f0c194f36d [file] [log] [blame]
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07001/*
Charan Teja Reddy29f61402017-02-09 20:44:29 +05302 * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14
15#define pr_fmt(fmt) "iommu-debug: %s: " fmt, __func__
16
17#include <linux/debugfs.h>
18#include <linux/device.h>
19#include <linux/iommu.h>
20#include <linux/of.h>
21#include <linux/platform_device.h>
22#include <linux/slab.h>
23#include <linux/module.h>
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -070024#include <linux/uaccess.h>
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -070025#include <linux/dma-contiguous.h>
Mitchel Humpherys5e991f12015-07-30 19:25:54 -070026#include <soc/qcom/secure_buffer.h>
Mitchel Humpherysbe3060c2015-10-08 15:08:01 -070027#include <linux/dma-mapping.h>
28#include <asm/cacheflush.h>
29#include <asm/dma-iommu.h>
Mitchel Humpherys42296fb2015-06-23 16:29:16 -070030
Susheel Khiania4417e72016-07-12 11:28:32 +053031#if defined(CONFIG_IOMMU_DEBUG_TRACKING) || defined(CONFIG_IOMMU_TESTS)
32
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -070033static const char *iommu_debug_attr_to_string(enum iommu_attr attr)
34{
35 switch (attr) {
36 case DOMAIN_ATTR_GEOMETRY:
37 return "DOMAIN_ATTR_GEOMETRY";
38 case DOMAIN_ATTR_PAGING:
39 return "DOMAIN_ATTR_PAGING";
40 case DOMAIN_ATTR_WINDOWS:
41 return "DOMAIN_ATTR_WINDOWS";
42 case DOMAIN_ATTR_FSL_PAMU_STASH:
43 return "DOMAIN_ATTR_FSL_PAMU_STASH";
44 case DOMAIN_ATTR_FSL_PAMU_ENABLE:
45 return "DOMAIN_ATTR_FSL_PAMU_ENABLE";
46 case DOMAIN_ATTR_FSL_PAMUV1:
47 return "DOMAIN_ATTR_FSL_PAMUV1";
48 case DOMAIN_ATTR_NESTING:
49 return "DOMAIN_ATTR_NESTING";
50 case DOMAIN_ATTR_PT_BASE_ADDR:
51 return "DOMAIN_ATTR_PT_BASE_ADDR";
52 case DOMAIN_ATTR_SECURE_VMID:
53 return "DOMAIN_ATTR_SECURE_VMID";
54 case DOMAIN_ATTR_ATOMIC:
55 return "DOMAIN_ATTR_ATOMIC";
56 case DOMAIN_ATTR_CONTEXT_BANK:
57 return "DOMAIN_ATTR_CONTEXT_BANK";
58 case DOMAIN_ATTR_TTBR0:
59 return "DOMAIN_ATTR_TTBR0";
60 case DOMAIN_ATTR_CONTEXTIDR:
61 return "DOMAIN_ATTR_CONTEXTIDR";
62 case DOMAIN_ATTR_PROCID:
63 return "DOMAIN_ATTR_PROCID";
64 case DOMAIN_ATTR_DYNAMIC:
65 return "DOMAIN_ATTR_DYNAMIC";
66 case DOMAIN_ATTR_NON_FATAL_FAULTS:
67 return "DOMAIN_ATTR_NON_FATAL_FAULTS";
68 case DOMAIN_ATTR_S1_BYPASS:
69 return "DOMAIN_ATTR_S1_BYPASS";
70 case DOMAIN_ATTR_FAST:
71 return "DOMAIN_ATTR_FAST";
Patrick Dalyef6c1dc2016-11-16 14:35:23 -080072 case DOMAIN_ATTR_EARLY_MAP:
73 return "DOMAIN_ATTR_EARLY_MAP";
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -070074 default:
75 return "Unknown attr!";
76 }
77}
Susheel Khiania4417e72016-07-12 11:28:32 +053078#endif
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -070079
Mitchel Humpherys42296fb2015-06-23 16:29:16 -070080#ifdef CONFIG_IOMMU_DEBUG_TRACKING
81
/* Serializes all access to the iommu_debug_attachments list below. */
static DEFINE_MUTEX(iommu_debug_attachments_lock);
/* Every domain known to the tracker, attached to a device or not. */
static LIST_HEAD(iommu_debug_attachments);
/* <debugfs>/iommu/attachments; NULL until iommu_debug_init_tracking runs. */
static struct dentry *debugfs_attachments_dir;

/*
 * One tracked domain.  dev is NULL while the domain exists but is not
 * attached to any device (see iommu_debug_domain_add/attach_device).
 */
struct iommu_debug_attachment {
	struct iommu_domain *domain;
	struct device *dev;
	struct dentry *dentry;
	struct list_head list;
	/* offset used by the reg_read/reg_write debugfs files */
	unsigned long reg_offset;
};
93
Mitchel Humpherys088cc582015-07-09 15:02:03 -070094static int iommu_debug_attachment_info_show(struct seq_file *s, void *ignored)
Mitchel Humpherys42296fb2015-06-23 16:29:16 -070095{
96 struct iommu_debug_attachment *attach = s->private;
Mitchel Humpherys5e991f12015-07-30 19:25:54 -070097 int secure_vmid;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -070098
99 seq_printf(s, "Domain: 0x%p\n", attach->domain);
Mitchel Humpherys5e991f12015-07-30 19:25:54 -0700100
101 seq_puts(s, "SECURE_VMID: ");
102 if (iommu_domain_get_attr(attach->domain,
103 DOMAIN_ATTR_SECURE_VMID,
104 &secure_vmid))
105 seq_puts(s, "(Unknown)\n");
106 else
107 seq_printf(s, "%s (0x%x)\n",
108 msm_secure_vmid_to_string(secure_vmid), secure_vmid);
109
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700110 return 0;
111}
112
Mitchel Humpherys088cc582015-07-09 15:02:03 -0700113static int iommu_debug_attachment_info_open(struct inode *inode,
114 struct file *file)
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700115{
Mitchel Humpherys088cc582015-07-09 15:02:03 -0700116 return single_open(file, iommu_debug_attachment_info_show,
117 inode->i_private);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700118}
119
/* Read-only seq_file interface for the per-attachment "info" file. */
static const struct file_operations iommu_debug_attachment_info_fops = {
	.open = iommu_debug_attachment_info_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
126
Mitchel Humpherys288086e2015-07-09 16:55:08 -0700127static ssize_t iommu_debug_attachment_trigger_fault_write(
128 struct file *file, const char __user *ubuf, size_t count,
129 loff_t *offset)
130{
131 struct iommu_debug_attachment *attach = file->private_data;
132 unsigned long flags;
133
134 if (kstrtoul_from_user(ubuf, count, 0, &flags)) {
135 pr_err("Invalid flags format\n");
136 return -EFAULT;
137 }
138
139 iommu_trigger_fault(attach->domain, flags);
140
141 return count;
142}
143
/* Write-only "trigger_fault" file; value written is the fault flags. */
static const struct file_operations
iommu_debug_attachment_trigger_fault_fops = {
	.open = simple_open,
	.write = iommu_debug_attachment_trigger_fault_write,
};
149
Mitchel Humpherys0dc04de2015-08-21 14:08:40 -0700150static ssize_t iommu_debug_attachment_reg_offset_write(
151 struct file *file, const char __user *ubuf, size_t count,
152 loff_t *offset)
153{
154 struct iommu_debug_attachment *attach = file->private_data;
155 unsigned long reg_offset;
156
157 if (kstrtoul_from_user(ubuf, count, 0, &reg_offset)) {
158 pr_err("Invalid reg_offset format\n");
159 return -EFAULT;
160 }
161
162 attach->reg_offset = reg_offset;
163
164 return count;
165}
166
/* Write-only "reg_offset" file; selects the register for reg_read/reg_write. */
static const struct file_operations iommu_debug_attachment_reg_offset_fops = {
	.open = simple_open,
	.write = iommu_debug_attachment_reg_offset_write,
};
171
172static ssize_t iommu_debug_attachment_reg_read_read(
173 struct file *file, char __user *ubuf, size_t count, loff_t *offset)
174{
175 struct iommu_debug_attachment *attach = file->private_data;
176 unsigned long val;
177 char *val_str;
178 ssize_t val_str_len;
179
180 if (*offset)
181 return 0;
182
183 val = iommu_reg_read(attach->domain, attach->reg_offset);
184 val_str = kasprintf(GFP_KERNEL, "0x%lx\n", val);
185 if (!val_str)
186 return -ENOMEM;
187 val_str_len = strlen(val_str);
188
189 if (copy_to_user(ubuf, val_str, val_str_len)) {
190 pr_err("copy_to_user failed\n");
191 val_str_len = -EFAULT;
192 goto out;
193 }
194 *offset = 1; /* non-zero means we're done */
195
196out:
197 kfree(val_str);
198 return val_str_len;
199}
200
/* Read-only "reg_read" file; returns the register at attach->reg_offset. */
static const struct file_operations iommu_debug_attachment_reg_read_fops = {
	.open = simple_open,
	.read = iommu_debug_attachment_reg_read_read,
};
205
206static ssize_t iommu_debug_attachment_reg_write_write(
207 struct file *file, const char __user *ubuf, size_t count,
208 loff_t *offset)
209{
210 struct iommu_debug_attachment *attach = file->private_data;
211 unsigned long val;
212
213 if (kstrtoul_from_user(ubuf, count, 0, &val)) {
214 pr_err("Invalid val format\n");
215 return -EFAULT;
216 }
217
218 iommu_reg_write(attach->domain, attach->reg_offset, val);
219
220 return count;
221}
222
/* Write-only "reg_write" file; writes the register at attach->reg_offset. */
static const struct file_operations iommu_debug_attachment_reg_write_fops = {
	.open = simple_open,
	.write = iommu_debug_attachment_reg_write_write,
};
227
Mitchel Humpherysa05b2522015-07-14 14:30:33 -0700228/* should be called with iommu_debug_attachments_lock locked */
229static int iommu_debug_attach_add_debugfs(
230 struct iommu_debug_attachment *attach)
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700231{
Mitchel Humpherys54379212015-08-26 11:52:57 -0700232 const char *attach_name;
Mitchel Humpherysa05b2522015-07-14 14:30:33 -0700233 struct device *dev = attach->dev;
234 struct iommu_domain *domain = attach->domain;
Mitchel Humpherys54379212015-08-26 11:52:57 -0700235 int is_dynamic;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700236
Mitchel Humpherys54379212015-08-26 11:52:57 -0700237 if (iommu_domain_get_attr(domain, DOMAIN_ATTR_DYNAMIC, &is_dynamic))
238 is_dynamic = 0;
239
240 if (is_dynamic) {
241 uuid_le uuid;
242
243 uuid_le_gen(&uuid);
244 attach_name = kasprintf(GFP_KERNEL, "%s-%pUl", dev_name(dev),
245 uuid.b);
246 if (!attach_name)
247 return -ENOMEM;
248 } else {
249 attach_name = dev_name(dev);
250 }
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700251
Mitchel Humpherys088cc582015-07-09 15:02:03 -0700252 attach->dentry = debugfs_create_dir(attach_name,
253 debugfs_attachments_dir);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700254 if (!attach->dentry) {
Mitchel Humpherys088cc582015-07-09 15:02:03 -0700255 pr_err("Couldn't create iommu/attachments/%s debugfs directory for domain 0x%p\n",
Mitchel Humpherys876e2be2015-07-10 11:56:56 -0700256 attach_name, domain);
Mitchel Humpherys54379212015-08-26 11:52:57 -0700257 if (is_dynamic)
258 kfree(attach_name);
Mitchel Humpherysa05b2522015-07-14 14:30:33 -0700259 return -EIO;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700260 }
Mitchel Humpherys54379212015-08-26 11:52:57 -0700261
262 if (is_dynamic)
263 kfree(attach_name);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700264
Mitchel Humpherys088cc582015-07-09 15:02:03 -0700265 if (!debugfs_create_file(
266 "info", S_IRUSR, attach->dentry, attach,
267 &iommu_debug_attachment_info_fops)) {
268 pr_err("Couldn't create iommu/attachments/%s/info debugfs file for domain 0x%p\n",
269 dev_name(dev), domain);
Mitchel Humpherysa05b2522015-07-14 14:30:33 -0700270 goto err_rmdir;
Mitchel Humpherys088cc582015-07-09 15:02:03 -0700271 }
272
Mitchel Humpherys288086e2015-07-09 16:55:08 -0700273 if (!debugfs_create_file(
274 "trigger_fault", S_IRUSR, attach->dentry, attach,
275 &iommu_debug_attachment_trigger_fault_fops)) {
276 pr_err("Couldn't create iommu/attachments/%s/trigger_fault debugfs file for domain 0x%p\n",
277 dev_name(dev), domain);
278 goto err_rmdir;
Mitchel Humpherys0dc04de2015-08-21 14:08:40 -0700279 }
280
281 if (!debugfs_create_file(
282 "reg_offset", S_IRUSR, attach->dentry, attach,
283 &iommu_debug_attachment_reg_offset_fops)) {
284 pr_err("Couldn't create iommu/attachments/%s/reg_offset debugfs file for domain 0x%p\n",
285 dev_name(dev), domain);
286 goto err_rmdir;
287 }
288
289 if (!debugfs_create_file(
290 "reg_read", S_IRUSR, attach->dentry, attach,
291 &iommu_debug_attachment_reg_read_fops)) {
292 pr_err("Couldn't create iommu/attachments/%s/reg_read debugfs file for domain 0x%p\n",
293 dev_name(dev), domain);
294 goto err_rmdir;
295 }
296
297 if (!debugfs_create_file(
298 "reg_write", S_IRUSR, attach->dentry, attach,
299 &iommu_debug_attachment_reg_write_fops)) {
300 pr_err("Couldn't create iommu/attachments/%s/reg_write debugfs file for domain 0x%p\n",
301 dev_name(dev), domain);
302 goto err_rmdir;
303 }
304
Mitchel Humpherysa05b2522015-07-14 14:30:33 -0700305 return 0;
306
307err_rmdir:
308 debugfs_remove_recursive(attach->dentry);
309 return -EIO;
310}
311
Susheel Khianie66aa5b2015-08-25 17:25:42 +0530312void iommu_debug_domain_add(struct iommu_domain *domain)
Mitchel Humpherysa05b2522015-07-14 14:30:33 -0700313{
314 struct iommu_debug_attachment *attach;
315
316 mutex_lock(&iommu_debug_attachments_lock);
317
318 attach = kmalloc(sizeof(*attach), GFP_KERNEL);
319 if (!attach)
320 goto out_unlock;
321
322 attach->domain = domain;
Susheel Khianie66aa5b2015-08-25 17:25:42 +0530323 attach->dev = NULL;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700324 list_add(&attach->list, &iommu_debug_attachments);
Susheel Khianie66aa5b2015-08-25 17:25:42 +0530325
Mitchel Humpherysa05b2522015-07-14 14:30:33 -0700326out_unlock:
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700327 mutex_unlock(&iommu_debug_attachments_lock);
328}
329
Susheel Khianie66aa5b2015-08-25 17:25:42 +0530330void iommu_debug_domain_remove(struct iommu_domain *domain)
331{
332 struct iommu_debug_attachment *it;
333
334 mutex_lock(&iommu_debug_attachments_lock);
335 list_for_each_entry(it, &iommu_debug_attachments, list)
336 if (it->domain == domain && it->dev == NULL)
337 break;
338
339 if (&it->list == &iommu_debug_attachments) {
340 WARN(1, "Couldn't find debug attachment for domain=0x%p",
341 domain);
342 } else {
343 list_del(&it->list);
344 kfree(it);
345 }
346 mutex_unlock(&iommu_debug_attachments_lock);
347}
348
349void iommu_debug_attach_device(struct iommu_domain *domain,
350 struct device *dev)
351{
352 struct iommu_debug_attachment *attach;
353
354 mutex_lock(&iommu_debug_attachments_lock);
355
356 list_for_each_entry(attach, &iommu_debug_attachments, list)
357 if (attach->domain == domain && attach->dev == NULL)
358 break;
359
360 if (&attach->list == &iommu_debug_attachments) {
361 WARN(1, "Couldn't find debug attachment for domain=0x%p dev=%s",
362 domain, dev_name(dev));
363 } else {
364 attach->dev = dev;
365
366 /*
367 * we might not init until after other drivers start calling
368 * iommu_attach_device. Only set up the debugfs nodes if we've
369 * already init'd to avoid polluting the top-level debugfs
370 * directory (by calling debugfs_create_dir with a NULL
371 * parent). These will be flushed out later once we init.
372 */
373
374 if (debugfs_attachments_dir)
375 iommu_debug_attach_add_debugfs(attach);
376 }
377
378 mutex_unlock(&iommu_debug_attachments_lock);
379}
380
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700381void iommu_debug_detach_device(struct iommu_domain *domain,
382 struct device *dev)
383{
384 struct iommu_debug_attachment *it;
385
386 mutex_lock(&iommu_debug_attachments_lock);
387 list_for_each_entry(it, &iommu_debug_attachments, list)
388 if (it->domain == domain && it->dev == dev)
389 break;
390
391 if (&it->list == &iommu_debug_attachments) {
392 WARN(1, "Couldn't find debug attachment for domain=0x%p dev=%s",
393 domain, dev_name(dev));
394 } else {
Susheel Khianie66aa5b2015-08-25 17:25:42 +0530395 /*
396 * Just remove debugfs entry and mark dev as NULL on
397 * iommu_detach call. We would remove the actual
398 * attachment entry from the list only on domain_free call.
399 * This is to ensure we keep track of unattached domains too.
400 */
401
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700402 debugfs_remove_recursive(it->dentry);
Susheel Khianie66aa5b2015-08-25 17:25:42 +0530403 it->dev = NULL;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700404 }
405 mutex_unlock(&iommu_debug_attachments_lock);
406}
407
408static int iommu_debug_init_tracking(void)
409{
Mitchel Humpherysa05b2522015-07-14 14:30:33 -0700410 int ret = 0;
411 struct iommu_debug_attachment *attach;
412
413 mutex_lock(&iommu_debug_attachments_lock);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700414 debugfs_attachments_dir = debugfs_create_dir("attachments",
Mitchel Humpherysc75ae492015-07-15 18:27:36 -0700415 iommu_debugfs_top);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700416 if (!debugfs_attachments_dir) {
417 pr_err("Couldn't create iommu/attachments debugfs directory\n");
Mitchel Humpherysa05b2522015-07-14 14:30:33 -0700418 ret = -ENODEV;
419 goto out_unlock;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700420 }
421
Mitchel Humpherysa05b2522015-07-14 14:30:33 -0700422 /* set up debugfs entries for attachments made during early boot */
423 list_for_each_entry(attach, &iommu_debug_attachments, list)
Susheel Khianie66aa5b2015-08-25 17:25:42 +0530424 if (attach->dev)
425 iommu_debug_attach_add_debugfs(attach);
Mitchel Humpherysa05b2522015-07-14 14:30:33 -0700426
427out_unlock:
428 mutex_unlock(&iommu_debug_attachments_lock);
429 return ret;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700430}
Mitchel Humpherysc75ae492015-07-15 18:27:36 -0700431
432static void iommu_debug_destroy_tracking(void)
433{
434 debugfs_remove_recursive(debugfs_attachments_dir);
435}
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700436#else
/* No-op stubs when CONFIG_IOMMU_DEBUG_TRACKING is disabled. */
static inline int iommu_debug_init_tracking(void) { return 0; }
static inline void iommu_debug_destroy_tracking(void) { }
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700439#endif
440
441#ifdef CONFIG_IOMMU_TESTS
442
Susheel Khiania4417e72016-07-12 11:28:32 +0530443#ifdef CONFIG_64BIT
444
445#define kstrtoux kstrtou64
Patrick Daly9ef01862016-10-13 20:03:50 -0700446#define kstrtox_from_user kstrtoull_from_user
Susheel Khiania4417e72016-07-12 11:28:32 +0530447#define kstrtosize_t kstrtoul
448
449#else
450
451#define kstrtoux kstrtou32
Patrick Daly9ef01862016-10-13 20:03:50 -0700452#define kstrtox_from_user kstrtouint_from_user
Susheel Khiania4417e72016-07-12 11:28:32 +0530453#define kstrtosize_t kstrtouint
454
455#endif
456
/* All devices registered with the test interface. */
static LIST_HEAD(iommu_debug_devices);
/* <debugfs>/iommu/tests; parent of the per-device test directories. */
static struct dentry *debugfs_tests_dir;
/* Iterations averaged per profiling measurement (settable via debugfs). */
static u32 iters_per_op = 1;

/* Per-device state for the IOMMU test interface. */
struct iommu_debug_device {
	struct device *dev;
	struct iommu_domain *domain;
	/* iova/phys/len used by the map/unmap test files */
	u64 iova;
	u64 phys;
	size_t len;
	struct list_head list;
};
469
470static int iommu_debug_build_phoney_sg_table(struct device *dev,
471 struct sg_table *table,
472 unsigned long total_size,
473 unsigned long chunk_size)
474{
475 unsigned long nents = total_size / chunk_size;
476 struct scatterlist *sg;
477 int i;
478 struct page *page;
479
480 if (!IS_ALIGNED(total_size, PAGE_SIZE))
481 return -EINVAL;
482 if (!IS_ALIGNED(total_size, chunk_size))
483 return -EINVAL;
484 if (sg_alloc_table(table, nents, GFP_KERNEL))
485 return -EINVAL;
486 page = alloc_pages(GFP_KERNEL, get_order(chunk_size));
487 if (!page)
488 goto free_table;
489
490 /* all the same page... why not. */
491 for_each_sg(table->sgl, sg, table->nents, i)
492 sg_set_page(sg, page, chunk_size, 0);
493
494 return 0;
495
496free_table:
497 sg_free_table(table);
498 return -ENOMEM;
499}
500
501static void iommu_debug_destroy_phoney_sg_table(struct device *dev,
502 struct sg_table *table,
503 unsigned long chunk_size)
504{
505 __free_pages(sg_page(table->sgl), get_order(chunk_size));
506 sg_free_table(table);
507}
508
509static const char * const _size_to_string(unsigned long size)
510{
511 switch (size) {
512 case SZ_4K:
513 return "4K";
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700514 case SZ_8K:
515 return "8K";
516 case SZ_16K:
517 return "16K";
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700518 case SZ_64K:
519 return "64K";
520 case SZ_2M:
521 return "2M";
522 case SZ_1M * 12:
523 return "12M";
524 case SZ_1M * 20:
525 return "20M";
526 }
527 return "unknown size, please add to _size_to_string";
528}
529
Patrick Dalye4e39862015-11-20 20:00:50 -0800530static int nr_iters_set(void *data, u64 val)
531{
532 if (!val)
533 val = 1;
534 if (val > 10000)
535 val = 10000;
536 *(u32 *)data = val;
537 return 0;
538}
539
540static int nr_iters_get(void *data, u64 *val)
541{
542 *val = *(u32 *)data;
543 return 0;
544}
545
546DEFINE_SIMPLE_ATTRIBUTE(iommu_debug_nr_iters_ops,
547 nr_iters_get, nr_iters_set, "%llu\n");
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700548
Mitchel Humpherys020f90f2015-10-02 16:02:31 -0700549static void iommu_debug_device_profiling(struct seq_file *s, struct device *dev,
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700550 enum iommu_attr attrs[],
551 void *attr_values[], int nattrs,
Susheel Khiania4417e72016-07-12 11:28:32 +0530552 const size_t sizes[])
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700553{
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700554 int i;
Susheel Khiania4417e72016-07-12 11:28:32 +0530555 const size_t *sz;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700556 struct iommu_domain *domain;
557 unsigned long iova = 0x10000;
558 phys_addr_t paddr = 0xa000;
559
560 domain = iommu_domain_alloc(&platform_bus_type);
561 if (!domain) {
562 seq_puts(s, "Couldn't allocate domain\n");
563 return;
564 }
565
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700566 seq_puts(s, "Domain attributes: [ ");
567 for (i = 0; i < nattrs; ++i) {
568 /* not all attrs are ints, but this will get us by for now */
569 seq_printf(s, "%s=%d%s", iommu_debug_attr_to_string(attrs[i]),
570 *((int *)attr_values[i]),
571 i < nattrs ? " " : "");
Mitchel Humpherys679567c2015-08-28 10:51:24 -0700572 }
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700573 seq_puts(s, "]\n");
574 for (i = 0; i < nattrs; ++i) {
575 if (iommu_domain_set_attr(domain, attrs[i], attr_values[i])) {
576 seq_printf(s, "Couldn't set %d to the value at %p\n",
577 attrs[i], attr_values[i]);
Mitchel Humpherys020f90f2015-10-02 16:02:31 -0700578 goto out_domain_free;
579 }
580 }
581
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700582 if (iommu_attach_device(domain, dev)) {
583 seq_puts(s,
584 "Couldn't attach new domain to device. Is it already attached?\n");
585 goto out_domain_free;
586 }
587
Patrick Dalye4e39862015-11-20 20:00:50 -0800588 seq_printf(s, "(average over %d iterations)\n", iters_per_op);
Patrick Daly3ca31e32015-11-20 20:33:04 -0800589 seq_printf(s, "%8s %19s %16s\n", "size", "iommu_map", "iommu_unmap");
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700590 for (sz = sizes; *sz; ++sz) {
Susheel Khiania4417e72016-07-12 11:28:32 +0530591 size_t size = *sz;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700592 size_t unmapped;
Patrick Daly3ca31e32015-11-20 20:33:04 -0800593 u64 map_elapsed_ns = 0, unmap_elapsed_ns = 0;
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700594 u64 map_elapsed_us = 0, unmap_elapsed_us = 0;
Patrick Daly3ca31e32015-11-20 20:33:04 -0800595 u32 map_elapsed_rem = 0, unmap_elapsed_rem = 0;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700596 struct timespec tbefore, tafter, diff;
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700597 int i;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700598
Patrick Dalye4e39862015-11-20 20:00:50 -0800599 for (i = 0; i < iters_per_op; ++i) {
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700600 getnstimeofday(&tbefore);
601 if (iommu_map(domain, iova, paddr, size,
602 IOMMU_READ | IOMMU_WRITE)) {
603 seq_puts(s, "Failed to map\n");
604 continue;
605 }
606 getnstimeofday(&tafter);
607 diff = timespec_sub(tafter, tbefore);
Patrick Daly3ca31e32015-11-20 20:33:04 -0800608 map_elapsed_ns += timespec_to_ns(&diff);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700609
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700610 getnstimeofday(&tbefore);
611 unmapped = iommu_unmap(domain, iova, size);
612 if (unmapped != size) {
613 seq_printf(s,
614 "Only unmapped %zx instead of %zx\n",
615 unmapped, size);
616 continue;
617 }
618 getnstimeofday(&tafter);
619 diff = timespec_sub(tafter, tbefore);
Patrick Daly3ca31e32015-11-20 20:33:04 -0800620 unmap_elapsed_ns += timespec_to_ns(&diff);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700621 }
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700622
Susheel Khiania4417e72016-07-12 11:28:32 +0530623 map_elapsed_ns = div_u64_rem(map_elapsed_ns, iters_per_op,
624 &map_elapsed_rem);
625 unmap_elapsed_ns = div_u64_rem(unmap_elapsed_ns, iters_per_op,
626 &unmap_elapsed_rem);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700627
Patrick Daly3ca31e32015-11-20 20:33:04 -0800628 map_elapsed_us = div_u64_rem(map_elapsed_ns, 1000,
629 &map_elapsed_rem);
630 unmap_elapsed_us = div_u64_rem(unmap_elapsed_ns, 1000,
631 &unmap_elapsed_rem);
632
633 seq_printf(s, "%8s %12lld.%03d us %9lld.%03d us\n",
634 _size_to_string(size),
635 map_elapsed_us, map_elapsed_rem,
636 unmap_elapsed_us, unmap_elapsed_rem);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700637 }
638
639 seq_putc(s, '\n');
Patrick Daly3ca31e32015-11-20 20:33:04 -0800640 seq_printf(s, "%8s %19s %16s\n", "size", "iommu_map_sg", "iommu_unmap");
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700641 for (sz = sizes; *sz; ++sz) {
Susheel Khiania4417e72016-07-12 11:28:32 +0530642 size_t size = *sz;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700643 size_t unmapped;
Patrick Daly3ca31e32015-11-20 20:33:04 -0800644 u64 map_elapsed_ns = 0, unmap_elapsed_ns = 0;
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700645 u64 map_elapsed_us = 0, unmap_elapsed_us = 0;
Patrick Daly3ca31e32015-11-20 20:33:04 -0800646 u32 map_elapsed_rem = 0, unmap_elapsed_rem = 0;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700647 struct timespec tbefore, tafter, diff;
648 struct sg_table table;
649 unsigned long chunk_size = SZ_4K;
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700650 int i;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700651
652 if (iommu_debug_build_phoney_sg_table(dev, &table, size,
653 chunk_size)) {
654 seq_puts(s,
655 "couldn't build phoney sg table! bailing...\n");
656 goto out_detach;
657 }
658
Patrick Dalye4e39862015-11-20 20:00:50 -0800659 for (i = 0; i < iters_per_op; ++i) {
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700660 getnstimeofday(&tbefore);
661 if (iommu_map_sg(domain, iova, table.sgl, table.nents,
662 IOMMU_READ | IOMMU_WRITE) != size) {
663 seq_puts(s, "Failed to map_sg\n");
664 goto next;
665 }
666 getnstimeofday(&tafter);
667 diff = timespec_sub(tafter, tbefore);
Patrick Daly3ca31e32015-11-20 20:33:04 -0800668 map_elapsed_ns += timespec_to_ns(&diff);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700669
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700670 getnstimeofday(&tbefore);
671 unmapped = iommu_unmap(domain, iova, size);
672 if (unmapped != size) {
673 seq_printf(s,
674 "Only unmapped %zx instead of %zx\n",
675 unmapped, size);
676 goto next;
677 }
678 getnstimeofday(&tafter);
679 diff = timespec_sub(tafter, tbefore);
Patrick Daly3ca31e32015-11-20 20:33:04 -0800680 unmap_elapsed_ns += timespec_to_ns(&diff);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700681 }
Mitchel Humpherys1c47bb52015-10-02 16:17:57 -0700682
Susheel Khiania4417e72016-07-12 11:28:32 +0530683 map_elapsed_ns = div_u64_rem(map_elapsed_ns, iters_per_op,
684 &map_elapsed_rem);
685 unmap_elapsed_ns = div_u64_rem(unmap_elapsed_ns, iters_per_op,
686 &unmap_elapsed_rem);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700687
Patrick Daly3ca31e32015-11-20 20:33:04 -0800688 map_elapsed_us = div_u64_rem(map_elapsed_ns, 1000,
689 &map_elapsed_rem);
690 unmap_elapsed_us = div_u64_rem(unmap_elapsed_ns, 1000,
691 &unmap_elapsed_rem);
692
693 seq_printf(s, "%8s %12lld.%03d us %9lld.%03d us\n",
694 _size_to_string(size),
695 map_elapsed_us, map_elapsed_rem,
696 unmap_elapsed_us, unmap_elapsed_rem);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700697
698next:
699 iommu_debug_destroy_phoney_sg_table(dev, &table, chunk_size);
700 }
701
702out_detach:
703 iommu_detach_device(domain, dev);
704out_domain_free:
705 iommu_domain_free(domain);
706}
707
Mitchel Humpherys7cc56e42015-07-06 14:58:23 -0700708static int iommu_debug_profiling_show(struct seq_file *s, void *ignored)
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700709{
710 struct iommu_debug_device *ddev = s->private;
Susheel Khiania4417e72016-07-12 11:28:32 +0530711 const size_t sizes[] = { SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12,
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700712 SZ_1M * 20, 0 };
713 enum iommu_attr attrs[] = {
714 DOMAIN_ATTR_ATOMIC,
715 };
716 int htw_disable = 1, atomic = 1;
717 void *attr_values[] = { &htw_disable, &atomic };
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700718
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700719 iommu_debug_device_profiling(s, ddev->dev, attrs, attr_values,
720 ARRAY_SIZE(attrs), sizes);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700721
722 return 0;
723}
724
Mitchel Humpherys7cc56e42015-07-06 14:58:23 -0700725static int iommu_debug_profiling_open(struct inode *inode, struct file *file)
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700726{
Mitchel Humpherys7cc56e42015-07-06 14:58:23 -0700727 return single_open(file, iommu_debug_profiling_show, inode->i_private);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -0700728}
729
/* Read-only seq_file interface for the per-device "profiling" file. */
static const struct file_operations iommu_debug_profiling_fops = {
	.open = iommu_debug_profiling_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
736
Mitchel Humpherys020f90f2015-10-02 16:02:31 -0700737static int iommu_debug_secure_profiling_show(struct seq_file *s, void *ignored)
738{
739 struct iommu_debug_device *ddev = s->private;
Susheel Khiania4417e72016-07-12 11:28:32 +0530740 const size_t sizes[] = { SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12,
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700741 SZ_1M * 20, 0 };
Mitchel Humpherys020f90f2015-10-02 16:02:31 -0700742
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700743 enum iommu_attr attrs[] = {
744 DOMAIN_ATTR_ATOMIC,
745 DOMAIN_ATTR_SECURE_VMID,
746 };
747 int one = 1, secure_vmid = VMID_CP_PIXEL;
748 void *attr_values[] = { &one, &secure_vmid };
749
750 iommu_debug_device_profiling(s, ddev->dev, attrs, attr_values,
751 ARRAY_SIZE(attrs), sizes);
Mitchel Humpherys020f90f2015-10-02 16:02:31 -0700752
753 return 0;
754}
755
756static int iommu_debug_secure_profiling_open(struct inode *inode,
757 struct file *file)
758{
759 return single_open(file, iommu_debug_secure_profiling_show,
760 inode->i_private);
761}
762
/* Read-only seq_file interface for the per-device "secure_profiling" file. */
static const struct file_operations iommu_debug_secure_profiling_fops = {
	.open = iommu_debug_secure_profiling_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
769
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -0700770static int iommu_debug_profiling_fast_show(struct seq_file *s, void *ignored)
771{
772 struct iommu_debug_device *ddev = s->private;
773 size_t sizes[] = {SZ_4K, SZ_8K, SZ_16K, SZ_64K, 0};
774 enum iommu_attr attrs[] = {
775 DOMAIN_ATTR_FAST,
776 DOMAIN_ATTR_ATOMIC,
777 };
778 int one = 1;
779 void *attr_values[] = { &one, &one };
780
781 iommu_debug_device_profiling(s, ddev->dev, attrs, attr_values,
782 ARRAY_SIZE(attrs), sizes);
783
784 return 0;
785}
786
787static int iommu_debug_profiling_fast_open(struct inode *inode,
788 struct file *file)
789{
790 return single_open(file, iommu_debug_profiling_fast_show,
791 inode->i_private);
792}
793
/* Read-only seq_file interface for the per-device "profiling_fast" file. */
static const struct file_operations iommu_debug_profiling_fast_fops = {
	.open = iommu_debug_profiling_fast_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
800
/*
 * seq_file show: micro-benchmark dma_map_single_attrs()/
 * dma_unmap_single_attrs() on a fresh fast-attribute ARM IOMMU mapping.
 * Runs 10 map/unmap pairs twice — once with no extra attrs, once with
 * DMA_ATTR_SKIP_CPU_SYNC — and prints per-iteration and average
 * nanosecond timings to the seq_file.
 *
 * NOTE(review): extra_labels[] says "not coherent"/"coherent" but the
 * corresponding attrs are 0 / DMA_ATTR_SKIP_CPU_SYNC — the labels look
 * stale; confirm intent before relying on the output text.
 * Always returns 0; failures are reported via seq_puts only.
 */
static int iommu_debug_profiling_fast_dma_api_show(struct seq_file *s,
						 void *ignored)
{
	int i, experiment;
	struct iommu_debug_device *ddev = s->private;
	struct device *dev = ddev->dev;
	u64 map_elapsed_ns[10], unmap_elapsed_ns[10];
	struct dma_iommu_mapping *mapping;
	dma_addr_t dma_addr;
	void *virt;
	int fast = 1;
	const char * const extra_labels[] = {
		"not coherent",
		"coherent",
	};
	unsigned long extra_attrs[] = {
		0,
		DMA_ATTR_SKIP_CPU_SYNC,
	};

	/* 1518 = typical ethernet frame size; only SZ_4K of it is mapped */
	virt = kmalloc(1518, GFP_KERNEL);
	if (!virt)
		goto out;

	mapping = arm_iommu_create_mapping(&platform_bus_type, 0, SZ_1G * 4ULL);
	if (!mapping) {
		seq_puts(s, "fast_smmu_create_mapping failed\n");
		goto out_kfree;
	}

	/* must be set before attach for the fast allocator to kick in */
	if (iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_FAST, &fast)) {
		seq_puts(s, "iommu_domain_set_attr failed\n");
		goto out_release_mapping;
	}

	if (arm_iommu_attach_device(dev, mapping)) {
		seq_puts(s, "fast_smmu_attach_device failed\n");
		goto out_release_mapping;
	}

	/* keep SMMU config clocks on so register access is valid while timing */
	if (iommu_enable_config_clocks(mapping->domain)) {
		seq_puts(s, "Couldn't enable clocks\n");
		goto out_detach;
	}
	for (experiment = 0; experiment < 2; ++experiment) {
		size_t map_avg = 0, unmap_avg = 0;

		for (i = 0; i < 10; ++i) {
			struct timespec tbefore, tafter, diff;
			u64 ns;

			/* time the map path */
			getnstimeofday(&tbefore);
			dma_addr = dma_map_single_attrs(
				dev, virt, SZ_4K, DMA_TO_DEVICE,
				extra_attrs[experiment]);
			getnstimeofday(&tafter);
			diff = timespec_sub(tafter, tbefore);
			ns = timespec_to_ns(&diff);
			if (dma_mapping_error(dev, dma_addr)) {
				seq_puts(s, "dma_map_single failed\n");
				goto out_disable_config_clocks;
			}
			map_elapsed_ns[i] = ns;

			/* time the unmap path */
			getnstimeofday(&tbefore);
			dma_unmap_single_attrs(
				dev, dma_addr, SZ_4K, DMA_TO_DEVICE,
				extra_attrs[experiment]);
			getnstimeofday(&tafter);
			diff = timespec_sub(tafter, tbefore);
			ns = timespec_to_ns(&diff);
			unmap_elapsed_ns[i] = ns;
		}

		seq_printf(s, "%13s %24s (ns): [", extra_labels[experiment],
			   "dma_map_single_attrs");
		for (i = 0; i < 10; ++i) {
			map_avg += map_elapsed_ns[i];
			seq_printf(s, "%5llu%s", map_elapsed_ns[i],
				   i < 9 ? ", " : "");
		}
		map_avg /= 10;
		seq_printf(s, "] (avg: %zu)\n", map_avg);

		seq_printf(s, "%13s %24s (ns): [", extra_labels[experiment],
			   "dma_unmap_single_attrs");
		for (i = 0; i < 10; ++i) {
			unmap_avg += unmap_elapsed_ns[i];
			seq_printf(s, "%5llu%s", unmap_elapsed_ns[i],
				   i < 9 ? ", " : "");
		}
		unmap_avg /= 10;
		seq_printf(s, "] (avg: %zu)\n", unmap_avg);
	}

	/* unwind in strict reverse order of acquisition */
out_disable_config_clocks:
	iommu_disable_config_clocks(mapping->domain);
out_detach:
	arm_iommu_detach_device(dev);
out_release_mapping:
	arm_iommu_release_mapping(mapping);
out_kfree:
	kfree(virt);
out:
	return 0;
}
907
908static int iommu_debug_profiling_fast_dma_api_open(struct inode *inode,
909 struct file *file)
910{
911 return single_open(file, iommu_debug_profiling_fast_dma_api_show,
912 inode->i_private);
913}
914
915static const struct file_operations iommu_debug_profiling_fast_dma_api_fops = {
916 .open = iommu_debug_profiling_fast_dma_api_open,
917 .read = seq_read,
918 .llseek = seq_lseek,
919 .release = single_release,
920};
921
Mitchel Humpherys45fc7122015-12-11 15:22:15 -0800922static int __tlb_stress_sweep(struct device *dev, struct seq_file *s)
923{
924 int i, ret = 0;
Charan Teja Reddy29f61402017-02-09 20:44:29 +0530925 u64 iova;
926 const u64 max = SZ_1G * 4ULL - 1;
Mitchel Humpherys45fc7122015-12-11 15:22:15 -0800927 void *virt;
928 phys_addr_t phys;
929 dma_addr_t dma_addr;
930
931 /*
932 * we'll be doing 4K and 8K mappings. Need to own an entire 8K
933 * chunk that we can work with.
934 */
935 virt = (void *)__get_free_pages(GFP_KERNEL, get_order(SZ_8K));
936 phys = virt_to_phys(virt);
937
938 /* fill the whole 4GB space */
939 for (iova = 0, i = 0; iova < max; iova += SZ_8K, ++i) {
940 dma_addr = dma_map_single(dev, virt, SZ_8K, DMA_TO_DEVICE);
941 if (dma_addr == DMA_ERROR_CODE) {
942 dev_err(dev, "Failed map on iter %d\n", i);
943 ret = -EINVAL;
944 goto out;
945 }
946 }
947
948 if (dma_map_single(dev, virt, SZ_4K, DMA_TO_DEVICE) != DMA_ERROR_CODE) {
949 dev_err(dev,
950 "dma_map_single unexpectedly (VA should have been exhausted)\n");
951 ret = -EINVAL;
952 goto out;
953 }
954
955 /*
956 * free up 4K at the very beginning, then leave one 4K mapping,
957 * then free up 8K. This will result in the next 8K map to skip
958 * over the 4K hole and take the 8K one.
959 */
960 dma_unmap_single(dev, 0, SZ_4K, DMA_TO_DEVICE);
961 dma_unmap_single(dev, SZ_8K, SZ_4K, DMA_TO_DEVICE);
962 dma_unmap_single(dev, SZ_8K + SZ_4K, SZ_4K, DMA_TO_DEVICE);
963
964 /* remap 8K */
965 dma_addr = dma_map_single(dev, virt, SZ_8K, DMA_TO_DEVICE);
966 if (dma_addr != SZ_8K) {
967 dma_addr_t expected = SZ_8K;
968
969 dev_err(dev, "Unexpected dma_addr. got: %pa expected: %pa\n",
970 &dma_addr, &expected);
971 ret = -EINVAL;
972 goto out;
973 }
974
975 /*
976 * now remap 4K. We should get the first 4K chunk that was skipped
977 * over during the previous 8K map. If we missed a TLB invalidate
978 * at that point this should explode.
979 */
980 dma_addr = dma_map_single(dev, virt, SZ_4K, DMA_TO_DEVICE);
981 if (dma_addr != 0) {
982 dma_addr_t expected = 0;
983
984 dev_err(dev, "Unexpected dma_addr. got: %pa expected: %pa\n",
985 &dma_addr, &expected);
986 ret = -EINVAL;
987 goto out;
988 }
989
990 if (dma_map_single(dev, virt, SZ_4K, DMA_TO_DEVICE) != DMA_ERROR_CODE) {
991 dev_err(dev,
992 "dma_map_single unexpectedly after remaps (VA should have been exhausted)\n");
993 ret = -EINVAL;
994 goto out;
995 }
996
997 /* we're all full again. unmap everything. */
Charan Teja Reddy29f61402017-02-09 20:44:29 +0530998 for (iova = 0; iova < max; iova += SZ_8K)
999 dma_unmap_single(dev, (dma_addr_t)iova, SZ_8K, DMA_TO_DEVICE);
Mitchel Humpherys45fc7122015-12-11 15:22:15 -08001000
1001out:
1002 free_pages((unsigned long)virt, get_order(SZ_8K));
1003 return ret;
1004}
1005
/*
 * Rolling Fibonacci generator, used by __rand_va_sweep() to produce a
 * deterministic "random-looking" sequence of IOVA offsets.
 */
struct fib_state {
	unsigned long cur;	/* most recent value in the sequence */
	unsigned long prev;	/* value before that */
};

/* Reset the generator; first get_next_fib() call returns 2 (= 1 + 1). */
static void fib_init(struct fib_state *f)
{
	f->cur = f->prev = 1;
}

/* Advance the sequence and return the next Fibonacci number. */
static unsigned long get_next_fib(struct fib_state *f)
{
	/*
	 * Keep the intermediate in unsigned long: the previous `int`
	 * temporary truncated the sum (and signed overflow is UB) once
	 * the sequence grew past INT_MAX, corrupting the iova stream.
	 */
	unsigned long next = f->cur + f->prev;

	f->prev = f->cur;
	f->cur = next;
	return next;
}
1024
/*
 * Not actually random. Just testing the fibs (and max - the fibs).
 *
 * Fill the 4GB IOVA space with @size mappings, unmap pairs of
 * "random" iovas (Fibonacci offsets and their mirror from the top of
 * the space), then remap until the allocator reports exhaustion and
 * check that exactly as many maps succeeded as were unmapped.
 * Returns 0 on success, -ENOMEM/-EINVAL on failure.
 *
 * NOTE(review): the WARN() early-return path leaks the pages and any
 * live mappings — acceptable for a debug test, but worth confirming.
 */
static int __rand_va_sweep(struct device *dev, struct seq_file *s,
			   const size_t size)
{
	u64 iova;
	const u64 max = SZ_1G * 4ULL - 1;
	int i, remapped, unmapped, ret = 0;
	void *virt;
	dma_addr_t dma_addr, dma_addr2;
	struct fib_state fib;

	virt = (void *)__get_free_pages(GFP_KERNEL, get_order(size));
	if (!virt) {
		/* large orders routinely fail; don't count that as a test failure */
		if (size > SZ_8K) {
			dev_err(dev,
				"Failed to allocate %s of memory, which is a lot. Skipping test for this size\n",
				_size_to_string(size));
			return 0;
		}
		return -ENOMEM;
	}

	/* fill the whole 4GB space */
	for (iova = 0, i = 0; iova < max; iova += size, ++i) {
		dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
		if (dma_addr == DMA_ERROR_CODE) {
			dev_err(dev, "Failed map on iter %d\n", i);
			ret = -EINVAL;
			goto out;
		}
	}

	/* now unmap "random" iovas */
	unmapped = 0;
	fib_init(&fib);
	for (iova = get_next_fib(&fib) * size;
	     iova < max - size;
	     iova = (u64)get_next_fib(&fib) * size) {
		/* unmap a fib offset and its mirror from the top of the space */
		dma_addr = (dma_addr_t)(iova);
		dma_addr2 = (dma_addr_t)((max + 1) - size - iova);
		if (dma_addr == dma_addr2) {
			WARN(1,
			"%s test needs update! The random number sequence is folding in on itself and should be changed.\n",
			__func__);
			return -EINVAL;
		}
		dma_unmap_single(dev, dma_addr, size, DMA_TO_DEVICE);
		dma_unmap_single(dev, dma_addr2, size, DMA_TO_DEVICE);
		unmapped += 2;
	}

	/* and map until everything fills back up */
	for (remapped = 0; ; ++remapped) {
		dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
		if (dma_addr == DMA_ERROR_CODE)
			break;
	}

	/* every hole we punched must have been refilled, no more, no less */
	if (unmapped != remapped) {
		dev_err(dev,
			"Unexpected random remap count! Unmapped %d but remapped %d\n",
			unmapped, remapped);
		ret = -EINVAL;
	}

	for (iova = 0; iova < max; iova += size)
		dma_unmap_single(dev, (dma_addr_t)iova, size, DMA_TO_DEVICE);

out:
	free_pages((unsigned long)virt, get_order(size));
	return ret;
}
1099
1100static int __check_mapping(struct device *dev, struct iommu_domain *domain,
1101 dma_addr_t iova, phys_addr_t expected)
1102{
1103 phys_addr_t res = iommu_iova_to_phys_hard(domain, iova);
1104 phys_addr_t res2 = iommu_iova_to_phys(domain, iova);
1105
1106 WARN(res != res2, "hard/soft iova_to_phys fns don't agree...");
1107
1108 if (res != expected) {
1109 dev_err_ratelimited(dev,
1110 "Bad translation for %pa! Expected: %pa Got: %pa\n",
1111 &iova, &expected, &res);
1112 return -EINVAL;
1113 }
1114
1115 return 0;
1116}
1117
/*
 * Sequentially map @size-byte chunks until the 4GB IOVA space is full,
 * asserting that each allocation lands at the exactly-next iova (the
 * fast allocator is expected to hand out addresses linearly from 0).
 * If @domain is non-NULL, spot-check translations at the bottom and
 * top of the space, then confirm one more map fails before unmapping
 * everything. Returns 0 on success, -ENOMEM/-EINVAL on failure.
 */
static int __full_va_sweep(struct device *dev, struct seq_file *s,
			   const size_t size, struct iommu_domain *domain)
{
	u64 iova;
	dma_addr_t dma_addr;
	void *virt;
	phys_addr_t phys;
	const u64 max = SZ_1G * 4ULL - 1;
	int ret = 0, i;

	virt = (void *)__get_free_pages(GFP_KERNEL, get_order(size));
	if (!virt) {
		/* large orders routinely fail; don't count that as a test failure */
		if (size > SZ_8K) {
			dev_err(dev,
				"Failed to allocate %s of memory, which is a lot. Skipping test for this size\n",
				_size_to_string(size));
			return 0;
		}
		return -ENOMEM;
	}
	phys = virt_to_phys(virt);

	/* every map should return exactly the previous iova + size */
	for (iova = 0, i = 0; iova < max; iova += size, ++i) {
		unsigned long expected = iova;

		dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
		if (dma_addr != expected) {
			dev_err_ratelimited(dev,
					    "Unexpected iova on iter %d (expected: 0x%lx got: 0x%lx)\n",
					    i, expected,
					    (unsigned long)dma_addr);
			ret = -EINVAL;
			goto out;
		}
	}

	if (domain) {
		/* check every mapping from 0..6M */
		for (iova = 0, i = 0; iova < SZ_2M * 3; iova += size, ++i) {
			phys_addr_t expected = phys;

			if (__check_mapping(dev, domain, iova, expected)) {
				dev_err(dev, "iter: %d\n", i);
				ret = -EINVAL;
				goto out;
			}
		}
		/* and from 4G..4G-6M */
		for (iova = 0, i = 0; iova < SZ_2M * 3; iova += size, ++i) {
			phys_addr_t expected = phys;
			unsigned long theiova = ((SZ_1G * 4ULL) - size) - iova;

			if (__check_mapping(dev, domain, theiova, expected)) {
				dev_err(dev, "iter: %d\n", i);
				ret = -EINVAL;
				goto out;
			}
		}
	}

	/* at this point, our VA space should be full */
	dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
	if (dma_addr != DMA_ERROR_CODE) {
		dev_err_ratelimited(dev,
				    "dma_map_single succeeded when it should have failed. Got iova: 0x%lx\n",
				    (unsigned long)dma_addr);
		ret = -EINVAL;
	}

out:
	/* unconditionally unmap the whole space, even on early failure */
	for (iova = 0; iova < max; iova += size)
		dma_unmap_single(dev, (dma_addr_t)iova, size, DMA_TO_DEVICE);

	free_pages((unsigned long)virt, get_order(size));
	return ret;
}
1194
/*
 * Print to both the kernel log (dev_err, so it shows in dmesg even if
 * the debugfs read is abandoned) and the seq_file the user is reading.
 */
#define ds_printf(d, s, fmt, ...) ({				\
	dev_err(d, fmt, ##__VA_ARGS__);				\
	seq_printf(s, fmt, ##__VA_ARGS__);			\
	})
1199
1200static int __functional_dma_api_va_test(struct device *dev, struct seq_file *s,
1201 struct iommu_domain *domain, void *priv)
1202{
1203 int i, j, ret = 0;
1204 size_t *sz, *sizes = priv;
1205
1206 for (j = 0; j < 1; ++j) {
1207 for (sz = sizes; *sz; ++sz) {
1208 for (i = 0; i < 2; ++i) {
1209 ds_printf(dev, s, "Full VA sweep @%s %d",
1210 _size_to_string(*sz), i);
1211 if (__full_va_sweep(dev, s, *sz, domain)) {
1212 ds_printf(dev, s, " -> FAILED\n");
1213 ret = -EINVAL;
1214 } else {
1215 ds_printf(dev, s, " -> SUCCEEDED\n");
1216 }
1217 }
1218 }
1219 }
1220
1221 ds_printf(dev, s, "bonus map:");
1222 if (__full_va_sweep(dev, s, SZ_4K, domain)) {
1223 ds_printf(dev, s, " -> FAILED\n");
1224 ret = -EINVAL;
1225 } else {
1226 ds_printf(dev, s, " -> SUCCEEDED\n");
1227 }
1228
1229 for (sz = sizes; *sz; ++sz) {
1230 for (i = 0; i < 2; ++i) {
1231 ds_printf(dev, s, "Rand VA sweep @%s %d",
1232 _size_to_string(*sz), i);
1233 if (__rand_va_sweep(dev, s, *sz)) {
1234 ds_printf(dev, s, " -> FAILED\n");
1235 ret = -EINVAL;
1236 } else {
1237 ds_printf(dev, s, " -> SUCCEEDED\n");
1238 }
1239 }
1240 }
1241
1242 ds_printf(dev, s, "TLB stress sweep");
1243 if (__tlb_stress_sweep(dev, s)) {
1244 ds_printf(dev, s, " -> FAILED\n");
1245 ret = -EINVAL;
1246 } else {
1247 ds_printf(dev, s, " -> SUCCEEDED\n");
1248 }
1249
1250 ds_printf(dev, s, "second bonus map:");
1251 if (__full_va_sweep(dev, s, SZ_4K, domain)) {
1252 ds_printf(dev, s, " -> FAILED\n");
1253 ret = -EINVAL;
1254 } else {
1255 ds_printf(dev, s, " -> SUCCEEDED\n");
1256 }
1257
1258 return ret;
1259}
1260
/*
 * Allocate a 742K coherent buffer, then for each 1K stride: repaint
 * the whole buffer with 0xa5, write a marker byte, increment it, and
 * read it back — verifying CPU writes/reads through the coherent
 * mapping are consistent. @domain and @ignored are unused; the
 * signature matches the fn pointer taken by __apply_to_new_mapping().
 * Returns 0 on success, -EINVAL on any failure.
 */
static int __functional_dma_api_alloc_test(struct device *dev,
					   struct seq_file *s,
					   struct iommu_domain *domain,
					   void *ignored)
{
	size_t size = SZ_1K * 742;
	int ret = 0;
	u8 *data;
	dma_addr_t iova;

	/* Make sure we can allocate and use a buffer */
	ds_printf(dev, s, "Allocating coherent buffer");
	data = dma_alloc_coherent(dev, size, &iova, GFP_KERNEL);
	if (!data) {
		ds_printf(dev, s, " -> FAILED\n");
		ret = -EINVAL;
	} else {
		int i;

		ds_printf(dev, s, " -> SUCCEEDED\n");
		ds_printf(dev, s, "Using coherent buffer");
		for (i = 0; i < 742; ++i) {
			int ind = SZ_1K * i;
			u8 *p = data + ind;
			u8 val = i % 255;

			/* repaint the full buffer each iteration, then poke one byte */
			memset(data, 0xa5, size);
			*p = val;
			(*p)++;
			if ((*p) != val + 1) {
				ds_printf(dev, s,
					  " -> FAILED on iter %d since %d != %d\n",
					  i, *p, val + 1);
				ret = -EINVAL;
			}
		}
		if (!ret)
			ds_printf(dev, s, " -> SUCCEEDED\n");
		dma_free_coherent(dev, size, data, iova);
	}

	return ret;
}
1304
1305static int __functional_dma_api_basic_test(struct device *dev,
1306 struct seq_file *s,
1307 struct iommu_domain *domain,
1308 void *ignored)
1309{
1310 size_t size = 1518;
1311 int i, j, ret = 0;
1312 u8 *data;
1313 dma_addr_t iova;
1314 phys_addr_t pa, pa2;
1315
1316 ds_printf(dev, s, "Basic DMA API test");
1317 /* Make sure we can allocate and use a buffer */
1318 for (i = 0; i < 1000; ++i) {
1319 data = kmalloc(size, GFP_KERNEL);
1320 if (!data) {
1321 ds_printf(dev, s, " -> FAILED\n");
1322 ret = -EINVAL;
1323 goto out;
1324 }
1325 memset(data, 0xa5, size);
1326 iova = dma_map_single(dev, data, size, DMA_TO_DEVICE);
1327 pa = iommu_iova_to_phys(domain, iova);
1328 pa2 = iommu_iova_to_phys_hard(domain, iova);
1329 if (pa != pa2) {
1330 dev_err(dev,
1331 "iova_to_phys doesn't match iova_to_phys_hard: %pa != %pa\n",
1332 &pa, &pa2);
1333 ret = -EINVAL;
1334 goto out;
1335 }
1336 pa2 = virt_to_phys(data);
1337 if (pa != pa2) {
1338 dev_err(dev,
1339 "iova_to_phys doesn't match virt_to_phys: %pa != %pa\n",
1340 &pa, &pa2);
1341 ret = -EINVAL;
1342 goto out;
1343 }
1344 dma_unmap_single(dev, iova, size, DMA_TO_DEVICE);
1345 for (j = 0; j < size; ++j) {
1346 if (data[j] != 0xa5) {
1347 dev_err(dev, "data[%d] != 0xa5\n", data[j]);
1348 ret = -EINVAL;
1349 goto out;
1350 }
1351 }
1352 kfree(data);
1353 }
1354
1355out:
1356 if (ret)
1357 ds_printf(dev, s, " -> FAILED\n");
1358 else
1359 ds_printf(dev, s, " -> SUCCEEDED\n");
1360
1361 return ret;
1362}
1363
/*
 * Creates a fresh fast mapping and applies @fn to it.
 *
 * Builds a 4GB ARM IOMMU mapping with DOMAIN_ATTR_FAST set, attaches
 * it to the test device, enables config clocks, runs @fn(dev, s,
 * domain, priv), then tears everything down in reverse order.
 * Always returns 0; @fn's result is reported only as the trailing
 * "FAIL"/"SUCCESS" line in the seq_file (seq_file show fns should not
 * propagate test failures as read errors).
 */
static int __apply_to_new_mapping(struct seq_file *s,
				    int (*fn)(struct device *dev,
					      struct seq_file *s,
					      struct iommu_domain *domain,
					      void *priv),
				    void *priv)
{
	struct dma_iommu_mapping *mapping;
	struct iommu_debug_device *ddev = s->private;
	struct device *dev = ddev->dev;
	int ret = -EINVAL, fast = 1;
	phys_addr_t pt_phys;

	mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
					   (SZ_1G * 4ULL));
	if (!mapping)
		goto out;

	/* must be set before attach for the fast allocator to kick in */
	if (iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_FAST, &fast)) {
		seq_puts(s, "iommu_domain_set_attr failed\n");
		goto out_release_mapping;
	}

	if (arm_iommu_attach_device(dev, mapping))
		goto out_release_mapping;

	if (iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_PT_BASE_ADDR,
				  &pt_phys)) {
		ds_printf(dev, s, "Couldn't get page table base address\n");
		goto out_release_mapping;
	}

	/* log the pgtable base so crash dumps can be correlated to this run */
	dev_err(dev, "testing with pgtables at %pa\n", &pt_phys);
	if (iommu_enable_config_clocks(mapping->domain)) {
		ds_printf(dev, s, "Couldn't enable clocks\n");
		goto out_release_mapping;
	}
	ret = fn(dev, s, mapping->domain, priv);
	iommu_disable_config_clocks(mapping->domain);

	arm_iommu_detach_device(dev);
out_release_mapping:
	arm_iommu_release_mapping(mapping);
out:
	seq_printf(s, "%s\n", ret ? "FAIL" : "SUCCESS");
	return 0;
}
1412
1413static int iommu_debug_functional_fast_dma_api_show(struct seq_file *s,
1414 void *ignored)
1415{
1416 size_t sizes[] = {SZ_4K, SZ_8K, SZ_16K, SZ_64K, 0};
1417 int ret = 0;
1418
1419 ret |= __apply_to_new_mapping(s, __functional_dma_api_alloc_test, NULL);
1420 ret |= __apply_to_new_mapping(s, __functional_dma_api_basic_test, NULL);
1421 ret |= __apply_to_new_mapping(s, __functional_dma_api_va_test, sizes);
1422 return ret;
1423}
1424
1425static int iommu_debug_functional_fast_dma_api_open(struct inode *inode,
1426 struct file *file)
1427{
1428 return single_open(file, iommu_debug_functional_fast_dma_api_show,
1429 inode->i_private);
1430}
1431
1432static const struct file_operations iommu_debug_functional_fast_dma_api_fops = {
1433 .open = iommu_debug_functional_fast_dma_api_open,
1434 .read = seq_read,
1435 .llseek = seq_lseek,
1436 .release = single_release,
1437};
1438
/*
 * seq_file show: run the coherent-alloc and basic streaming tests
 * against the *standard* ARM DMA mapper (no DOMAIN_ATTR_FAST), on a
 * nearly-4GB mapping. Always returns 0; the combined result is
 * reported as a trailing "FAIL"/"SUCCESS" line in the seq_file.
 */
static int iommu_debug_functional_arm_dma_api_show(struct seq_file *s,
						   void *ignored)
{
	struct dma_iommu_mapping *mapping;
	struct iommu_debug_device *ddev = s->private;
	struct device *dev = ddev->dev;
	size_t sizes[] = {SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12, 0};
	int ret = -EINVAL;

	/* Make the size equal to MAX_ULONG */
	mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
					   (SZ_1G * 4ULL - 1));
	if (!mapping)
		goto out;

	if (arm_iommu_attach_device(dev, mapping))
		goto out_release_mapping;

	/* first assignment seeds ret; the second OR-accumulates failures */
	ret = __functional_dma_api_alloc_test(dev, s, mapping->domain, sizes);
	ret |= __functional_dma_api_basic_test(dev, s, mapping->domain, sizes);

	arm_iommu_detach_device(dev);
out_release_mapping:
	arm_iommu_release_mapping(mapping);
out:
	seq_printf(s, "%s\n", ret ? "FAIL" : "SUCCESS");
	return 0;
}
1467
1468static int iommu_debug_functional_arm_dma_api_open(struct inode *inode,
1469 struct file *file)
1470{
1471 return single_open(file, iommu_debug_functional_arm_dma_api_show,
1472 inode->i_private);
1473}
1474
1475static const struct file_operations iommu_debug_functional_arm_dma_api_fops = {
1476 .open = iommu_debug_functional_arm_dma_api_open,
1477 .read = seq_read,
1478 .llseek = seq_lseek,
1479 .release = single_release,
1480};
1481
/*
 * Allocate a new domain, optionally mark it secure with VMID @val, and
 * attach it to the test device. On success ddev->domain holds the live
 * domain; on failure it is freed and reset to NULL.
 * Returns 0 on success, -ENOMEM if allocation failed, -EIO otherwise.
 */
static int iommu_debug_attach_do_attach(struct iommu_debug_device *ddev,
					int val, bool is_secure)
{
	ddev->domain = iommu_domain_alloc(&platform_bus_type);
	if (!ddev->domain) {
		pr_err("Couldn't allocate domain\n");
		return -ENOMEM;
	}

	/* the secure VMID must be set before the device is attached */
	if (is_secure && iommu_domain_set_attr(ddev->domain,
					       DOMAIN_ATTR_SECURE_VMID,
					       &val)) {
		pr_err("Couldn't set secure vmid to %d\n", val);
		goto out_domain_free;
	}

	if (iommu_attach_device(ddev->domain, ddev->dev)) {
		pr_err("Couldn't attach new domain to device. Is it already attached?\n");
		goto out_domain_free;
	}

	return 0;

out_domain_free:
	iommu_domain_free(ddev->domain);
	ddev->domain = NULL;
	return -EIO;
}
1510
/*
 * Common write handler for the attach/attach_secure debugfs nodes.
 * A non-zero written value attaches a fresh domain (for the secure
 * node, the value doubles as the secure VMID); zero detaches and frees
 * the current domain. ddev->domain is the single source of truth for
 * attachment state. Returns @count on success, negative errno on error.
 */
static ssize_t __iommu_debug_attach_write(struct file *file,
					  const char __user *ubuf,
					  size_t count, loff_t *offset,
					  bool is_secure)
{
	struct iommu_debug_device *ddev = file->private_data;
	ssize_t retval;
	int val;

	if (kstrtoint_from_user(ubuf, count, 0, &val)) {
		pr_err("Invalid format. Expected a hex or decimal integer");
		retval = -EFAULT;
		goto out;
	}

	if (val) {
		/* attach: refuse if a domain is already live */
		if (ddev->domain) {
			pr_err("Already attached.\n");
			retval = -EINVAL;
			goto out;
		}
		/* archdata.iommu non-NULL here means someone attached behind our back */
		if (WARN(ddev->dev->archdata.iommu,
			 "Attachment tracking out of sync with device\n")) {
			retval = -EINVAL;
			goto out;
		}
		if (iommu_debug_attach_do_attach(ddev, val, is_secure)) {
			retval = -EIO;
			goto out;
		}
		pr_err("Attached\n");
	} else {
		/* detach: requires a previously attached domain */
		if (!ddev->domain) {
			pr_err("No domain. Did you already attach?\n");
			retval = -EINVAL;
			goto out;
		}
		iommu_detach_device(ddev->domain, ddev->dev);
		iommu_domain_free(ddev->domain);
		ddev->domain = NULL;
		pr_err("Detached\n");
	}

	retval = count;
out:
	return retval;
}
1558
Mitchel Humpherysac4c38b2015-07-30 19:40:21 -07001559static ssize_t iommu_debug_attach_write(struct file *file,
1560 const char __user *ubuf,
1561 size_t count, loff_t *offset)
1562{
1563 return __iommu_debug_attach_write(file, ubuf, count, offset,
1564 false);
1565
1566}
1567
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001568static ssize_t iommu_debug_attach_read(struct file *file, char __user *ubuf,
1569 size_t count, loff_t *offset)
1570{
1571 struct iommu_debug_device *ddev = file->private_data;
1572 char c[2];
1573
1574 if (*offset)
1575 return 0;
1576
1577 c[0] = ddev->domain ? '1' : '0';
1578 c[1] = '\n';
1579 if (copy_to_user(ubuf, &c, 2)) {
1580 pr_err("copy_to_user failed\n");
1581 return -EFAULT;
1582 }
1583 *offset = 1; /* non-zero means we're done */
1584
1585 return 2;
1586}
1587
1588static const struct file_operations iommu_debug_attach_fops = {
1589 .open = simple_open,
1590 .write = iommu_debug_attach_write,
1591 .read = iommu_debug_attach_read,
1592};
1593
Mitchel Humpherysac4c38b2015-07-30 19:40:21 -07001594static ssize_t iommu_debug_attach_write_secure(struct file *file,
1595 const char __user *ubuf,
1596 size_t count, loff_t *offset)
1597{
1598 return __iommu_debug_attach_write(file, ubuf, count, offset,
1599 true);
1600
1601}
1602
1603static const struct file_operations iommu_debug_secure_attach_fops = {
1604 .open = simple_open,
1605 .write = iommu_debug_attach_write_secure,
1606 .read = iommu_debug_attach_read,
1607};
1608
/*
 * debugfs write: parse an iova and stash it in ddev->iova for later
 * ATOS (address translation) reads. On a parse error the saved iova is
 * reset to 0. Returns @count on success, -EINVAL on bad input.
 */
static ssize_t iommu_debug_atos_write(struct file *file,
				      const char __user *ubuf,
				      size_t count, loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	dma_addr_t iova;

	/* kstrtox_from_user dispatches on the width of dma_addr_t */
	if (kstrtox_from_user(ubuf, count, 0, &iova)) {
		pr_err("Invalid format for iova\n");
		ddev->iova = 0;
		return -EINVAL;
	}

	ddev->iova = iova;
	pr_err("Saved iova=%pa for future ATOS commands\n", &iova);
	return count;
}
1626
1627static ssize_t iommu_debug_atos_read(struct file *file, char __user *ubuf,
1628 size_t count, loff_t *offset)
1629{
1630 struct iommu_debug_device *ddev = file->private_data;
1631 phys_addr_t phys;
1632 char buf[100];
1633 ssize_t retval;
1634 size_t buflen;
1635
1636 if (!ddev->domain) {
1637 pr_err("No domain. Did you already attach?\n");
1638 return -EINVAL;
1639 }
1640
1641 if (*offset)
1642 return 0;
1643
1644 memset(buf, 0, 100);
1645
1646 phys = iommu_iova_to_phys_hard(ddev->domain, ddev->iova);
1647 if (!phys)
1648 strlcpy(buf, "FAIL\n", 100);
1649 else
1650 snprintf(buf, 100, "%pa\n", &phys);
1651
1652 buflen = strlen(buf);
1653 if (copy_to_user(ubuf, buf, buflen)) {
1654 pr_err("Couldn't copy_to_user\n");
1655 retval = -EFAULT;
1656 } else {
1657 *offset = 1; /* non-zero means we're done */
1658 retval = buflen;
1659 }
1660
1661 return retval;
1662}
1663
1664static const struct file_operations iommu_debug_atos_fops = {
1665 .open = simple_open,
1666 .write = iommu_debug_atos_write,
1667 .read = iommu_debug_atos_read,
1668};
1669
1670static ssize_t iommu_debug_map_write(struct file *file, const char __user *ubuf,
1671 size_t count, loff_t *offset)
1672{
Shiraz Hashim3c28c962016-07-04 15:05:35 +05301673 ssize_t retval = -EINVAL;
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001674 int ret;
1675 char *comma1, *comma2, *comma3;
1676 char buf[100];
1677 dma_addr_t iova;
1678 phys_addr_t phys;
1679 size_t size;
1680 int prot;
1681 struct iommu_debug_device *ddev = file->private_data;
1682
1683 if (count >= 100) {
1684 pr_err("Value too large\n");
1685 return -EINVAL;
1686 }
1687
1688 if (!ddev->domain) {
1689 pr_err("No domain. Did you already attach?\n");
1690 return -EINVAL;
1691 }
1692
1693 memset(buf, 0, 100);
1694
1695 if (copy_from_user(buf, ubuf, count)) {
1696 pr_err("Couldn't copy from user\n");
1697 retval = -EFAULT;
1698 }
1699
1700 comma1 = strnchr(buf, count, ',');
1701 if (!comma1)
1702 goto invalid_format;
1703
1704 comma2 = strnchr(comma1 + 1, count, ',');
1705 if (!comma2)
1706 goto invalid_format;
1707
1708 comma3 = strnchr(comma2 + 1, count, ',');
1709 if (!comma3)
1710 goto invalid_format;
1711
1712 /* split up the words */
1713 *comma1 = *comma2 = *comma3 = '\0';
1714
Susheel Khiania4417e72016-07-12 11:28:32 +05301715 if (kstrtoux(buf, 0, &iova))
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001716 goto invalid_format;
1717
Susheel Khiania4417e72016-07-12 11:28:32 +05301718 if (kstrtoux(comma1 + 1, 0, &phys))
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001719 goto invalid_format;
1720
Susheel Khiania4417e72016-07-12 11:28:32 +05301721 if (kstrtosize_t(comma2 + 1, 0, &size))
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001722 goto invalid_format;
1723
1724 if (kstrtoint(comma3 + 1, 0, &prot))
1725 goto invalid_format;
1726
1727 ret = iommu_map(ddev->domain, iova, phys, size, prot);
1728 if (ret) {
1729 pr_err("iommu_map failed with %d\n", ret);
1730 retval = -EIO;
1731 goto out;
1732 }
1733
1734 retval = count;
1735 pr_err("Mapped %pa to %pa (len=0x%zx, prot=0x%x)\n",
1736 &iova, &phys, size, prot);
1737out:
1738 return retval;
1739
1740invalid_format:
1741 pr_err("Invalid format. Expected: iova,phys,len,prot where `prot' is the bitwise OR of IOMMU_READ, IOMMU_WRITE, etc.\n");
1742 return -EINVAL;
1743}
1744
1745static const struct file_operations iommu_debug_map_fops = {
1746 .open = simple_open,
1747 .write = iommu_debug_map_write,
1748};
1749
/*
 * debugfs write: unmap a region from the attached domain. Input format
 * is "iova,len". Returns @count on success, negative errno on bad
 * input, copy failure, or when iommu_unmap() unmapped fewer bytes than
 * requested.
 */
static ssize_t iommu_debug_unmap_write(struct file *file,
				       const char __user *ubuf,
				       size_t count, loff_t *offset)
{
	ssize_t retval = 0;
	char *comma1;
	char buf[100];
	dma_addr_t iova;
	size_t size;
	size_t unmapped;
	struct iommu_debug_device *ddev = file->private_data;

	/* count < 100 also guarantees buf stays NUL-terminated */
	if (count >= 100) {
		pr_err("Value too large\n");
		return -EINVAL;
	}

	if (!ddev->domain) {
		pr_err("No domain. Did you already attach?\n");
		return -EINVAL;
	}

	memset(buf, 0, 100);

	if (copy_from_user(buf, ubuf, count)) {
		pr_err("Couldn't copy from user\n");
		retval = -EFAULT;
		goto out;
	}

	comma1 = strnchr(buf, count, ',');
	if (!comma1)
		goto invalid_format;

	/* split up the words */
	*comma1 = '\0';

	/* kstrtoux dispatches on the width of dma_addr_t */
	if (kstrtoux(buf, 0, &iova))
		goto invalid_format;

	if (kstrtosize_t(comma1 + 1, 0, &size))
		goto invalid_format;

	/* partial unmap counts as failure */
	unmapped = iommu_unmap(ddev->domain, iova, size);
	if (unmapped != size) {
		pr_err("iommu_unmap failed. Expected to unmap: 0x%zx, unmapped: 0x%zx",
		       size, unmapped);
		return -EIO;
	}

	retval = count;
	pr_err("Unmapped %pa (len=0x%zx)\n", &iova, size);
out:
	return retval;

invalid_format:
	pr_err("Invalid format. Expected: iova,len\n");
	return -EINVAL;
}
1809
1810static const struct file_operations iommu_debug_unmap_fops = {
1811 .open = simple_open,
1812 .write = iommu_debug_unmap_write,
1813};
1814
/*
 * debugfs write: toggle the SMMU config clocks for the attached
 * domain. Accepts a single '0' (disable) or '1' (enable), optionally
 * followed by a newline. Returns @count on success, negative errno on
 * bad input, no attached domain, or copy failure.
 */
static ssize_t iommu_debug_config_clocks_write(struct file *file,
					       const char __user *ubuf,
					       size_t count, loff_t *offset)
{
	char buf;
	struct iommu_debug_device *ddev = file->private_data;
	struct device *dev = ddev->dev;

	/* we're expecting a single character plus (optionally) a newline */
	if (count > 2) {
		dev_err(dev, "Invalid value\n");
		return -EINVAL;
	}

	if (!ddev->domain) {
		dev_err(dev, "No domain. Did you already attach?\n");
		return -EINVAL;
	}

	/* only the first character matters; any trailing newline is ignored */
	if (copy_from_user(&buf, ubuf, 1)) {
		dev_err(dev, "Couldn't copy from user\n");
		return -EFAULT;
	}

	switch (buf) {
	case '0':
		dev_err(dev, "Disabling config clocks\n");
		iommu_disable_config_clocks(ddev->domain);
		break;
	case '1':
		dev_err(dev, "Enabling config clocks\n");
		if (iommu_enable_config_clocks(ddev->domain))
			dev_err(dev, "Failed!\n");
		break;
	default:
		dev_err(dev, "Invalid value. Should be 0 or 1.\n");
		return -EINVAL;
	}

	return count;
}
1856
1857static const struct file_operations iommu_debug_config_clocks_fops = {
1858 .open = simple_open,
1859 .write = iommu_debug_config_clocks_write,
1860};
1861
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07001862/*
1863 * The following will only work for drivers that implement the generic
1864 * device tree bindings described in
1865 * Documentation/devicetree/bindings/iommu/iommu.txt
1866 */
1867static int snarf_iommu_devices(struct device *dev, void *ignored)
1868{
1869 struct iommu_debug_device *ddev;
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001870 struct dentry *dir;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07001871
1872 if (!of_find_property(dev->of_node, "iommus", NULL))
1873 return 0;
1874
Mitchel Humpherys89924fd2015-07-09 14:50:22 -07001875 ddev = kzalloc(sizeof(*ddev), GFP_KERNEL);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07001876 if (!ddev)
1877 return -ENODEV;
1878 ddev->dev = dev;
1879 dir = debugfs_create_dir(dev_name(dev), debugfs_tests_dir);
1880 if (!dir) {
1881 pr_err("Couldn't create iommu/devices/%s debugfs dir\n",
1882 dev_name(dev));
1883 goto err;
1884 }
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001885
Patrick Dalye4e39862015-11-20 20:00:50 -08001886 if (!debugfs_create_file("nr_iters", S_IRUSR, dir, &iters_per_op,
1887 &iommu_debug_nr_iters_ops)) {
1888 pr_err("Couldn't create iommu/devices/%s/nr_iters debugfs file\n",
1889 dev_name(dev));
1890 goto err_rmdir;
1891 }
1892
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001893 if (!debugfs_create_file("profiling", S_IRUSR, dir, ddev,
1894 &iommu_debug_profiling_fops)) {
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07001895 pr_err("Couldn't create iommu/devices/%s/profiling debugfs file\n",
1896 dev_name(dev));
1897 goto err_rmdir;
1898 }
1899
Mitchel Humpherys020f90f2015-10-02 16:02:31 -07001900 if (!debugfs_create_file("secure_profiling", S_IRUSR, dir, ddev,
1901 &iommu_debug_secure_profiling_fops)) {
1902 pr_err("Couldn't create iommu/devices/%s/secure_profiling debugfs file\n",
1903 dev_name(dev));
1904 goto err_rmdir;
1905 }
1906
Mitchel Humpherysbc367fd2015-10-05 14:44:58 -07001907 if (!debugfs_create_file("profiling_fast", S_IRUSR, dir, ddev,
1908 &iommu_debug_profiling_fast_fops)) {
1909 pr_err("Couldn't create iommu/devices/%s/profiling_fast debugfs file\n",
1910 dev_name(dev));
1911 goto err_rmdir;
1912 }
1913
Mitchel Humpherysbe3060c2015-10-08 15:08:01 -07001914 if (!debugfs_create_file("profiling_fast_dma_api", S_IRUSR, dir, ddev,
1915 &iommu_debug_profiling_fast_dma_api_fops)) {
1916 pr_err("Couldn't create iommu/devices/%s/profiling_fast_dma_api debugfs file\n",
1917 dev_name(dev));
1918 goto err_rmdir;
1919 }
1920
Mitchel Humpherys45fc7122015-12-11 15:22:15 -08001921 if (!debugfs_create_file("functional_fast_dma_api", S_IRUSR, dir, ddev,
1922 &iommu_debug_functional_fast_dma_api_fops)) {
1923 pr_err("Couldn't create iommu/devices/%s/functional_fast_dma_api debugfs file\n",
1924 dev_name(dev));
1925 goto err_rmdir;
1926 }
1927
Mitchel Humpherys10215fd2015-12-15 18:45:57 -08001928 if (!debugfs_create_file("functional_arm_dma_api", S_IRUSR, dir, ddev,
1929 &iommu_debug_functional_arm_dma_api_fops)) {
1930 pr_err("Couldn't create iommu/devices/%s/functional_arm_dma_api debugfs file\n",
1931 dev_name(dev));
1932 goto err_rmdir;
1933 }
1934
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001935 if (!debugfs_create_file("attach", S_IRUSR, dir, ddev,
1936 &iommu_debug_attach_fops)) {
1937 pr_err("Couldn't create iommu/devices/%s/attach debugfs file\n",
1938 dev_name(dev));
1939 goto err_rmdir;
1940 }
1941
Mitchel Humpherysac4c38b2015-07-30 19:40:21 -07001942 if (!debugfs_create_file("secure_attach", S_IRUSR, dir, ddev,
1943 &iommu_debug_secure_attach_fops)) {
1944 pr_err("Couldn't create iommu/devices/%s/secure_attach debugfs file\n",
1945 dev_name(dev));
1946 goto err_rmdir;
1947 }
1948
Mitchel Humpherys0fe337d2015-07-06 15:21:24 -07001949 if (!debugfs_create_file("atos", S_IWUSR, dir, ddev,
1950 &iommu_debug_atos_fops)) {
1951 pr_err("Couldn't create iommu/devices/%s/atos debugfs file\n",
1952 dev_name(dev));
1953 goto err_rmdir;
1954 }
1955
1956 if (!debugfs_create_file("map", S_IWUSR, dir, ddev,
1957 &iommu_debug_map_fops)) {
1958 pr_err("Couldn't create iommu/devices/%s/map debugfs file\n",
1959 dev_name(dev));
1960 goto err_rmdir;
1961 }
1962
1963 if (!debugfs_create_file("unmap", S_IWUSR, dir, ddev,
1964 &iommu_debug_unmap_fops)) {
1965 pr_err("Couldn't create iommu/devices/%s/unmap debugfs file\n",
1966 dev_name(dev));
1967 goto err_rmdir;
1968 }
1969
Mitchel Humpherys0d1b8262016-02-01 16:53:39 -08001970 if (!debugfs_create_file("config_clocks", S_IWUSR, dir, ddev,
1971 &iommu_debug_config_clocks_fops)) {
1972 pr_err("Couldn't create iommu/devices/%s/config_clocks debugfs file\n",
1973 dev_name(dev));
1974 goto err_rmdir;
1975 }
1976
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07001977 list_add(&ddev->list, &iommu_debug_devices);
1978 return 0;
1979
1980err_rmdir:
1981 debugfs_remove_recursive(dir);
1982err:
1983 kfree(ddev);
1984 return 0;
1985}
1986
1987static int iommu_debug_init_tests(void)
1988{
1989 debugfs_tests_dir = debugfs_create_dir("tests",
Mitchel Humpherysc75ae492015-07-15 18:27:36 -07001990 iommu_debugfs_top);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07001991 if (!debugfs_tests_dir) {
1992 pr_err("Couldn't create iommu/tests debugfs directory\n");
1993 return -ENODEV;
1994 }
1995
1996 return bus_for_each_dev(&platform_bus_type, NULL, NULL,
1997 snarf_iommu_devices);
1998}
Mitchel Humpherysc75ae492015-07-15 18:27:36 -07001999
/* Tear down the whole iommu/tests debugfs tree created at init */
static void iommu_debug_destroy_tests(void)
{
	debugfs_remove_recursive(debugfs_tests_dir);
}
#else
/* Stubs when neither CONFIG_IOMMU_DEBUG_TRACKING nor CONFIG_IOMMU_TESTS is set */
static inline int iommu_debug_init_tests(void) { return 0; }
static inline void iommu_debug_destroy_tests(void) { }
#endif
2008
/*
 * This isn't really a "driver", we just need something in the device tree
 * so that our tests can run without any client drivers, and our tests rely
 * on parsing the device tree for nodes with the `iommus' property.
 */
/* Shared no-op probe/remove callback for the stub driver below */
static int iommu_debug_pass(struct platform_device *pdev)
{
	return 0;
}
2018
/* Matches the DT test node; see the comment on iommu_debug_pass() */
static const struct of_device_id iommu_debug_of_match[] = {
	{ .compatible = "iommu-debug-test" },
	{ },
};
2023
/* Stub platform driver; probe/remove intentionally do nothing */
static struct platform_driver iommu_debug_driver = {
	.probe = iommu_debug_pass,
	.remove = iommu_debug_pass,
	.driver = {
		.name = "iommu-debug",
		.of_match_table = iommu_debug_of_match,
	},
};
2032
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002033static int iommu_debug_init(void)
2034{
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002035 if (iommu_debug_init_tracking())
Mitchel Humpherysc75ae492015-07-15 18:27:36 -07002036 return -ENODEV;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002037
2038 if (iommu_debug_init_tests())
Mitchel Humpherysc75ae492015-07-15 18:27:36 -07002039 return -ENODEV;
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002040
Mitchel Humpherys93f7eef2016-04-13 17:08:49 -07002041 return platform_driver_register(&iommu_debug_driver);
Mitchel Humpherys42296fb2015-06-23 16:29:16 -07002042}
2043
/* Module exit: unregister the stub driver, then remove the debugfs trees */
static void iommu_debug_exit(void)
{
	platform_driver_unregister(&iommu_debug_driver);
	iommu_debug_destroy_tracking();
	iommu_debug_destroy_tests();
}
2050
/* Standard module entry/exit hookup */
module_init(iommu_debug_init);
module_exit(iommu_debug_exit);