/*
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/stacktrace.h>
#include <linux/dma-debug.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <asm/sections.h>

#define HASH_SIZE       1024ULL
#define HASH_FN_SHIFT   13
#define HASH_FN_MASK    (HASH_SIZE - 1)

enum {
	dma_debug_single,
	dma_debug_page,
	dma_debug_sg,
	dma_debug_coherent,
};

#define DMA_DEBUG_STACKTRACE_ENTRIES 5

struct dma_debug_entry {
	struct list_head list;
	struct device *dev;
	int type;
	phys_addr_t paddr;
	u64 dev_addr;
	u64 size;
	int direction;
	int sg_call_ents;
	int sg_mapped_ents;
#ifdef CONFIG_STACKTRACE
	struct stack_trace stacktrace;
	unsigned long st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
#endif
};

typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);

struct hash_bucket {
	struct list_head list;
	spinlock_t lock;
} ____cacheline_aligned_in_smp;

/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];
/* List of pre-allocated dma_debug_entry's */
static LIST_HEAD(free_entries);
/* Lock for the list above */
static DEFINE_SPINLOCK(free_entries_lock);

/* Global disable flag - will be set in case of an error */
static bool global_disable __read_mostly;

/* Global error count */
static u32 error_count;

/* Global error show enable */
static u32 show_all_errors __read_mostly;
/* Number of errors to show */
static u32 show_num_errors = 1;

static u32 num_free_entries;
static u32 min_free_entries;
static u32 nr_total_entries;

/* number of preallocated entries requested by kernel cmdline */
static u32 req_entries;

/* debugfs dentry's for the stuff above */
static struct dentry *dma_debug_dent        __read_mostly;
static struct dentry *global_disable_dent   __read_mostly;
static struct dentry *error_count_dent      __read_mostly;
static struct dentry *show_all_errors_dent  __read_mostly;
static struct dentry *show_num_errors_dent  __read_mostly;
static struct dentry *num_free_entries_dent __read_mostly;
static struct dentry *min_free_entries_dent __read_mostly;
static struct dentry *filter_dent           __read_mostly;

/* per-driver filter related state */

#define NAME_MAX_LEN	64

static char current_driver_name[NAME_MAX_LEN] __read_mostly;
static struct device_driver *current_driver __read_mostly;

static DEFINE_RWLOCK(driver_name_lock);

static const char *type2name[4] = { "single", "page",
				    "scatter-gather", "coherent" };

static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
				   "DMA_FROM_DEVICE", "DMA_NONE" };

/* little merge helper - remove it after the merge window */
#ifndef BUS_NOTIFY_UNBOUND_DRIVER
#define BUS_NOTIFY_UNBOUND_DRIVER 0x0005
#endif

/*
 * The access to some variables in this macro is racy. We can't use atomic_t
 * here because all these variables are exported to debugfs. Some of them are
 * even writeable. This is also the reason why a lock won't help much. But
 * anyway, the races are no big deal. Here is why:
 *
 *   error_count: the addition is racy, but the worst thing that can happen is
 *                that we don't count some errors
 *   show_num_errors: the subtraction is racy. Also no big deal because in
 *                    worst case this will result in one warning more in the
 *                    system log than the user configured. This variable is
 *                    writeable via debugfs.
 */
static inline void dump_entry_trace(struct dma_debug_entry *entry)
{
#ifdef CONFIG_STACKTRACE
	if (entry) {
		pr_warning("Mapped at:\n");
		print_stack_trace(&entry->stacktrace, 0);
	}
#endif
}

static bool driver_filter(struct device *dev)
{
	struct device_driver *drv;
	unsigned long flags;
	bool ret;

	/* driver filter off */
	if (likely(!current_driver_name[0]))
		return true;

	/* driver filter on and initialized */
	if (current_driver && dev && dev->driver == current_driver)
		return true;

	/* driver filter on, but we can't filter on a NULL device... */
	if (!dev)
		return false;

	if (current_driver || !current_driver_name[0])
		return false;

	/* driver filter on but not yet initialized */
	drv = get_driver(dev->driver);
	if (!drv)
		return false;

	/* lock to protect against change of current_driver_name */
	read_lock_irqsave(&driver_name_lock, flags);

	ret = false;
	if (drv->name &&
	    strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
		current_driver = drv;
		ret = true;
	}

	read_unlock_irqrestore(&driver_name_lock, flags);
	put_driver(drv);

	return ret;
}

#define err_printk(dev, entry, format, arg...) do {			\
		error_count += 1;					\
		if (driver_filter(dev) &&				\
		    (show_all_errors || show_num_errors > 0)) {		\
			WARN(1, "%s %s: " format,			\
			     dev ? dev_driver_string(dev) : "NULL",	\
			     dev ? dev_name(dev) : "NULL", ## arg);	\
			dump_entry_trace(entry);			\
		}							\
		if (!show_all_errors && show_num_errors > 0)		\
			show_num_errors -= 1;				\
	} while (0)

/*
 * Hash related functions
 *
 * Every DMA-API request is saved into a struct dma_debug_entry. To
 * have quick access to these structs they are stored into a hash.
 */
static int hash_fn(struct dma_debug_entry *entry)
{
	/*
	 * Hash function is based on the dma address.
	 * We use bits 13-22 (HASH_FN_SHIFT up to the width of HASH_FN_MASK)
	 * of the device address as the index into the hash.
	 */
	return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}
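
/*
 * Example: with HASH_FN_SHIFT == 13 and HASH_FN_MASK == 0x3ff, a dev_addr of
 * 0x12345678 hashes to (0x12345678 >> 13) & 0x3ff = 0x91a2 & 0x3ff = 0x1a2,
 * so mappings whose device addresses agree in bits 13-22 share a bucket.
 */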

/*
 * Request exclusive access to a hash bucket for a given dma_debug_entry.
 */
static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
					   unsigned long *flags)
{
	int idx = hash_fn(entry);
	unsigned long __flags;

	spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
	*flags = __flags;
	return &dma_entry_hash[idx];
}

/*
 * Give up exclusive access to the hash bucket
 */
static void put_hash_bucket(struct hash_bucket *bucket,
			    unsigned long *flags)
{
	unsigned long __flags = *flags;

	spin_unlock_irqrestore(&bucket->lock, __flags);
}

static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
{
	return ((a->dev_addr == b->dev_addr) &&
		(a->dev == b->dev)) ? true : false;
}

static bool containing_match(struct dma_debug_entry *a,
			     struct dma_debug_entry *b)
{
	if (a->dev != b->dev)
		return false;

	if ((b->dev_addr <= a->dev_addr) &&
	    ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
		return true;

	return false;
}

/*
 * Search a given entry in the hash bucket list
 */
static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
						  struct dma_debug_entry *ref,
						  match_fn match)
{
	struct dma_debug_entry *entry, *ret = NULL;
	int matches = 0, match_lvl, last_lvl = 0;

	list_for_each_entry(entry, &bucket->list, list) {
		if (!match(ref, entry))
			continue;

		/*
		 * Some drivers map the same physical address multiple
		 * times. Without a hardware IOMMU this results in the
		 * same device addresses being put into the dma-debug
		 * hash multiple times too. This can result in false
		 * positives being reported. Therefore we implement a
		 * best-fit algorithm here which returns the entry from
		 * the hash which fits best to the reference value
		 * instead of the first-fit.
		 */
		matches += 1;
		match_lvl = 0;
		entry->size         == ref->size         ? ++match_lvl : 0;
		entry->type         == ref->type         ? ++match_lvl : 0;
		entry->direction    == ref->direction    ? ++match_lvl : 0;
		entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;

		if (match_lvl == 4) {
			/* perfect-fit - return the result */
			return entry;
		} else if (match_lvl > last_lvl) {
			/*
			 * We found an entry that fits better than the
			 * previous one
			 */
			last_lvl = match_lvl;
			ret      = entry;
		}
	}

	/*
	 * If we have multiple matches but no perfect-fit, just return
	 * NULL.
	 */
	ret = (matches == 1) ? ret : NULL;

	return ret;
}

static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
						 struct dma_debug_entry *ref)
{
	return __hash_bucket_find(bucket, ref, exact_match);
}

static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
						   struct dma_debug_entry *ref,
						   unsigned long *flags)
{

	unsigned int max_range = dma_get_max_seg_size(ref->dev);
	struct dma_debug_entry *entry, index = *ref;
	unsigned int range = 0;

	while (range <= max_range) {
		entry = __hash_bucket_find(*bucket, &index, containing_match);

		if (entry)
			return entry;

		/*
		 * Nothing found, go back a hash bucket
		 */
		put_hash_bucket(*bucket, flags);
		range          += (1 << HASH_FN_SHIFT);
		index.dev_addr -= (1 << HASH_FN_SHIFT);
		*bucket = get_hash_bucket(&index, flags);
	}

	return NULL;
}
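
/*
 * A note on bucket_find_contain(): a sync call may reference an address
 * somewhere inside a mapping rather than its start, and the containing
 * entry is hashed by its start address. The loop above therefore walks
 * backwards one bucket (1 << HASH_FN_SHIFT addresses) at a time until it
 * has covered dma_get_max_seg_size(), the furthest the start of the
 * mapping can reasonably lie below the referenced address.
 */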

/*
 * Add an entry to a hash bucket
 */
static void hash_bucket_add(struct hash_bucket *bucket,
			    struct dma_debug_entry *entry)
{
	list_add_tail(&entry->list, &bucket->list);
}

/*
 * Remove entry from a hash bucket list
 */
static void hash_bucket_del(struct dma_debug_entry *entry)
{
	list_del(&entry->list);
}

/*
 * Dump mapping entries for debugging purposes
 */
void debug_dma_dump_mappings(struct device *dev)
{
	int idx;

	for (idx = 0; idx < HASH_SIZE; idx++) {
		struct hash_bucket *bucket = &dma_entry_hash[idx];
		struct dma_debug_entry *entry;
		unsigned long flags;

		spin_lock_irqsave(&bucket->lock, flags);

		list_for_each_entry(entry, &bucket->list, list) {
			if (!dev || dev == entry->dev) {
				dev_info(entry->dev,
					 "%s idx %d P=%Lx D=%Lx L=%Lx %s\n",
					 type2name[entry->type], idx,
					 (unsigned long long)entry->paddr,
					 entry->dev_addr, entry->size,
					 dir2name[entry->direction]);
			}
		}

		spin_unlock_irqrestore(&bucket->lock, flags);
	}
}
EXPORT_SYMBOL(debug_dma_dump_mappings);

/*
 * Wrapper function for adding an entry to the hash.
 * This function takes care of locking itself.
 */
static void add_dma_entry(struct dma_debug_entry *entry)
{
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(entry, &flags);
	hash_bucket_add(bucket, entry);
	put_hash_bucket(bucket, &flags);
}

static struct dma_debug_entry *__dma_entry_alloc(void)
{
	struct dma_debug_entry *entry;

	entry = list_entry(free_entries.next, struct dma_debug_entry, list);
	list_del(&entry->list);
	memset(entry, 0, sizeof(*entry));

	num_free_entries -= 1;
	if (num_free_entries < min_free_entries)
		min_free_entries = num_free_entries;

	return entry;
}

/* struct dma_entry allocator
 *
 * The next two functions implement the allocator for
 * struct dma_debug_entries.
 */
static struct dma_debug_entry *dma_entry_alloc(void)
{
	struct dma_debug_entry *entry = NULL;
	unsigned long flags;

	spin_lock_irqsave(&free_entries_lock, flags);

	if (list_empty(&free_entries)) {
		pr_err("DMA-API: debugging out of memory - disabling\n");
		global_disable = true;
		goto out;
	}

	entry = __dma_entry_alloc();

#ifdef CONFIG_STACKTRACE
	entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
	entry->stacktrace.entries = entry->st_entries;
	entry->stacktrace.skip = 2;
	save_stack_trace(&entry->stacktrace);
#endif

out:
	spin_unlock_irqrestore(&free_entries_lock, flags);

	return entry;
}

static void dma_entry_free(struct dma_debug_entry *entry)
{
	unsigned long flags;

	/*
	 * add to beginning of the list - this way the entries are
	 * more likely cache hot when they are reallocated.
	 */
	spin_lock_irqsave(&free_entries_lock, flags);
	list_add(&entry->list, &free_entries);
	num_free_entries += 1;
	spin_unlock_irqrestore(&free_entries_lock, flags);
}

int dma_debug_resize_entries(u32 num_entries)
{
	int i, delta, ret = 0;
	unsigned long flags;
	struct dma_debug_entry *entry;
	LIST_HEAD(tmp);

	spin_lock_irqsave(&free_entries_lock, flags);

	if (nr_total_entries < num_entries) {
		delta = num_entries - nr_total_entries;

		spin_unlock_irqrestore(&free_entries_lock, flags);

		for (i = 0; i < delta; i++) {
			entry = kzalloc(sizeof(*entry), GFP_KERNEL);
			if (!entry)
				break;

			list_add_tail(&entry->list, &tmp);
		}

		spin_lock_irqsave(&free_entries_lock, flags);

		list_splice(&tmp, &free_entries);
		nr_total_entries += i;
		num_free_entries += i;
	} else {
		delta = nr_total_entries - num_entries;

		for (i = 0; i < delta && !list_empty(&free_entries); i++) {
			entry = __dma_entry_alloc();
			kfree(entry);
		}

		nr_total_entries -= i;
	}

	if (nr_total_entries != num_entries)
		ret = 1;

	spin_unlock_irqrestore(&free_entries_lock, flags);

	return ret;
}
EXPORT_SYMBOL(dma_debug_resize_entries);
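
/*
 * dma_debug_resize_entries() allows code outside this file to grow or
 * shrink the preallocated entry pool at runtime. A hypothetical caller
 * that expects to create many mappings could, for example, do
 * dma_debug_resize_entries(65536) from its init path; the caller and the
 * number here are illustrative only.
 */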

/*
 * DMA-API debugging init code
 *
 * The init code does two things:
 *   1. Initialize core data structures
 *   2. Preallocate a given number of dma_debug_entry structs
 */

static int prealloc_memory(u32 num_entries)
{
	struct dma_debug_entry *entry, *next_entry;
	int i;

	for (i = 0; i < num_entries; ++i) {
		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry)
			goto out_err;

		list_add_tail(&entry->list, &free_entries);
	}

	num_free_entries = num_entries;
	min_free_entries = num_entries;

	pr_info("DMA-API: preallocated %d debug entries\n", num_entries);

	return 0;

out_err:

	list_for_each_entry_safe(entry, next_entry, &free_entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	return -ENOMEM;
}

static ssize_t filter_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	char buf[NAME_MAX_LEN + 1];
	unsigned long flags;
	int len;

	if (!current_driver_name[0])
		return 0;

	/*
	 * We can't copy to userspace directly because current_driver_name can
	 * only be read under the driver_name_lock with irqs disabled. So
	 * create a temporary copy first.
	 */
	read_lock_irqsave(&driver_name_lock, flags);
	len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
	read_unlock_irqrestore(&driver_name_lock, flags);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t filter_write(struct file *file, const char __user *userbuf,
			    size_t count, loff_t *ppos)
{
	char buf[NAME_MAX_LEN];
	unsigned long flags;
	size_t len;
	int i;

	/*
	 * We can't copy from userspace directly. Access to
	 * current_driver_name is protected with a write_lock with irqs
	 * disabled. Since copy_from_user can fault and may sleep we
	 * need to copy to temporary buffer first
	 */
	len = min(count, (size_t)(NAME_MAX_LEN - 1));
	if (copy_from_user(buf, userbuf, len))
		return -EFAULT;

	buf[len] = 0;

	write_lock_irqsave(&driver_name_lock, flags);

	/*
	 * Now handle the string we got from userspace very carefully.
	 * The rules are:
	 * - only use the first token we got
	 * - token delimiter is everything looking like a space
	 *   character (' ', '\n', '\t' ...)
	 *
	 */
	if (!isalnum(buf[0])) {
		/*
		 * If the first character userspace gave us is not
		 * alphanumerical then assume the filter should be
		 * switched off.
		 */
		if (current_driver_name[0])
			pr_info("DMA-API: switching off dma-debug driver filter\n");
		current_driver_name[0] = 0;
		current_driver = NULL;
		goto out_unlock;
	}

	/*
	 * Now parse out the first token and use it as the name for the
	 * driver to filter for.
	 */
	for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
		current_driver_name[i] = buf[i];
		if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
			break;
	}
	current_driver_name[i] = 0;
	current_driver = NULL;

	pr_info("DMA-API: enable driver filter for driver [%s]\n",
		current_driver_name);

out_unlock:
	write_unlock_irqrestore(&driver_name_lock, flags);

	return count;
}

static const struct file_operations filter_fops = {
	.read  = filter_read,
	.write = filter_write,
	.llseek = default_llseek,
};

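/*
 * Example use of the driver filter from userspace (assuming debugfs is
 * mounted at /sys/kernel/debug):
 *
 *	echo e1000e > /sys/kernel/debug/dma-api/driver_filter
 *
 * limits error reporting to the named driver, and writing a string that
 * does not start with an alphanumeric character switches the filter off
 * again. The driver name is just an example.
 */
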
static int dma_debug_fs_init(void)
{
	dma_debug_dent = debugfs_create_dir("dma-api", NULL);
	if (!dma_debug_dent) {
		pr_err("DMA-API: can not create debugfs directory\n");
		return -ENOMEM;
	}

	global_disable_dent = debugfs_create_bool("disabled", 0444,
						  dma_debug_dent,
						  (u32 *)&global_disable);
	if (!global_disable_dent)
		goto out_err;

	error_count_dent = debugfs_create_u32("error_count", 0444,
					      dma_debug_dent, &error_count);
	if (!error_count_dent)
		goto out_err;

	show_all_errors_dent = debugfs_create_u32("all_errors", 0644,
						  dma_debug_dent,
						  &show_all_errors);
	if (!show_all_errors_dent)
		goto out_err;

	show_num_errors_dent = debugfs_create_u32("num_errors", 0644,
						  dma_debug_dent,
						  &show_num_errors);
	if (!show_num_errors_dent)
		goto out_err;

	num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444,
						   dma_debug_dent,
						   &num_free_entries);
	if (!num_free_entries_dent)
		goto out_err;

	min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444,
						   dma_debug_dent,
						   &min_free_entries);
	if (!min_free_entries_dent)
		goto out_err;

	filter_dent = debugfs_create_file("driver_filter", 0644,
					  dma_debug_dent, NULL, &filter_fops);
	if (!filter_dent)
		goto out_err;

	return 0;

out_err:
	debugfs_remove_recursive(dma_debug_dent);

	return -ENOMEM;
}

static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
{
	struct dma_debug_entry *entry;
	unsigned long flags;
	int count = 0, i;

	local_irq_save(flags);

	for (i = 0; i < HASH_SIZE; ++i) {
		spin_lock(&dma_entry_hash[i].lock);
		list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
			if (entry->dev == dev) {
				count += 1;
				*out_entry = entry;
			}
		}
		spin_unlock(&dma_entry_hash[i].lock);
	}

	local_irq_restore(flags);

	return count;
}

static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
{
	struct device *dev = data;
	struct dma_debug_entry *uninitialized_var(entry);
	int count;

	if (global_disable)
		return 0;

	switch (action) {
	case BUS_NOTIFY_UNBOUND_DRIVER:
		count = device_dma_allocations(dev, &entry);
		if (count == 0)
			break;
		err_printk(dev, entry, "DMA-API: device driver has pending "
			   "DMA allocations while released from device "
			   "[count=%d]\n"
			   "One of leaked entries details: "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [mapped as %s]\n",
			   count, entry->dev_addr, entry->size,
			   dir2name[entry->direction], type2name[entry->type]);
		break;
	default:
		break;
	}

	return 0;
}

void dma_debug_add_bus(struct bus_type *bus)
{
	struct notifier_block *nb;

	if (global_disable)
		return;

	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
	if (nb == NULL) {
		pr_err("dma_debug_add_bus: out of memory\n");
		return;
	}

	nb->notifier_call = dma_debug_device_change;

	bus_register_notifier(bus, nb);
}

/*
 * Let the architectures decide how many entries should be preallocated.
 */
void dma_debug_init(u32 num_entries)
{
	int i;

	if (global_disable)
		return;

	for (i = 0; i < HASH_SIZE; ++i) {
		INIT_LIST_HEAD(&dma_entry_hash[i].list);
		spin_lock_init(&dma_entry_hash[i].lock);
	}

	if (dma_debug_fs_init() != 0) {
		pr_err("DMA-API: error creating debugfs entries - disabling\n");
		global_disable = true;

		return;
	}

	if (req_entries)
		num_entries = req_entries;

	if (prealloc_memory(num_entries) != 0) {
		pr_err("DMA-API: debugging out of memory error - disabled\n");
		global_disable = true;

		return;
	}

	nr_total_entries = num_free_entries;

	pr_info("DMA-API: debugging enabled by kernel config\n");
}

static __init int dma_debug_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (strncmp(str, "off", 3) == 0) {
		pr_info("DMA-API: debugging disabled on kernel command line\n");
		global_disable = true;
	}

	return 0;
}

static __init int dma_debug_entries_cmdline(char *str)
{
	int res;

	if (!str)
		return -EINVAL;

	res = get_option(&str, &req_entries);

	if (!res)
		req_entries = 0;

	return 0;
}

__setup("dma_debug=", dma_debug_cmdline);
__setup("dma_debug_entries=", dma_debug_entries_cmdline);
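
/*
 * Example boot parameters: "dma_debug=off" disables the checks completely,
 * and "dma_debug_entries=65536" requests a larger preallocated pool than
 * the architecture's default (the number is only an example).
 */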

static void check_unmap(struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	if (dma_mapping_error(ref->dev, ref->dev_addr)) {
		err_printk(ref->dev, NULL, "DMA-API: device driver tries "
			   "to free an invalid DMA memory address\n");
		return;
	}

	bucket = get_hash_bucket(ref, &flags);
	entry = bucket_find_exact(bucket, ref);

	if (!entry) {
		err_printk(ref->dev, NULL, "DMA-API: device driver tries "
			   "to free DMA memory it has not allocated "
			   "[device address=0x%016llx] [size=%llu bytes]\n",
			   ref->dev_addr, ref->size);
		goto out;
	}

	if (ref->size != entry->size) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA memory with different size "
			   "[device address=0x%016llx] [map size=%llu bytes] "
			   "[unmap size=%llu bytes]\n",
			   ref->dev_addr, entry->size, ref->size);
	}

	if (ref->type != entry->type) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA memory with wrong function "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped as %s] [unmapped as %s]\n",
			   ref->dev_addr, ref->size,
			   type2name[entry->type], type2name[ref->type]);
	} else if ((entry->type == dma_debug_coherent) &&
		   (ref->paddr != entry->paddr)) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA memory with different CPU address "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[cpu alloc address=0x%016llx] "
			   "[cpu free address=0x%016llx]",
			   ref->dev_addr, ref->size,
			   (unsigned long long)entry->paddr,
			   (unsigned long long)ref->paddr);
	}

	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
	    ref->sg_call_ents != entry->sg_call_ents) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA sg list with different entry count "
			   "[map count=%d] [unmap count=%d]\n",
			   entry->sg_call_ents, ref->sg_call_ents);
	}

	/*
	 * This may be no bug in reality - but most implementations of the
	 * DMA API don't handle this properly, so check for it here
	 */
	if (ref->direction != entry->direction) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA memory with different direction "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [unmapped with %s]\n",
			   ref->dev_addr, ref->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);
	}

	hash_bucket_del(entry);
	dma_entry_free(entry);

out:
	put_hash_bucket(bucket, &flags);
}

static void check_for_stack(struct device *dev, void *addr)
{
	if (object_is_on_stack(addr))
		err_printk(dev, NULL, "DMA-API: device driver maps memory from "
			   "stack [addr=%p]\n", addr);
}

static inline bool overlap(void *addr, unsigned long len, void *start, void *end)
{
	unsigned long a1 = (unsigned long)addr;
	unsigned long b1 = a1 + len;
	unsigned long a2 = (unsigned long)start;
	unsigned long b2 = (unsigned long)end;

	return !(b1 <= a2 || a1 >= b2);
}

static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
{
	if (overlap(addr, len, _text, _etext) ||
	    overlap(addr, len, __start_rodata, __end_rodata))
		err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
}

static void check_sync(struct device *dev,
		       struct dma_debug_entry *ref,
		       bool to_cpu)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(ref, &flags);

	entry = bucket_find_contain(&bucket, ref, &flags);

	if (!entry) {
		err_printk(dev, NULL, "DMA-API: device driver tries "
			   "to sync DMA memory it has not allocated "
			   "[device address=0x%016llx] [size=%llu bytes]\n",
			   (unsigned long long)ref->dev_addr, ref->size);
		goto out;
	}

	if (ref->size > entry->size) {
		err_printk(dev, entry, "DMA-API: device driver syncs"
			   " DMA memory outside allocated range "
			   "[device address=0x%016llx] "
			   "[allocation size=%llu bytes] "
			   "[sync offset+size=%llu]\n",
			   entry->dev_addr, entry->size,
			   ref->size);
	}

	if (entry->direction == DMA_BIDIRECTIONAL)
		goto out;

	if (ref->direction != entry->direction) {
		err_printk(dev, entry, "DMA-API: device driver syncs "
			   "DMA memory with different direction "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);
	}

	if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
		      !(ref->direction == DMA_TO_DEVICE))
		err_printk(dev, entry, "DMA-API: device driver syncs "
			   "device read-only DMA memory for cpu "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);

	if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
		       !(ref->direction == DMA_FROM_DEVICE))
		err_printk(dev, entry, "DMA-API: device driver syncs "
			   "device write-only DMA memory to device "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);

out:
	put_hash_bucket(bucket, &flags);
}
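
/*
 * Example of what the two direction checks above catch: a buffer mapped
 * with DMA_TO_DEVICE is only read by the device, so syncing it for the cpu
 * with a direction other than DMA_TO_DEVICE is reported as a sync of
 * device read-only memory; the mirror case is a DMA_FROM_DEVICE mapping
 * synced back to the device with a direction other than DMA_FROM_DEVICE.
 */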

void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
			size_t size, int direction, dma_addr_t dma_addr,
			bool map_single)
{
	struct dma_debug_entry *entry;

	if (unlikely(global_disable))
		return;

	if (unlikely(dma_mapping_error(dev, dma_addr)))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->dev       = dev;
	entry->type      = dma_debug_page;
	entry->paddr     = page_to_phys(page) + offset;
	entry->dev_addr  = dma_addr;
	entry->size      = size;
	entry->direction = direction;

	if (map_single)
		entry->type = dma_debug_single;

	if (!PageHighMem(page)) {
		void *addr = page_address(page) + offset;

		check_for_stack(dev, addr);
		check_for_illegal_area(dev, addr, size);
	}

	add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_map_page);
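
/*
 * Drivers do not call these hooks directly; the generic DMA mapping
 * wrappers invoke them after the architecture operation has run. As a
 * rough sketch (not a verbatim copy of any header), dma_map_page() does:
 *
 *	addr = ops->map_page(dev, page, offset, size, dir, NULL);
 *	debug_dma_map_page(dev, page, offset, size, dir, addr, false);
 *
 * so the entry recorded here always reflects the address the device will
 * actually use.
 */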

void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
			  size_t size, int direction, bool map_single)
{
	struct dma_debug_entry ref = {
		.type      = dma_debug_page,
		.dev       = dev,
		.dev_addr  = addr,
		.size      = size,
		.direction = direction,
	};

	if (unlikely(global_disable))
		return;

	if (map_single)
		ref.type = dma_debug_single;

	check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_unmap_page);

void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
		      int nents, int mapped_ents, int direction)
{
	struct dma_debug_entry *entry;
	struct scatterlist *s;
	int i;

	if (unlikely(global_disable))
		return;

	for_each_sg(sg, s, mapped_ents, i) {
		entry = dma_entry_alloc();
		if (!entry)
			return;

		entry->type           = dma_debug_sg;
		entry->dev            = dev;
		entry->paddr          = sg_phys(s);
		entry->size           = sg_dma_len(s);
		entry->dev_addr       = sg_dma_address(s);
		entry->direction      = direction;
		entry->sg_call_ents   = nents;
		entry->sg_mapped_ents = mapped_ents;

		if (!PageHighMem(sg_page(s))) {
			check_for_stack(dev, sg_virt(s));
			check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
		}

		add_dma_entry(entry);
	}
}
EXPORT_SYMBOL(debug_dma_map_sg);

static int get_nr_mapped_entries(struct device *dev,
				 struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;
	int mapped_ents;

	bucket      = get_hash_bucket(ref, &flags);
	entry       = bucket_find_exact(bucket, ref);
	mapped_ents = 0;

	if (entry)
		mapped_ents = entry->sg_mapped_ents;
	put_hash_bucket(bucket, &flags);

	return mapped_ents;
}

void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
			int nelems, int dir)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(global_disable))
		return;

	for_each_sg(sglist, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type         = dma_debug_sg,
			.dev          = dev,
			.paddr        = sg_phys(s),
			.dev_addr     = sg_dma_address(s),
			.size         = sg_dma_len(s),
			.direction    = dir,
			.sg_call_ents = nelems,
		};

		if (mapped_ents && i >= mapped_ents)
			break;

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		check_unmap(&ref);
	}
}
EXPORT_SYMBOL(debug_dma_unmap_sg);

void debug_dma_alloc_coherent(struct device *dev, size_t size,
			      dma_addr_t dma_addr, void *virt)
{
	struct dma_debug_entry *entry;

	if (unlikely(global_disable))
		return;

	if (unlikely(virt == NULL))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->type      = dma_debug_coherent;
	entry->dev       = dev;
	entry->paddr     = virt_to_phys(virt);
	entry->size      = size;
	entry->dev_addr  = dma_addr;
	entry->direction = DMA_BIDIRECTIONAL;

	add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_alloc_coherent);

void debug_dma_free_coherent(struct device *dev, size_t size,
			     void *virt, dma_addr_t addr)
{
	struct dma_debug_entry ref = {
		.type      = dma_debug_coherent,
		.dev       = dev,
		.paddr     = virt_to_phys(virt),
		.dev_addr  = addr,
		.size      = size,
		.direction = DMA_BIDIRECTIONAL,
	};

	if (unlikely(global_disable))
		return;

	check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_free_coherent);

void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
				   size_t size, int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(global_disable))
		return;

	ref.type         = dma_debug_single;
	ref.dev          = dev;
	ref.dev_addr     = dma_handle;
	ref.size         = size;
	ref.direction    = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);

void debug_dma_sync_single_for_device(struct device *dev,
				      dma_addr_t dma_handle, size_t size,
				      int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(global_disable))
		return;

	ref.type         = dma_debug_single;
	ref.dev          = dev;
	ref.dev_addr     = dma_handle;
	ref.size         = size;
	ref.direction    = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_device);

void debug_dma_sync_single_range_for_cpu(struct device *dev,
					 dma_addr_t dma_handle,
					 unsigned long offset, size_t size,
					 int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(global_disable))
		return;

	ref.type         = dma_debug_single;
	ref.dev          = dev;
	ref.dev_addr     = dma_handle;
	ref.size         = offset + size;
	ref.direction    = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);

void debug_dma_sync_single_range_for_device(struct device *dev,
					    dma_addr_t dma_handle,
					    unsigned long offset,
					    size_t size, int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(global_disable))
		return;

	ref.type         = dma_debug_single;
	ref.dev          = dev;
	ref.dev_addr     = dma_handle;
	ref.size         = offset + size;
	ref.direction    = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);

void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			       int nelems, int direction)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(global_disable))
		return;

	for_each_sg(sg, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type         = dma_debug_sg,
			.dev          = dev,
			.paddr        = sg_phys(s),
			.dev_addr     = sg_dma_address(s),
			.size         = sg_dma_len(s),
			.direction    = direction,
			.sg_call_ents = nelems,
		};

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		if (i >= mapped_ents)
			break;

		check_sync(dev, &ref, true);
	}
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);

void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				  int nelems, int direction)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(global_disable))
		return;

	for_each_sg(sg, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type         = dma_debug_sg,
			.dev          = dev,
			.paddr        = sg_phys(s),
			.dev_addr     = sg_dma_address(s),
			.size         = sg_dma_len(s),
			.direction    = direction,
			.sg_call_ents = nelems,
		};
		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		if (i >= mapped_ents)
			break;

		check_sync(dev, &ref, false);
	}
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_device);

static int __init dma_debug_driver_setup(char *str)
{
	int i;

	for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
		current_driver_name[i] = *str;
		if (*str == 0)
			break;
	}

	if (current_driver_name[0])
		pr_info("DMA-API: enable driver filter for driver [%s]\n",
			current_driver_name);

	return 1;
}
__setup("dma_debug_driver=", dma_debug_driver_setup);
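
/*
 * Example: booting with "dma_debug_driver=e1000e" arms the same filter as
 * the debugfs file above from early boot on, so only errors caused by that
 * driver are reported. The driver name is only an example.
 */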