/*
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/stacktrace.h>
#include <linux/dma-debug.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <asm/sections.h>

#define HASH_SIZE     1024ULL
#define HASH_FN_SHIFT 13
#define HASH_FN_MASK  (HASH_SIZE - 1)

enum {
        dma_debug_single,
        dma_debug_page,
        dma_debug_sg,
        dma_debug_coherent,
};

#define DMA_DEBUG_STACKTRACE_ENTRIES 5

struct dma_debug_entry {
        struct list_head list;
        struct device *dev;
        int type;
        phys_addr_t paddr;
        u64 dev_addr;
        u64 size;
        int direction;
        int sg_call_ents;
        int sg_mapped_ents;
#ifdef CONFIG_STACKTRACE
        struct stack_trace stacktrace;
        unsigned long st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
#endif
};

struct hash_bucket {
        struct list_head list;
        spinlock_t lock;
} ____cacheline_aligned_in_smp;

/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];
/* List of pre-allocated dma_debug_entry's */
static LIST_HEAD(free_entries);
/* Lock for the list above */
static DEFINE_SPINLOCK(free_entries_lock);

/* Global disable flag - will be set in case of an error */
static bool global_disable __read_mostly;

/* Global error count */
static u32 error_count;

/* Global error show enable */
static u32 show_all_errors __read_mostly;
/* Number of errors to show */
static u32 show_num_errors = 1;

static u32 num_free_entries;
static u32 min_free_entries;
static u32 nr_total_entries;

/* number of preallocated entries requested by kernel cmdline */
static u32 req_entries;

/* debugfs dentry's for the stuff above */
static struct dentry *dma_debug_dent __read_mostly;
static struct dentry *global_disable_dent __read_mostly;
static struct dentry *error_count_dent __read_mostly;
static struct dentry *show_all_errors_dent __read_mostly;
static struct dentry *show_num_errors_dent __read_mostly;
static struct dentry *num_free_entries_dent __read_mostly;
static struct dentry *min_free_entries_dent __read_mostly;

static const char *type2name[4] = { "single", "page",
                                    "scatter-gather", "coherent" };

static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
                                   "DMA_FROM_DEVICE", "DMA_NONE" };

/*
 * Access to some variables in this macro is racy. We can't use atomic_t
 * here because all these variables are exported to debugfs, and some of
 * them are even writable there. This is also the reason why a lock won't
 * help much. But anyway, the races are not a big deal. Here is why:
 *
 *   error_count: the addition is racy, but the worst thing that can
 *                happen is that we don't count some errors
 *   show_num_errors: the subtraction is racy. Also not a big deal because
 *                    in the worst case this results in one more warning
 *                    in the system log than the user configured. This
 *                    variable is writable via debugfs.
 */
static inline void dump_entry_trace(struct dma_debug_entry *entry)
{
#ifdef CONFIG_STACKTRACE
        if (entry) {
                printk(KERN_WARNING "Mapped at:\n");
                print_stack_trace(&entry->stacktrace, 0);
        }
#endif
}

#define err_printk(dev, entry, format, arg...) do {             \
                error_count += 1;                               \
                if (show_all_errors || show_num_errors > 0) {   \
                        WARN(1, "%s %s: " format,               \
                             dev_driver_string(dev),            \
                             dev_name(dev) , ## arg);           \
                        dump_entry_trace(entry);                \
                }                                               \
                if (!show_all_errors && show_num_errors > 0)    \
                        show_num_errors -= 1;                   \
        } while (0)

/*
 * Hash related functions
 *
 * Every DMA-API request is saved into a struct dma_debug_entry. To
 * have quick access to these structs they are stored into a hash.
 */
static int hash_fn(struct dma_debug_entry *entry)
{
        /*
         * The hash function is based on the dma address. We use
         * bits 13-22 of the address (HASH_FN_SHIFT plus the 10-bit
         * HASH_FN_MASK) as the index into the hash.
         */
        return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}

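/*
 * Worked example (illustrative only; the address is made up): for a
 * device address of 0x12345000,
 *
 *      0x12345000 >> HASH_FN_SHIFT  =  0x12345000 >> 13  =  0x91a2
 *      0x91a2 & HASH_FN_MASK        =  0x91a2 & 0x3ff    =  0x1a2
 *
 * so the entry lands in dma_entry_hash[418]. All entries whose device
 * addresses agree in bits 13-22 share a bucket.
 */
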
/*
 * Request exclusive access to a hash bucket for a given dma_debug_entry.
 */
static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
                                           unsigned long *flags)
{
        int idx = hash_fn(entry);
        unsigned long __flags;

        spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
        *flags = __flags;
        return &dma_entry_hash[idx];
}

/*
 * Give up exclusive access to the hash bucket
 */
static void put_hash_bucket(struct hash_bucket *bucket,
                            unsigned long *flags)
{
        unsigned long __flags = *flags;

        spin_unlock_irqrestore(&bucket->lock, __flags);
}

/*
 * Search a given entry in the hash bucket list
 */
static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
                                                struct dma_debug_entry *ref)
{
        struct dma_debug_entry *entry;

        list_for_each_entry(entry, &bucket->list, list) {
                if ((entry->dev_addr == ref->dev_addr) &&
                    (entry->dev == ref->dev))
                        return entry;
        }

        return NULL;
}

/*
 * Add an entry to a hash bucket
 */
static void hash_bucket_add(struct hash_bucket *bucket,
                            struct dma_debug_entry *entry)
{
        list_add_tail(&entry->list, &bucket->list);
}

/*
 * Remove entry from a hash bucket list
 */
static void hash_bucket_del(struct dma_debug_entry *entry)
{
        list_del(&entry->list);
}

/*
 * Dump mapping entries for debugging purposes
 */
void debug_dma_dump_mappings(struct device *dev)
{
        int idx;

        for (idx = 0; idx < HASH_SIZE; idx++) {
                struct hash_bucket *bucket = &dma_entry_hash[idx];
                struct dma_debug_entry *entry;
                unsigned long flags;

                spin_lock_irqsave(&bucket->lock, flags);

                list_for_each_entry(entry, &bucket->list, list) {
                        if (!dev || dev == entry->dev) {
                                dev_info(entry->dev,
                                         "%s idx %d P=%Lx D=%Lx L=%Lx %s\n",
                                         type2name[entry->type], idx,
                                         (unsigned long long)entry->paddr,
                                         entry->dev_addr, entry->size,
                                         dir2name[entry->direction]);
                        }
                }

                spin_unlock_irqrestore(&bucket->lock, flags);
        }
}
EXPORT_SYMBOL(debug_dma_dump_mappings);
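
/*
 * Usage sketch (editorial, not from the original source): a caller can
 * dump the mappings of a single device, or pass NULL to dump every
 * active mapping in the hash:
 *
 *      debug_dma_dump_mappings(dev);     dump one device
 *      debug_dma_dump_mappings(NULL);    dump all devices
 *
 * Each printed line shows the mapping type, bucket index, physical
 * address (P=), device address (D=), length (L=) and direction.
 */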
246
/*
 * Wrapper function for adding an entry to the hash.
 * This function takes care of locking itself.
 */
static void add_dma_entry(struct dma_debug_entry *entry)
{
        struct hash_bucket *bucket;
        unsigned long flags;

        bucket = get_hash_bucket(entry, &flags);
        hash_bucket_add(bucket, entry);
        put_hash_bucket(bucket, &flags);
}

static struct dma_debug_entry *__dma_entry_alloc(void)
{
        struct dma_debug_entry *entry;

        entry = list_entry(free_entries.next, struct dma_debug_entry, list);
        list_del(&entry->list);
        memset(entry, 0, sizeof(*entry));

        num_free_entries -= 1;
        if (num_free_entries < min_free_entries)
                min_free_entries = num_free_entries;

        return entry;
}

/* struct dma_debug_entry allocator
 *
 * The next two functions implement the allocator for
 * struct dma_debug_entry objects.
 */
static struct dma_debug_entry *dma_entry_alloc(void)
{
        struct dma_debug_entry *entry = NULL;
        unsigned long flags;

        spin_lock_irqsave(&free_entries_lock, flags);

        if (list_empty(&free_entries)) {
                printk(KERN_ERR "DMA-API: debugging out of memory "
                                "- disabling\n");
                global_disable = true;
                goto out;
        }

        entry = __dma_entry_alloc();

#ifdef CONFIG_STACKTRACE
        entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
        entry->stacktrace.entries = entry->st_entries;
        entry->stacktrace.skip = 2;
        save_stack_trace(&entry->stacktrace);
#endif

out:
        spin_unlock_irqrestore(&free_entries_lock, flags);

        return entry;
}

static void dma_entry_free(struct dma_debug_entry *entry)
{
        unsigned long flags;

        /*
         * Add to the beginning of the list - this way the entries are
         * more likely cache hot when they are reallocated.
         */
        spin_lock_irqsave(&free_entries_lock, flags);
        list_add(&entry->list, &free_entries);
        num_free_entries += 1;
        spin_unlock_irqrestore(&free_entries_lock, flags);
}

int dma_debug_resize_entries(u32 num_entries)
{
        int i, delta, ret = 0;
        unsigned long flags;
        struct dma_debug_entry *entry;
        LIST_HEAD(tmp);

        spin_lock_irqsave(&free_entries_lock, flags);

        if (nr_total_entries < num_entries) {
                delta = num_entries - nr_total_entries;

                spin_unlock_irqrestore(&free_entries_lock, flags);

                for (i = 0; i < delta; i++) {
                        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
                        if (!entry)
                                break;

                        list_add_tail(&entry->list, &tmp);
                }

                spin_lock_irqsave(&free_entries_lock, flags);

                list_splice(&tmp, &free_entries);
                nr_total_entries += i;
                num_free_entries += i;
        } else {
                delta = nr_total_entries - num_entries;

                for (i = 0; i < delta && !list_empty(&free_entries); i++) {
                        entry = __dma_entry_alloc();
                        kfree(entry);
                }

                nr_total_entries -= i;
        }

        if (nr_total_entries != num_entries)
                ret = 1;

        spin_unlock_irqrestore(&free_entries_lock, flags);

        return ret;
}
EXPORT_SYMBOL(dma_debug_resize_entries);

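/*
 * Usage sketch (illustrative; the caller and the count of 65536 are
 * assumptions): IOMMU or architecture code that expects to create many
 * concurrent mappings can grow the pool at runtime. A non-zero return
 * value means the pool could not be resized to exactly the requested
 * count:
 *
 *      if (dma_debug_resize_entries(65536))
 *              printk(KERN_WARNING "could not resize dma-debug pool\n");
 */
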
/*
 * DMA-API debugging init code
 *
 * The init code does two things:
 *   1. Initialize core data structures
 *   2. Preallocate a given number of dma_debug_entry structs
 */

static int prealloc_memory(u32 num_entries)
{
        struct dma_debug_entry *entry, *next_entry;
        int i;

        for (i = 0; i < num_entries; ++i) {
                entry = kzalloc(sizeof(*entry), GFP_KERNEL);
                if (!entry)
                        goto out_err;

                list_add_tail(&entry->list, &free_entries);
        }

        num_free_entries = num_entries;
        min_free_entries = num_entries;

        printk(KERN_INFO "DMA-API: preallocated %d debug entries\n",
               num_entries);

        return 0;

out_err:

        list_for_each_entry_safe(entry, next_entry, &free_entries, list) {
                list_del(&entry->list);
                kfree(entry);
        }

        return -ENOMEM;
}

static int dma_debug_fs_init(void)
{
        dma_debug_dent = debugfs_create_dir("dma-api", NULL);
        if (!dma_debug_dent) {
                printk(KERN_ERR "DMA-API: can not create debugfs directory\n");
                return -ENOMEM;
        }

        global_disable_dent = debugfs_create_bool("disabled", 0444,
                                                  dma_debug_dent,
                                                  (u32 *)&global_disable);
        if (!global_disable_dent)
                goto out_err;

        error_count_dent = debugfs_create_u32("error_count", 0444,
                                              dma_debug_dent, &error_count);
        if (!error_count_dent)
                goto out_err;

        show_all_errors_dent = debugfs_create_u32("all_errors", 0644,
                                                  dma_debug_dent,
                                                  &show_all_errors);
        if (!show_all_errors_dent)
                goto out_err;

        show_num_errors_dent = debugfs_create_u32("num_errors", 0644,
                                                  dma_debug_dent,
                                                  &show_num_errors);
        if (!show_num_errors_dent)
                goto out_err;

        num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444,
                                                   dma_debug_dent,
                                                   &num_free_entries);
        if (!num_free_entries_dent)
                goto out_err;

        min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444,
                                                   dma_debug_dent,
                                                   &min_free_entries);
        if (!min_free_entries_dent)
                goto out_err;

        return 0;

out_err:
        debugfs_remove_recursive(dma_debug_dent);

        return -ENOMEM;
}

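/*
 * The files created above typically show up under
 * /sys/kernel/debug/dma-api/ (assuming debugfs is mounted in the usual
 * place), e.g.:
 *
 *      cat /sys/kernel/debug/dma-api/error_count
 *      echo 1 > /sys/kernel/debug/dma-api/all_errors
 *
 * disabled, error_count, num_free_entries and min_free_entries are
 * read-only; all_errors and num_errors are writable.
 */
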
void dma_debug_add_bus(struct bus_type *bus)
{
        /* FIXME: register notifier */
}

/*
 * Let the architectures decide how many entries should be preallocated.
 */
void dma_debug_init(u32 num_entries)
{
        int i;

        if (global_disable)
                return;

        for (i = 0; i < HASH_SIZE; ++i) {
                INIT_LIST_HEAD(&dma_entry_hash[i].list);
                spin_lock_init(&dma_entry_hash[i].lock);
        }

        if (dma_debug_fs_init() != 0) {
                printk(KERN_ERR "DMA-API: error creating debugfs entries "
                                "- disabling\n");
                global_disable = true;

                return;
        }

        if (req_entries)
                num_entries = req_entries;

        if (prealloc_memory(num_entries) != 0) {
                printk(KERN_ERR "DMA-API: debugging out of memory error "
                                "- disabled\n");
                global_disable = true;

                return;
        }

        nr_total_entries = num_free_entries;

        printk(KERN_INFO "DMA-API: debugging enabled by kernel config\n");
}

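/*
 * Call sketch (hedged; the caller, constant name and value below are
 * architecture-specific assumptions, not part of this file): an
 * architecture enables the checks early in its DMA setup, e.g.
 *
 *      #define PREALLOC_DMA_DEBUG_ENTRIES 32768
 *
 *      dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
 *
 * The count is only a default; it is overridden by the
 * dma_debug_entries= kernel parameter handled below.
 */
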
static __init int dma_debug_cmdline(char *str)
{
        if (!str)
                return -EINVAL;

        if (strncmp(str, "off", 3) == 0) {
                printk(KERN_INFO "DMA-API: debugging disabled on kernel "
                                 "command line\n");
                global_disable = true;
        }

        return 0;
}

static __init int dma_debug_entries_cmdline(char *str)
{
        int res;

        if (!str)
                return -EINVAL;

        res = get_option(&str, &req_entries);

        if (!res)
                req_entries = 0;

        return 0;
}

__setup("dma_debug=", dma_debug_cmdline);
__setup("dma_debug_entries=", dma_debug_entries_cmdline);

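/*
 * Boot-time usage, derived from the two handlers above (the entry
 * count is only an example value):
 *
 *      dma_debug=off             disables the checks entirely
 *      dma_debug_entries=8192    overrides the preallocated entry count
 */
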
static void check_unmap(struct dma_debug_entry *ref)
{
        struct dma_debug_entry *entry;
        struct hash_bucket *bucket;
        unsigned long flags;

        if (dma_mapping_error(ref->dev, ref->dev_addr)) {
                err_printk(ref->dev, NULL, "DMA-API: device driver tries "
                           "to free an invalid DMA memory address\n");
                return;
        }

        bucket = get_hash_bucket(ref, &flags);
        entry = hash_bucket_find(bucket, ref);

        if (!entry) {
                err_printk(ref->dev, NULL, "DMA-API: device driver tries "
                           "to free DMA memory it has not allocated "
                           "[device address=0x%016llx] [size=%llu bytes]\n",
                           ref->dev_addr, ref->size);
                goto out;
        }

        if (ref->size != entry->size) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA memory with different size "
                           "[device address=0x%016llx] [map size=%llu bytes] "
                           "[unmap size=%llu bytes]\n",
                           ref->dev_addr, entry->size, ref->size);
        }

        if (ref->type != entry->type) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA memory with wrong function "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped as %s] [unmapped as %s]\n",
                           ref->dev_addr, ref->size,
                           type2name[entry->type], type2name[ref->type]);
        } else if ((entry->type == dma_debug_coherent) &&
                   (ref->paddr != entry->paddr)) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA memory with different CPU address "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[cpu alloc address=%p] [cpu free address=%p]",
                           ref->dev_addr, ref->size,
                           (void *)entry->paddr, (void *)ref->paddr);
        }

        if (ref->sg_call_ents && ref->type == dma_debug_sg &&
            ref->sg_call_ents != entry->sg_call_ents) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA sg list with different entry count "
                           "[map count=%d] [unmap count=%d]\n",
                           entry->sg_call_ents, ref->sg_call_ents);
        }

        /*
         * This may not be a bug in reality - but most implementations of
         * the DMA API don't handle this properly, so check for it here
         */
        if (ref->direction != entry->direction) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA memory with different direction "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped with %s] [unmapped with %s]\n",
                           ref->dev_addr, ref->size,
                           dir2name[entry->direction],
                           dir2name[ref->direction]);
        }

        hash_bucket_del(entry);
        dma_entry_free(entry);

out:
        put_hash_bucket(bucket, &flags);
}

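/*
 * Example of a driver bug the checks above catch (hypothetical driver
 * code, not part of this file): mapping with one size and unmapping
 * with another makes ref->size differ from entry->size and triggers
 * the "frees DMA memory with different size" warning:
 *
 *      addr = dma_map_single(dev, buf, 4096, DMA_TO_DEVICE);
 *      ...
 *      dma_unmap_single(dev, addr, 2048, DMA_TO_DEVICE);
 */
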
static void check_for_stack(struct device *dev, void *addr)
{
        if (object_is_on_stack(addr))
                err_printk(dev, NULL, "DMA-API: device driver maps memory "
                           "from stack [addr=%p]\n", addr);
}

static inline bool overlap(void *addr, u64 size, void *start, void *end)
{
        void *addr2 = (char *)addr + size;

        return ((addr >= start && addr < end) ||
                (addr2 >= start && addr2 < end) ||
                ((addr < start) && (addr2 >= end)));
}

static void check_for_illegal_area(struct device *dev, void *addr, u64 size)
{
        if (overlap(addr, size, _text, _etext) ||
            overlap(addr, size, __start_rodata, __end_rodata))
                err_printk(dev, NULL, "DMA-API: device driver maps "
                           "memory from kernel text or rodata "
                           "[addr=%p] [size=%llu]\n", addr, size);
}

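/*
 * The three clauses in overlap() above cover a mapping that starts
 * inside [start, end), one that ends inside it, and one that fully
 * encloses it. A hypothetical example of what this check flags:
 *
 *      static const char greeting[] = "hello";      (lands in rodata)
 *      dma_map_single(dev, (void *)greeting, sizeof(greeting),
 *                     DMA_TO_DEVICE);
 *
 * DMA to or from kernel text or rodata is almost always a driver bug,
 * so it is reported even when the transfer itself might happen to work.
 */
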
static void check_sync(struct device *dev, dma_addr_t addr,
                       u64 size, u64 offset, int direction, bool to_cpu)
{
        struct dma_debug_entry ref = {
                .dev       = dev,
                .dev_addr  = addr,
                .size      = size,
                .direction = direction,
        };
        struct dma_debug_entry *entry;
        struct hash_bucket *bucket;
        unsigned long flags;

        bucket = get_hash_bucket(&ref, &flags);

        entry = hash_bucket_find(bucket, &ref);

        if (!entry) {
                err_printk(dev, NULL, "DMA-API: device driver tries "
                           "to sync DMA memory it has not allocated "
                           "[device address=0x%016llx] [size=%llu bytes]\n",
                           (unsigned long long)addr, size);
                goto out;
        }

        if ((offset + size) > entry->size) {
                err_printk(dev, entry, "DMA-API: device driver syncs"
                           " DMA memory outside allocated range "
                           "[device address=0x%016llx] "
                           "[allocation size=%llu bytes] [sync offset=%llu] "
                           "[sync size=%llu]\n", entry->dev_addr, entry->size,
                           offset, size);
        }

        if (direction != entry->direction) {
                err_printk(dev, entry, "DMA-API: device driver syncs "
                           "DMA memory with different direction "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped with %s] [synced with %s]\n",
                           (unsigned long long)addr, entry->size,
                           dir2name[entry->direction],
                           dir2name[direction]);
        }

        if (entry->direction == DMA_BIDIRECTIONAL)
                goto out;

        if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
            !(direction == DMA_TO_DEVICE))
                err_printk(dev, entry, "DMA-API: device driver syncs "
                           "device read-only DMA memory for cpu "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped with %s] [synced with %s]\n",
                           (unsigned long long)addr, entry->size,
                           dir2name[entry->direction],
                           dir2name[direction]);

        if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
            !(direction == DMA_FROM_DEVICE))
                err_printk(dev, entry, "DMA-API: device driver syncs "
                           "device write-only DMA memory to device "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped with %s] [synced with %s]\n",
                           (unsigned long long)addr, entry->size,
                           dir2name[entry->direction],
                           dir2name[direction]);

out:
        put_hash_bucket(bucket, &flags);
}

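/*
 * Example of what the direction check above catches (hypothetical
 * driver code, not part of this file): syncing with a direction that
 * differs from the one used at map time triggers the "different
 * direction" warning:
 *
 *      addr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *      dma_sync_single_for_cpu(dev, addr, len, DMA_TO_DEVICE);
 */
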
void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
                        size_t size, int direction, dma_addr_t dma_addr,
                        bool map_single)
{
        struct dma_debug_entry *entry;

        if (unlikely(global_disable))
                return;

        if (unlikely(dma_mapping_error(dev, dma_addr)))
                return;

        entry = dma_entry_alloc();
        if (!entry)
                return;

        entry->dev       = dev;
        entry->type      = dma_debug_page;
        entry->paddr     = page_to_phys(page) + offset;
        entry->dev_addr  = dma_addr;
        entry->size      = size;
        entry->direction = direction;

        if (map_single)
                entry->type = dma_debug_single;

        if (!PageHighMem(page)) {
                void *addr = ((char *)page_address(page)) + offset;
                check_for_stack(dev, addr);
                check_for_illegal_area(dev, addr, size);
        }

        add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_map_page);

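/*
 * Flow sketch (hedged; the exact call chain depends on the
 * architecture's DMA-API implementation): a plain driver mapping such
 * as
 *
 *      addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 * is expected to funnel into debug_dma_map_page() with map_single set,
 * recording the mapping so the matching unmap can be checked later.
 */
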
void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
                          size_t size, int direction, bool map_single)
{
        struct dma_debug_entry ref = {
                .type      = dma_debug_page,
                .dev       = dev,
                .dev_addr  = addr,
                .size      = size,
                .direction = direction,
        };

        if (unlikely(global_disable))
                return;

        if (map_single)
                ref.type = dma_debug_single;

        check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_unmap_page);

void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
                      int nents, int mapped_ents, int direction)
{
        struct dma_debug_entry *entry;
        struct scatterlist *s;
        int i;

        if (unlikely(global_disable))
                return;

        for_each_sg(sg, s, mapped_ents, i) {
                entry = dma_entry_alloc();
                if (!entry)
                        return;

                entry->type           = dma_debug_sg;
                entry->dev            = dev;
                entry->paddr          = sg_phys(s);
                entry->size           = s->length;
                entry->dev_addr       = s->dma_address;
                entry->direction      = direction;
                entry->sg_call_ents   = nents;
                entry->sg_mapped_ents = mapped_ents;

                if (!PageHighMem(sg_page(s))) {
                        check_for_stack(dev, sg_virt(s));
                        check_for_illegal_area(dev, sg_virt(s), s->length);
                }

                add_dma_entry(entry);
        }
}
EXPORT_SYMBOL(debug_dma_map_sg);

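/*
 * Note (editorial): nents is the entry count the driver passed in,
 * while mapped_ents is what the mapping operation returned; an IOMMU
 * may coalesce entries, so mapped_ents can be smaller. One debug entry
 * is created per *mapped* element, and both counts are stored so
 * debug_dma_unmap_sg() below can compare them at unmap time.
 */
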
void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
                        int nelems, int dir)
{
        struct dma_debug_entry *entry;
        struct scatterlist *s;
        int mapped_ents = 0, i;
        unsigned long flags;

        if (unlikely(global_disable))
                return;

        for_each_sg(sglist, s, nelems, i) {

                struct dma_debug_entry ref = {
                        .type         = dma_debug_sg,
                        .dev          = dev,
                        .paddr        = sg_phys(s),
                        .dev_addr     = s->dma_address,
                        .size         = s->length,
                        .direction    = dir,
                        .sg_call_ents = 0,
                };

                if (mapped_ents && i >= mapped_ents)
                        break;

                if (mapped_ents == 0) {
                        struct hash_bucket *bucket;
                        ref.sg_call_ents = nelems;
                        bucket = get_hash_bucket(&ref, &flags);
                        entry = hash_bucket_find(bucket, &ref);
                        if (entry)
                                mapped_ents = entry->sg_mapped_ents;
                        put_hash_bucket(bucket, &flags);
                }

                check_unmap(&ref);
        }
}
EXPORT_SYMBOL(debug_dma_unmap_sg);

void debug_dma_alloc_coherent(struct device *dev, size_t size,
                              dma_addr_t dma_addr, void *virt)
{
        struct dma_debug_entry *entry;

        if (unlikely(global_disable))
                return;

        if (unlikely(virt == NULL))
                return;

        entry = dma_entry_alloc();
        if (!entry)
                return;

        entry->type      = dma_debug_coherent;
        entry->dev       = dev;
        entry->paddr     = virt_to_phys(virt);
        entry->size      = size;
        entry->dev_addr  = dma_addr;
        entry->direction = DMA_BIDIRECTIONAL;

        add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_alloc_coherent);

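/*
 * Pairing sketch (hypothetical driver code): coherent allocations are
 * tracked by their CPU address as well, so freeing with a different
 * virtual address than the one dma_alloc_coherent() returned is
 * reported by check_unmap() as a "different CPU address" error:
 *
 *      virt = dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
 *      ...
 *      dma_free_coherent(dev, size, virt, dma_handle);
 */
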
void debug_dma_free_coherent(struct device *dev, size_t size,
                             void *virt, dma_addr_t addr)
{
        struct dma_debug_entry ref = {
                .type      = dma_debug_coherent,
                .dev       = dev,
                .paddr     = virt_to_phys(virt),
                .dev_addr  = addr,
                .size      = size,
                .direction = DMA_BIDIRECTIONAL,
        };

        if (unlikely(global_disable))
                return;

        check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_free_coherent);

void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
                                   size_t size, int direction)
{
        if (unlikely(global_disable))
                return;

        check_sync(dev, dma_handle, size, 0, direction, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);

void debug_dma_sync_single_for_device(struct device *dev,
                                      dma_addr_t dma_handle, size_t size,
                                      int direction)
{
        if (unlikely(global_disable))
                return;

        check_sync(dev, dma_handle, size, 0, direction, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_device);

void debug_dma_sync_single_range_for_cpu(struct device *dev,
                                         dma_addr_t dma_handle,
                                         unsigned long offset, size_t size,
                                         int direction)
{
        if (unlikely(global_disable))
                return;

        check_sync(dev, dma_handle, size, offset, direction, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);

void debug_dma_sync_single_range_for_device(struct device *dev,
                                            dma_addr_t dma_handle,
                                            unsigned long offset,
                                            size_t size, int direction)
{
        if (unlikely(global_disable))
                return;

        check_sync(dev, dma_handle, size, offset, direction, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);

void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                               int nelems, int direction)
{
        struct scatterlist *s;
        int i;

        if (unlikely(global_disable))
                return;

        for_each_sg(sg, s, nelems, i) {
                check_sync(dev, s->dma_address, s->dma_length, 0,
                           direction, true);
        }
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);

void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                                  int nelems, int direction)
{
        struct scatterlist *s;
        int i;

        if (unlikely(global_disable))
                return;

        for_each_sg(sg, s, nelems, i) {
                check_sync(dev, s->dma_address, s->dma_length, 0,
                           direction, false);
        }
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_device);
964