/*
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/stacktrace.h>
#include <linux/dma-debug.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <asm/sections.h>

#define HASH_SIZE     1024ULL
#define HASH_FN_SHIFT 13
#define HASH_FN_MASK  (HASH_SIZE - 1)

enum {
	dma_debug_single,
	dma_debug_page,
	dma_debug_sg,
	dma_debug_coherent,
};

#define DMA_DEBUG_STACKTRACE_ENTRIES 5

struct dma_debug_entry {
	struct list_head list;
	struct device *dev;
	int type;
	phys_addr_t paddr;
	u64 dev_addr;
	u64 size;
	int direction;
	int sg_call_ents;
	int sg_mapped_ents;
#ifdef CONFIG_STACKTRACE
	struct stack_trace stacktrace;
	unsigned long st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
#endif
};

struct hash_bucket {
	struct list_head list;
	spinlock_t lock;
} ____cacheline_aligned_in_smp;

/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];
/* List of pre-allocated dma_debug_entry's */
static LIST_HEAD(free_entries);
/* Lock for the list above */
static DEFINE_SPINLOCK(free_entries_lock);

/* Global disable flag - will be set in case of an error */
static bool global_disable __read_mostly;

/* Global error count */
static u32 error_count;

/* Global error show enable */
static u32 show_all_errors __read_mostly;
/* Number of errors to show */
static u32 show_num_errors = 1;

static u32 num_free_entries;
static u32 min_free_entries;
static u32 nr_total_entries;

/* number of preallocated entries requested by kernel cmdline */
static u32 req_entries;

/* debugfs dentry's for the stuff above */
static struct dentry *dma_debug_dent        __read_mostly;
static struct dentry *global_disable_dent   __read_mostly;
static struct dentry *error_count_dent      __read_mostly;
static struct dentry *show_all_errors_dent  __read_mostly;
static struct dentry *show_num_errors_dent  __read_mostly;
static struct dentry *num_free_entries_dent __read_mostly;
static struct dentry *min_free_entries_dent __read_mostly;

static const char *type2name[4] = { "single", "page",
				    "scatter-gather", "coherent" };

static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
				   "DMA_FROM_DEVICE", "DMA_NONE" };

/*
 * The access to some variables in this macro is racy. We can't use atomic_t
 * here because all these variables are exported to debugfs. Some of them are
 * even writeable. This is also the reason why a lock won't help much. But
 * anyway, the races are no big deal. Here is why:
 *
 *   error_count: the addition is racy, but the worst thing that can happen is
 *                that we don't count some errors
 *   show_num_errors: the subtraction is racy. Also no big deal because in
 *                    worst case this will result in one warning more in the
 *                    system log than the user configured. This variable is
 *                    writeable via debugfs.
 */
static inline void dump_entry_trace(struct dma_debug_entry *entry)
{
#ifdef CONFIG_STACKTRACE
	if (entry) {
		printk(KERN_WARNING "Mapped at:\n");
		print_stack_trace(&entry->stacktrace, 0);
	}
#endif
}

#define err_printk(dev, entry, format, arg...) do {		\
		error_count += 1;				\
		if (show_all_errors || show_num_errors > 0) {	\
			WARN(1, "%s %s: " format,		\
			     dev_driver_string(dev),		\
			     dev_name(dev) , ## arg);		\
			dump_entry_trace(entry);		\
		}						\
		if (!show_all_errors && show_num_errors > 0)	\
			show_num_errors -= 1;			\
	} while (0)

/*
 * Hash related functions
 *
 * Every DMA-API request is saved into a struct dma_debug_entry. To
 * have quick access to these structs they are stored into a hash.
 */
static int hash_fn(struct dma_debug_entry *entry)
{
	/*
	 * Hash function is based on the dma address.
	 * We use bits 13-22 here as the index into the hash
	 */
	return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}

/*
 * Request exclusive access to a hash bucket for a given dma_debug_entry.
 */
static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
					   unsigned long *flags)
{
	int idx = hash_fn(entry);
	unsigned long __flags;

	spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
	*flags = __flags;
	return &dma_entry_hash[idx];
}

/*
 * Give up exclusive access to the hash bucket
 */
static void put_hash_bucket(struct hash_bucket *bucket,
			    unsigned long *flags)
{
	unsigned long __flags = *flags;

	spin_unlock_irqrestore(&bucket->lock, __flags);
}

/*
 * Search a given entry in the hash bucket list
 */
static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
						struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;

	list_for_each_entry(entry, &bucket->list, list) {
		if ((entry->dev_addr == ref->dev_addr) &&
		    (entry->dev == ref->dev))
			return entry;
	}

	return NULL;
}

/*
 * Add an entry to a hash bucket
 */
static void hash_bucket_add(struct hash_bucket *bucket,
			    struct dma_debug_entry *entry)
{
	list_add_tail(&entry->list, &bucket->list);
}

/*
 * Remove entry from a hash bucket list
 */
static void hash_bucket_del(struct dma_debug_entry *entry)
{
	list_del(&entry->list);
}

/*
 * Dump mapping entries for debugging purposes
 */
void debug_dma_dump_mappings(struct device *dev)
{
	int idx;

	for (idx = 0; idx < HASH_SIZE; idx++) {
		struct hash_bucket *bucket = &dma_entry_hash[idx];
		struct dma_debug_entry *entry;
		unsigned long flags;

		spin_lock_irqsave(&bucket->lock, flags);

		list_for_each_entry(entry, &bucket->list, list) {
			if (!dev || dev == entry->dev) {
				dev_info(entry->dev,
					 "%s idx %d P=%Lx D=%Lx L=%Lx %s\n",
					 type2name[entry->type], idx,
					 (unsigned long long)entry->paddr,
					 entry->dev_addr, entry->size,
					 dir2name[entry->direction]);
			}
		}

		spin_unlock_irqrestore(&bucket->lock, flags);
	}
}
EXPORT_SYMBOL(debug_dma_dump_mappings);

/*
 * Wrapper function for adding an entry to the hash.
 * This function takes care of locking itself.
 */
static void add_dma_entry(struct dma_debug_entry *entry)
{
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(entry, &flags);
	hash_bucket_add(bucket, entry);
	put_hash_bucket(bucket, &flags);
}

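/*
 * Take one entry off the free list. The caller must hold
 * free_entries_lock; both dma_entry_alloc() and
 * dma_debug_resize_entries() call this with the lock held.
 */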
static struct dma_debug_entry *__dma_entry_alloc(void)
{
	struct dma_debug_entry *entry;

	entry = list_entry(free_entries.next, struct dma_debug_entry, list);
	list_del(&entry->list);
	memset(entry, 0, sizeof(*entry));

	num_free_entries -= 1;
	if (num_free_entries < min_free_entries)
		min_free_entries = num_free_entries;

	return entry;
}

/* struct dma_debug_entry allocator
 *
 * The next two functions implement the allocator for
 * struct dma_debug_entries.
 */
static struct dma_debug_entry *dma_entry_alloc(void)
{
	struct dma_debug_entry *entry = NULL;
	unsigned long flags;

	spin_lock_irqsave(&free_entries_lock, flags);

	if (list_empty(&free_entries)) {
		printk(KERN_ERR "DMA-API: debugging out of memory "
				"- disabling\n");
		global_disable = true;
		goto out;
	}

	entry = __dma_entry_alloc();

#ifdef CONFIG_STACKTRACE
	entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
	entry->stacktrace.entries = entry->st_entries;
	entry->stacktrace.skip = 2;
	save_stack_trace(&entry->stacktrace);
#endif

out:
	spin_unlock_irqrestore(&free_entries_lock, flags);

	return entry;
}

static void dma_entry_free(struct dma_debug_entry *entry)
{
	unsigned long flags;

	/*
	 * add to beginning of the list - this way the entries are
	 * more likely cache hot when they are reallocated.
	 */
	spin_lock_irqsave(&free_entries_lock, flags);
	list_add(&entry->list, &free_entries);
	num_free_entries += 1;
	spin_unlock_irqrestore(&free_entries_lock, flags);
}

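/*
 * Resize the pool of preallocated entries at runtime: grow it by
 * kzalloc'ing additional entries (the lock is dropped while allocating),
 * or shrink it by freeing entries off the free list. Returns 0 when the
 * requested total was reached, 1 otherwise.
 */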
int dma_debug_resize_entries(u32 num_entries)
{
	int i, delta, ret = 0;
	unsigned long flags;
	struct dma_debug_entry *entry;
	LIST_HEAD(tmp);

	spin_lock_irqsave(&free_entries_lock, flags);

	if (nr_total_entries < num_entries) {
		delta = num_entries - nr_total_entries;

		spin_unlock_irqrestore(&free_entries_lock, flags);

		for (i = 0; i < delta; i++) {
			entry = kzalloc(sizeof(*entry), GFP_KERNEL);
			if (!entry)
				break;

			list_add_tail(&entry->list, &tmp);
		}

		spin_lock_irqsave(&free_entries_lock, flags);

		list_splice(&tmp, &free_entries);
		nr_total_entries += i;
		num_free_entries += i;
	} else {
		delta = nr_total_entries - num_entries;

		for (i = 0; i < delta && !list_empty(&free_entries); i++) {
			entry = __dma_entry_alloc();
			kfree(entry);
		}

		nr_total_entries -= i;
	}

	if (nr_total_entries != num_entries)
		ret = 1;

	spin_unlock_irqrestore(&free_entries_lock, flags);

	return ret;
}
EXPORT_SYMBOL(dma_debug_resize_entries);

/*
 * DMA-API debugging init code
 *
 * The init code does two things:
 *   1. Initialize core data structures
 *   2. Preallocate a given number of dma_debug_entry structs
 */

static int prealloc_memory(u32 num_entries)
{
	struct dma_debug_entry *entry, *next_entry;
	int i;

	for (i = 0; i < num_entries; ++i) {
		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry)
			goto out_err;

		list_add_tail(&entry->list, &free_entries);
	}

	num_free_entries = num_entries;
	min_free_entries = num_entries;

	printk(KERN_INFO "DMA-API: preallocated %d debug entries\n",
	       num_entries);

	return 0;

out_err:

	list_for_each_entry_safe(entry, next_entry, &free_entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	return -ENOMEM;
}

static int dma_debug_fs_init(void)
{
	dma_debug_dent = debugfs_create_dir("dma-api", NULL);
	if (!dma_debug_dent) {
		printk(KERN_ERR "DMA-API: can not create debugfs directory\n");
		return -ENOMEM;
	}

	global_disable_dent = debugfs_create_bool("disabled", 0444,
						  dma_debug_dent,
						  (u32 *)&global_disable);
	if (!global_disable_dent)
		goto out_err;

	error_count_dent = debugfs_create_u32("error_count", 0444,
					      dma_debug_dent, &error_count);
	if (!error_count_dent)
		goto out_err;

	show_all_errors_dent = debugfs_create_u32("all_errors", 0644,
						  dma_debug_dent,
						  &show_all_errors);
	if (!show_all_errors_dent)
		goto out_err;

	show_num_errors_dent = debugfs_create_u32("num_errors", 0644,
						  dma_debug_dent,
						  &show_num_errors);
	if (!show_num_errors_dent)
		goto out_err;

	num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444,
						   dma_debug_dent,
						   &num_free_entries);
	if (!num_free_entries_dent)
		goto out_err;

	min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444,
						   dma_debug_dent,
						   &min_free_entries);
	if (!min_free_entries_dent)
		goto out_err;

	return 0;

out_err:
	debugfs_remove_recursive(dma_debug_dent);

	return -ENOMEM;
}

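/*
 * Count the outstanding mappings recorded for a device by walking all
 * hash buckets. Used by the driver-unbind notifier below to detect
 * leaked mappings.
 */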
static int device_dma_allocations(struct device *dev)
{
	struct dma_debug_entry *entry;
	unsigned long flags;
	int count = 0, i;

	for (i = 0; i < HASH_SIZE; ++i) {
		spin_lock_irqsave(&dma_entry_hash[i].lock, flags);
		list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
			if (entry->dev == dev)
				count += 1;
		}
		spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags);
	}

	return count;
}

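/*
 * Bus notifier callback: warn when a driver is unbound from a device
 * that still has active DMA mappings.
 */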
static int dma_debug_device_change(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct device *dev = data;
	int count;

	switch (action) {
	case BUS_NOTIFY_UNBIND_DRIVER:
		count = device_dma_allocations(dev);
		if (count == 0)
			break;
		err_printk(dev, NULL, "DMA-API: device driver has pending "
			   "DMA allocations while released from device "
			   "[count=%d]\n", count);
		break;
	default:
		break;
	}

	return 0;
}

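/*
 * Hook a bus type into the leak check above; architecture code calls
 * this for each bus it wants checked (e.g. dma_debug_add_bus(&pci_bus_type)
 * on x86). The notifier_block is allocated once and never freed - it has
 * to stay around for the lifetime of the bus.
 */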
void dma_debug_add_bus(struct bus_type *bus)
{
	struct notifier_block *nb;

	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
	if (nb == NULL) {
		printk(KERN_ERR "dma_debug_add_bus: out of memory\n");
		return;
	}

	nb->notifier_call = dma_debug_device_change;

	bus_register_notifier(bus, nb);
}

/*
 * Let the architectures decide how many entries should be preallocated.
 */
void dma_debug_init(u32 num_entries)
{
	int i;

	if (global_disable)
		return;

	for (i = 0; i < HASH_SIZE; ++i) {
		INIT_LIST_HEAD(&dma_entry_hash[i].list);
		spin_lock_init(&dma_entry_hash[i].lock);
	}

	if (dma_debug_fs_init() != 0) {
		printk(KERN_ERR "DMA-API: error creating debugfs entries "
		       "- disabling\n");
		global_disable = true;

		return;
	}

	if (req_entries)
		num_entries = req_entries;

	if (prealloc_memory(num_entries) != 0) {
		printk(KERN_ERR "DMA-API: debugging out of memory error "
		       "- disabled\n");
		global_disable = true;

		return;
	}

	nr_total_entries = num_free_entries;

	printk(KERN_INFO "DMA-API: debugging enabled by kernel config\n");
}

static __init int dma_debug_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (strncmp(str, "off", 3) == 0) {
		printk(KERN_INFO "DMA-API: debugging disabled on kernel "
				 "command line\n");
		global_disable = true;
	}

	return 0;
}

static __init int dma_debug_entries_cmdline(char *str)
{
	int res;

	if (!str)
		return -EINVAL;

	res = get_option(&str, &req_entries);

	if (!res)
		req_entries = 0;

	return 0;
}

__setup("dma_debug=", dma_debug_cmdline);
__setup("dma_debug_entries=", dma_debug_entries_cmdline);

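/*
 * Validate an unmap request against the stored mapping entry: the
 * address must be known, and size, mapping type, CPU address (for
 * coherent allocations), scatterlist entry count and DMA direction all
 * have to match what was recorded at map time. On success the entry is
 * removed from the hash and returned to the free list.
 */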
static void check_unmap(struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	if (dma_mapping_error(ref->dev, ref->dev_addr)) {
		err_printk(ref->dev, NULL, "DMA-API: device driver tries "
			   "to free an invalid DMA memory address\n");
		return;
	}

	bucket = get_hash_bucket(ref, &flags);
	entry = hash_bucket_find(bucket, ref);

	if (!entry) {
		err_printk(ref->dev, NULL, "DMA-API: device driver tries "
			   "to free DMA memory it has not allocated "
			   "[device address=0x%016llx] [size=%llu bytes]\n",
			   ref->dev_addr, ref->size);
		goto out;
	}

	if (ref->size != entry->size) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA memory with different size "
			   "[device address=0x%016llx] [map size=%llu bytes] "
			   "[unmap size=%llu bytes]\n",
			   ref->dev_addr, entry->size, ref->size);
	}

	if (ref->type != entry->type) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA memory with wrong function "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped as %s] [unmapped as %s]\n",
			   ref->dev_addr, ref->size,
			   type2name[entry->type], type2name[ref->type]);
	} else if ((entry->type == dma_debug_coherent) &&
		   (ref->paddr != entry->paddr)) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA memory with different CPU address "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[cpu alloc address=%p] [cpu free address=%p]",
			   ref->dev_addr, ref->size,
			   (void *)entry->paddr, (void *)ref->paddr);
	}

	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
	    ref->sg_call_ents != entry->sg_call_ents) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA sg list with different entry count "
			   "[map count=%d] [unmap count=%d]\n",
			   entry->sg_call_ents, ref->sg_call_ents);
	}

	/*
	 * This may be no bug in reality - but most implementations of the
	 * DMA API don't handle this properly, so check for it here
	 */
	if (ref->direction != entry->direction) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA memory with different direction "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [unmapped with %s]\n",
			   ref->dev_addr, ref->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);
	}

	hash_bucket_del(entry);
	dma_entry_free(entry);

out:
	put_hash_bucket(bucket, &flags);
}

static void check_for_stack(struct device *dev, void *addr)
{
	if (object_is_on_stack(addr))
		err_printk(dev, NULL, "DMA-API: device driver maps memory "
			   "from stack [addr=%p]\n", addr);
}

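/*
 * Helpers to make sure a mapped region does not cover kernel text or
 * rodata: overlap() reports whether [addr, addr+size) intersects or
 * fully contains [start, end).
 */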
static inline bool overlap(void *addr, u64 size, void *start, void *end)
{
	void *addr2 = (char *)addr + size;

	return ((addr >= start && addr < end) ||
		(addr2 >= start && addr2 < end) ||
		((addr < start) && (addr2 >= end)));
}

static void check_for_illegal_area(struct device *dev, void *addr, u64 size)
{
	if (overlap(addr, size, _text, _etext) ||
	    overlap(addr, size, __start_rodata, __end_rodata))
		err_printk(dev, NULL, "DMA-API: device driver maps "
			   "memory from kernel text or rodata "
			   "[addr=%p] [size=%llu]\n", addr, size);
}

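/*
 * Validate a dma_sync_* call: the region must lie inside a known
 * mapping, and the sync direction has to be compatible with the mapped
 * direction - syncing memory the device only reads (DMA_TO_DEVICE) for
 * the CPU, or memory the device only writes (DMA_FROM_DEVICE) for the
 * device, is flagged as an error.
 */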
static void check_sync(struct device *dev, dma_addr_t addr,
		       u64 size, u64 offset, int direction, bool to_cpu)
{
	struct dma_debug_entry ref = {
		.dev       = dev,
		.dev_addr  = addr,
		.size      = size,
		.direction = direction,
	};
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(&ref, &flags);

	entry = hash_bucket_find(bucket, &ref);

	if (!entry) {
		err_printk(dev, NULL, "DMA-API: device driver tries "
			   "to sync DMA memory it has not allocated "
			   "[device address=0x%016llx] [size=%llu bytes]\n",
			   (unsigned long long)addr, size);
		goto out;
	}

	if ((offset + size) > entry->size) {
		err_printk(dev, entry, "DMA-API: device driver syncs"
			   " DMA memory outside allocated range "
			   "[device address=0x%016llx] "
			   "[allocation size=%llu bytes] [sync offset=%llu] "
			   "[sync size=%llu]\n", entry->dev_addr, entry->size,
			   offset, size);
	}

	if (direction != entry->direction) {
		err_printk(dev, entry, "DMA-API: device driver syncs "
			   "DMA memory with different direction "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[direction]);
	}

	if (entry->direction == DMA_BIDIRECTIONAL)
		goto out;

	if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
	    !(direction == DMA_TO_DEVICE))
		err_printk(dev, entry, "DMA-API: device driver syncs "
			   "device read-only DMA memory for cpu "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[direction]);

	if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
	    !(direction == DMA_FROM_DEVICE))
		err_printk(dev, entry, "DMA-API: device driver syncs "
			   "device write-only DMA memory to device "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[direction]);

out:
	put_hash_bucket(bucket, &flags);
}

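/*
 * Entry point for dma_map_page()/dma_map_single() debugging. A new
 * entry is recorded for the mapping; map_single marks mappings that
 * came in through dma_map_single(), which is implemented on top of
 * dma_map_page(). For lowmem pages the CPU address is also checked
 * against the stack and kernel text/rodata.
 */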
void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
			size_t size, int direction, dma_addr_t dma_addr,
			bool map_single)
{
	struct dma_debug_entry *entry;

	if (unlikely(global_disable))
		return;

	if (unlikely(dma_mapping_error(dev, dma_addr)))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->dev = dev;
	entry->type = dma_debug_page;
	entry->paddr = page_to_phys(page) + offset;
	entry->dev_addr = dma_addr;
	entry->size = size;
	entry->direction = direction;

	if (map_single)
		entry->type = dma_debug_single;

	if (!PageHighMem(page)) {
		void *addr = ((char *)page_address(page)) + offset;
		check_for_stack(dev, addr);
		check_for_illegal_area(dev, addr, size);
	}

	add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_map_page);

void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
			  size_t size, int direction, bool map_single)
{
	struct dma_debug_entry ref = {
		.type      = dma_debug_page,
		.dev       = dev,
		.dev_addr  = addr,
		.size      = size,
		.direction = direction,
	};

	if (unlikely(global_disable))
		return;

	if (map_single)
		ref.type = dma_debug_single;

	check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_unmap_page);

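/*
 * Record one dma_debug_entry per mapped scatterlist element. Both the
 * number of entries passed in (nents) and the number actually mapped
 * (mapped_ents) are stored so the unmap path can cross-check them.
 */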
void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
		      int nents, int mapped_ents, int direction)
{
	struct dma_debug_entry *entry;
	struct scatterlist *s;
	int i;

	if (unlikely(global_disable))
		return;

	for_each_sg(sg, s, mapped_ents, i) {
		entry = dma_entry_alloc();
		if (!entry)
			return;

		entry->type = dma_debug_sg;
		entry->dev = dev;
		entry->paddr = sg_phys(s);
		entry->size = s->length;
		entry->dev_addr = s->dma_address;
		entry->direction = direction;
		entry->sg_call_ents = nents;
		entry->sg_mapped_ents = mapped_ents;

		if (!PageHighMem(sg_page(s))) {
			check_for_stack(dev, sg_virt(s));
			check_for_illegal_area(dev, sg_virt(s), s->length);
		}

		add_dma_entry(entry);
	}
}
EXPORT_SYMBOL(debug_dma_map_sg);

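/*
 * On unmap the driver passes the original nelems again, but only
 * mapped_ents entries were recorded at map time. The real mapped count
 * is therefore looked up from the first element's hash entry before
 * walking the list, so checking stops after the last mapped element.
 */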
void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
			int nelems, int dir)
{
	struct dma_debug_entry *entry;
	struct scatterlist *s;
	int mapped_ents = 0, i;
	unsigned long flags;

	if (unlikely(global_disable))
		return;

	for_each_sg(sglist, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type         = dma_debug_sg,
			.dev          = dev,
			.paddr        = sg_phys(s),
			.dev_addr     = s->dma_address,
			.size         = s->length,
			.direction    = dir,
			.sg_call_ents = 0,
		};

		if (mapped_ents && i >= mapped_ents)
			break;

		if (mapped_ents == 0) {
			struct hash_bucket *bucket;
			ref.sg_call_ents = nelems;
			bucket = get_hash_bucket(&ref, &flags);
			entry = hash_bucket_find(bucket, &ref);
			if (entry)
				mapped_ents = entry->sg_mapped_ents;
			put_hash_bucket(bucket, &flags);
		}

		check_unmap(&ref);
	}
}
EXPORT_SYMBOL(debug_dma_unmap_sg);

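/*
 * Coherent allocations are tracked by their CPU physical address
 * (virt_to_phys() of the returned virtual address) and always use
 * DMA_BIDIRECTIONAL, so check_unmap() can match the free against the
 * allocation.
 */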
void debug_dma_alloc_coherent(struct device *dev, size_t size,
			      dma_addr_t dma_addr, void *virt)
{
	struct dma_debug_entry *entry;

	if (unlikely(global_disable))
		return;

	if (unlikely(virt == NULL))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->type = dma_debug_coherent;
	entry->dev = dev;
	entry->paddr = virt_to_phys(virt);
	entry->size = size;
	entry->dev_addr = dma_addr;
	entry->direction = DMA_BIDIRECTIONAL;

	add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_alloc_coherent);

void debug_dma_free_coherent(struct device *dev, size_t size,
			     void *virt, dma_addr_t addr)
{
	struct dma_debug_entry ref = {
		.type      = dma_debug_coherent,
		.dev       = dev,
		.paddr     = virt_to_phys(virt),
		.dev_addr  = addr,
		.size      = size,
		.direction = DMA_BIDIRECTIONAL,
	};

	if (unlikely(global_disable))
		return;

	check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_free_coherent);

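/*
 * The dma_sync_* wrappers below all funnel into check_sync(); the
 * _for_cpu variants pass to_cpu = true, the _for_device variants pass
 * to_cpu = false, and the _range variants additionally forward the
 * partial-sync offset.
 */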
void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
				   size_t size, int direction)
{
	if (unlikely(global_disable))
		return;

	check_sync(dev, dma_handle, size, 0, direction, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);

void debug_dma_sync_single_for_device(struct device *dev,
				      dma_addr_t dma_handle, size_t size,
				      int direction)
{
	if (unlikely(global_disable))
		return;

	check_sync(dev, dma_handle, size, 0, direction, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_device);

void debug_dma_sync_single_range_for_cpu(struct device *dev,
					 dma_addr_t dma_handle,
					 unsigned long offset, size_t size,
					 int direction)
{
	if (unlikely(global_disable))
		return;

	check_sync(dev, dma_handle, size, offset, direction, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);

void debug_dma_sync_single_range_for_device(struct device *dev,
					    dma_addr_t dma_handle,
					    unsigned long offset,
					    size_t size, int direction)
{
	if (unlikely(global_disable))
		return;

	check_sync(dev, dma_handle, size, offset, direction, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);

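/*
 * The scatterlist sync variants check each element against its stored
 * mapping, using the element's dma_length as the sync size.
 */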
void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			       int nelems, int direction)
{
	struct scatterlist *s;
	int i;

	if (unlikely(global_disable))
		return;

	for_each_sg(sg, s, nelems, i) {
		check_sync(dev, s->dma_address, s->dma_length, 0,
			   direction, true);
	}
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);

void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				  int nelems, int direction)
{
	struct scatterlist *s;
	int i;

	if (unlikely(global_disable))
		return;

	for_each_sg(sg, s, nelems, i) {
		check_sync(dev, s->dma_address, s->dma_length, 0,
			   direction, false);
	}
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_device);