/*
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/stacktrace.h>
#include <linux/dma-debug.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/slab.h>

#define HASH_SIZE       1024ULL
#define HASH_FN_SHIFT   13
#define HASH_FN_MASK    (HASH_SIZE - 1)

enum {
	dma_debug_single,
	dma_debug_page,
	dma_debug_sg,
	dma_debug_coherent,
};

#define DMA_DEBUG_STACKTRACE_ENTRIES 5

struct dma_debug_entry {
	struct list_head list;
	struct device    *dev;
	int              type;
	phys_addr_t      paddr;
	u64              dev_addr;
	u64              size;
	int              direction;
	int              sg_call_ents;
	int              sg_mapped_ents;
#ifdef CONFIG_STACKTRACE
	struct stack_trace stacktrace;
	unsigned long    st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
#endif
};

struct hash_bucket {
	struct list_head list;
	spinlock_t lock;
} ____cacheline_aligned_in_smp;

/* Hash table to keep track of the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];
/* List of pre-allocated dma_debug_entry's */
static LIST_HEAD(free_entries);
/* Lock for the list above */
static DEFINE_SPINLOCK(free_entries_lock);

/* Global disable flag - will be set in case of an error */
static bool global_disable __read_mostly;

/* Global error count */
static u32 error_count;

/* Set to show all errors instead of only the first show_num_errors ones */
static u32 show_all_errors __read_mostly;
/* Number of errors to show before staying silent */
static u32 show_num_errors = 1;

static u32 num_free_entries;
static u32 min_free_entries;

/* number of preallocated entries requested by kernel cmdline */
static u32 req_entries;

/* debugfs dentry's for the stuff above */
static struct dentry *dma_debug_dent        __read_mostly;
static struct dentry *global_disable_dent   __read_mostly;
static struct dentry *error_count_dent      __read_mostly;
static struct dentry *show_all_errors_dent  __read_mostly;
static struct dentry *show_num_errors_dent  __read_mostly;
static struct dentry *num_free_entries_dent __read_mostly;
static struct dentry *min_free_entries_dent __read_mostly;

static const char *type2name[4] = { "single", "page",
				    "scatter-gather", "coherent" };

static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
				   "DMA_FROM_DEVICE", "DMA_NONE" };

/*
 * The access to some variables in this macro is racy. We can't use atomic_t
 * here because all these variables are exported to debugfs. Some of them are
 * even writeable. This is also the reason why a lock won't help much. But
 * anyway, the races are no big deal. Here is why:
 *
 *   error_count: the addition is racy, but the worst thing that can happen
 *                is that we don't count some errors
 *   show_num_errors: the subtraction is racy. Also no big deal because in
 *                    the worst case this will result in one warning more in
 *                    the system log than the user configured. This variable
 *                    is writeable via debugfs.
 */
static inline void dump_entry_trace(struct dma_debug_entry *entry)
{
#ifdef CONFIG_STACKTRACE
	if (entry) {
		printk(KERN_WARNING "Mapped at:\n");
		print_stack_trace(&entry->stacktrace, 0);
	}
#endif
}

#define err_printk(dev, entry, format, arg...) do {		\
		error_count += 1;				\
		if (show_all_errors || show_num_errors > 0) {	\
			WARN(1, "%s %s: " format,		\
			     dev_driver_string(dev),		\
			     dev_name(dev) , ## arg);		\
			dump_entry_trace(entry);		\
		}						\
		if (!show_all_errors && show_num_errors > 0)	\
			show_num_errors -= 1;			\
	} while (0)

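/*
 * Illustration only (driver, device and addresses below are made up):
 * a failed check prints a WARN() backtrace prefixed with the driver
 * and device name, e.g.
 *
 *	e1000e 0000:00:19.0: DMA-API: device driver frees DMA memory
 *	with different size [device address=0x...] [map size=... bytes]
 *	[unmap size=... bytes]
 *
 * followed by a "Mapped at:" stack trace of the original mapping call
 * when CONFIG_STACKTRACE is enabled.
 */
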
/*
 * Hash related functions
 *
 * Every DMA-API request is saved into a struct dma_debug_entry. To
 * have quick access to these structs they are stored in a hash table.
 */
static int hash_fn(struct dma_debug_entry *entry)
{
	/*
	 * The hash function is based on the dma address. With
	 * HASH_FN_SHIFT == 13 and HASH_FN_MASK == 0x3ff we use
	 * bits 13-22 of the address as the index into the hash table.
	 */
	return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}

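/*
 * Worked example (illustrative value): for dev_addr == 0x12345678 the
 * index is (0x12345678 >> 13) & 0x3ff == 0x1a2, so the entry is kept
 * in dma_entry_hash[0x1a2]. All mappings whose bus addresses share
 * bits 13-22 land in the same bucket.
 */
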
/*
 * Request exclusive access to a hash bucket for a given dma_debug_entry.
 */
static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
					   unsigned long *flags)
{
	int idx = hash_fn(entry);
	unsigned long __flags;

	spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
	*flags = __flags;
	return &dma_entry_hash[idx];
}

/*
 * Give up exclusive access to the hash bucket
 */
static void put_hash_bucket(struct hash_bucket *bucket,
			    unsigned long *flags)
{
	unsigned long __flags = *flags;

	spin_unlock_irqrestore(&bucket->lock, __flags);
}

/*
 * Search a given entry in the hash bucket list
 */
static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
						struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;

	list_for_each_entry(entry, &bucket->list, list) {
		if ((entry->dev_addr == ref->dev_addr) &&
		    (entry->dev == ref->dev))
			return entry;
	}

	return NULL;
}

/*
 * Add an entry to a hash bucket
 */
static void hash_bucket_add(struct hash_bucket *bucket,
			    struct dma_debug_entry *entry)
{
	list_add_tail(&entry->list, &bucket->list);
}

/*
 * Remove entry from a hash bucket list
 */
static void hash_bucket_del(struct dma_debug_entry *entry)
{
	list_del(&entry->list);
}

/*
 * Dump mapping entries for debugging purposes
 */
void debug_dma_dump_mappings(struct device *dev)
{
	int idx;

	for (idx = 0; idx < HASH_SIZE; idx++) {
		struct hash_bucket *bucket = &dma_entry_hash[idx];
		struct dma_debug_entry *entry;
		unsigned long flags;

		spin_lock_irqsave(&bucket->lock, flags);

		list_for_each_entry(entry, &bucket->list, list) {
			if (!dev || dev == entry->dev) {
				dev_info(entry->dev,
					 "%s idx %d P=%Lx D=%Lx L=%Lx %s\n",
					 type2name[entry->type], idx,
					 (unsigned long long)entry->paddr,
					 entry->dev_addr, entry->size,
					 dir2name[entry->direction]);
			}
		}

		spin_unlock_irqrestore(&bucket->lock, flags);
	}
}
EXPORT_SYMBOL(debug_dma_dump_mappings);

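/*
 * The function above prints one line per live mapping; with made-up
 * driver, device and addresses a line looks roughly like
 *
 *	nic 0000:00:19.0: scatter-gather idx 418 P=1f4d000 D=1f4d000 L=1000 DMA_FROM_DEVICE
 *
 * i.e. mapping type, hash index, physical address, device address,
 * length and direction.
 */
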
/*
 * Wrapper function for adding an entry to the hash.
 * This function takes care of locking itself.
 */
static void add_dma_entry(struct dma_debug_entry *entry)
{
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(entry, &flags);
	hash_bucket_add(bucket, entry);
	put_hash_bucket(bucket, &flags);
}

/* struct dma_debug_entry allocator
 *
 * The next two functions implement the allocator for
 * struct dma_debug_entries.
 */
static struct dma_debug_entry *dma_entry_alloc(void)
{
	struct dma_debug_entry *entry = NULL;
	unsigned long flags;

	spin_lock_irqsave(&free_entries_lock, flags);

	if (list_empty(&free_entries)) {
		printk(KERN_ERR "DMA-API: debugging out of memory "
				"- disabling\n");
		global_disable = true;
		goto out;
	}

	entry = list_entry(free_entries.next, struct dma_debug_entry, list);
	list_del(&entry->list);
	memset(entry, 0, sizeof(*entry));

#ifdef CONFIG_STACKTRACE
	entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
	entry->stacktrace.entries = entry->st_entries;
	entry->stacktrace.skip = 2;
	save_stack_trace(&entry->stacktrace);
#endif
	num_free_entries -= 1;
	if (num_free_entries < min_free_entries)
		min_free_entries = num_free_entries;

out:
	spin_unlock_irqrestore(&free_entries_lock, flags);

	return entry;
}

static void dma_entry_free(struct dma_debug_entry *entry)
{
	unsigned long flags;

	/*
	 * Add to the beginning of the list - this way the entries are
	 * more likely cache hot when they are reallocated.
	 */
	spin_lock_irqsave(&free_entries_lock, flags);
	list_add(&entry->list, &free_entries);
	num_free_entries += 1;
	spin_unlock_irqrestore(&free_entries_lock, flags);
}

/*
 * DMA-API debugging init code
 *
 * The init code does two things:
 *   1. Initialize core data structures
 *   2. Preallocate a given number of dma_debug_entry structs
 */

static int prealloc_memory(u32 num_entries)
{
	struct dma_debug_entry *entry, *next_entry;
	int i;

	for (i = 0; i < num_entries; ++i) {
		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry)
			goto out_err;

		list_add_tail(&entry->list, &free_entries);
	}

	num_free_entries = num_entries;
	min_free_entries = num_entries;

	printk(KERN_INFO "DMA-API: preallocated %d debug entries\n",
	       num_entries);

	return 0;

out_err:

	list_for_each_entry_safe(entry, next_entry, &free_entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	return -ENOMEM;
}

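/*
 * Rough cost estimate (assumption: sizeof(struct dma_debug_entry) is
 * on the order of 128 bytes on a 64-bit build with CONFIG_STACKTRACE):
 * a request for 65536 entries preallocates about 8 MB here, so
 * architectures should size their request to the expected number of
 * concurrently active mappings.
 */
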
static int dma_debug_fs_init(void)
{
	dma_debug_dent = debugfs_create_dir("dma-api", NULL);
	if (!dma_debug_dent) {
		printk(KERN_ERR "DMA-API: can not create debugfs directory\n");
		return -ENOMEM;
	}

	global_disable_dent = debugfs_create_bool("disabled", 0444,
						  dma_debug_dent,
						  (u32 *)&global_disable);
	if (!global_disable_dent)
		goto out_err;

	error_count_dent = debugfs_create_u32("error_count", 0444,
					      dma_debug_dent, &error_count);
	if (!error_count_dent)
		goto out_err;

	show_all_errors_dent = debugfs_create_u32("all_errors", 0644,
						  dma_debug_dent,
						  &show_all_errors);
	if (!show_all_errors_dent)
		goto out_err;

	show_num_errors_dent = debugfs_create_u32("num_errors", 0644,
						  dma_debug_dent,
						  &show_num_errors);
	if (!show_num_errors_dent)
		goto out_err;

	num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444,
						   dma_debug_dent,
						   &num_free_entries);
	if (!num_free_entries_dent)
		goto out_err;

	min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444,
						   dma_debug_dent,
						   &min_free_entries);
	if (!min_free_entries_dent)
		goto out_err;

	return 0;

out_err:
	debugfs_remove_recursive(dma_debug_dent);

	return -ENOMEM;
}

/*
 * Let the architectures decide how many entries should be preallocated.
 */
void dma_debug_init(u32 num_entries)
{
	int i;

	if (global_disable)
		return;

	for (i = 0; i < HASH_SIZE; ++i) {
		INIT_LIST_HEAD(&dma_entry_hash[i].list);
		spin_lock_init(&dma_entry_hash[i].lock);
	}

	if (dma_debug_fs_init() != 0) {
		printk(KERN_ERR "DMA-API: error creating debugfs entries "
				"- disabling\n");
		global_disable = true;

		return;
	}

	if (req_entries)
		num_entries = req_entries;

	if (prealloc_memory(num_entries) != 0) {
		printk(KERN_ERR "DMA-API: debugging out of memory error "
				"- disabled\n");
		global_disable = true;

		return;
	}

	printk(KERN_INFO "DMA-API: debugging enabled by kernel config\n");
}

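/*
 * Minimal sketch of the expected caller (names are illustrative; the
 * entry count is each architecture's own choice):
 *
 *	#define PREALLOC_DMA_DEBUG_ENTRIES	(1 << 16)
 *
 *	static int __init arch_dma_init(void)
 *	{
 *		dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
 *		return 0;
 *	}
 *	fs_initcall(arch_dma_init);
 */
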
static __init int dma_debug_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (strncmp(str, "off", 3) == 0) {
		printk(KERN_INFO "DMA-API: debugging disabled on kernel "
				 "command line\n");
		global_disable = true;
	}

	return 0;
}

static __init int dma_debug_entries_cmdline(char *str)
{
	int res;

	if (!str)
		return -EINVAL;

	res = get_option(&str, &req_entries);

	if (!res)
		req_entries = 0;

	return 0;
}

__setup("dma_debug=", dma_debug_cmdline);
__setup("dma_debug_entries=", dma_debug_entries_cmdline);

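/*
 * Usage on the kernel command line:
 *
 *	dma_debug=off		disable the checks at boot time
 *	dma_debug_entries=<n>	override the architecture's preallocation
 *				request with <n> entries
 */
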
static void check_unmap(struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	if (dma_mapping_error(ref->dev, ref->dev_addr))
		return;

	bucket = get_hash_bucket(ref, &flags);
	entry = hash_bucket_find(bucket, ref);

	if (!entry) {
		err_printk(ref->dev, NULL, "DMA-API: device driver tries "
			   "to free DMA memory it has not allocated "
			   "[device address=0x%016llx] [size=%llu bytes]\n",
			   ref->dev_addr, ref->size);
		goto out;
	}

	if (ref->size != entry->size) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA memory with different size "
			   "[device address=0x%016llx] [map size=%llu bytes] "
			   "[unmap size=%llu bytes]\n",
			   ref->dev_addr, entry->size, ref->size);
	}

	if (ref->type != entry->type) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA memory with wrong function "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped as %s] [unmapped as %s]\n",
			   ref->dev_addr, ref->size,
			   type2name[entry->type], type2name[ref->type]);
	} else if ((entry->type == dma_debug_coherent) &&
		   (ref->paddr != entry->paddr)) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA memory with different CPU address "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[cpu alloc address=%p] [cpu free address=%p]\n",
			   ref->dev_addr, ref->size,
			   (void *)entry->paddr, (void *)ref->paddr);
	}

	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
	    ref->sg_call_ents != entry->sg_call_ents) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA sg list with different entry count "
			   "[map count=%d] [unmap count=%d]\n",
			   entry->sg_call_ents, ref->sg_call_ents);
	}

	/*
	 * This may be no bug in reality - but most implementations of the
	 * DMA API don't handle this properly, so check for it here
	 */
	if (ref->direction != entry->direction) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA memory with different direction "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [unmapped with %s]\n",
			   ref->dev_addr, ref->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);
	}

	hash_bucket_del(entry);
	dma_entry_free(entry);

out:
	put_hash_bucket(bucket, &flags);
}

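/*
 * Example of a driver bug the function above catches (hypothetical
 * driver code): memory must be unmapped with the interface matching
 * the mapping call, so
 *
 *	addr = dma_map_page(dev, page, 0, size, DMA_TO_DEVICE);
 *	...
 *	dma_unmap_single(dev, addr, size, DMA_TO_DEVICE);
 *
 * is reported as "frees DMA memory with wrong function": the entry
 * was recorded as dma_debug_page but the unmap reference is of type
 * dma_debug_single.
 */
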
static void check_for_stack(struct device *dev, void *addr)
{
	if (object_is_on_stack(addr))
		err_printk(dev, NULL, "DMA-API: device driver maps memory "
			   "from stack [addr=%p]\n", addr);
}

static void check_sync(struct device *dev, dma_addr_t addr,
		       u64 size, u64 offset, int direction, bool to_cpu)
{
	struct dma_debug_entry ref = {
		.dev            = dev,
		.dev_addr       = addr,
		.size           = size,
		.direction      = direction,
	};
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(&ref, &flags);

	entry = hash_bucket_find(bucket, &ref);

	if (!entry) {
		err_printk(dev, NULL, "DMA-API: device driver tries "
			   "to sync DMA memory it has not allocated "
			   "[device address=0x%016llx] [size=%llu bytes]\n",
			   addr, size);
		goto out;
	}

	if ((offset + size) > entry->size) {
		err_printk(dev, entry, "DMA-API: device driver syncs"
			   " DMA memory outside allocated range "
			   "[device address=0x%016llx] "
			   "[allocation size=%llu bytes] [sync offset=%llu] "
			   "[sync size=%llu]\n", entry->dev_addr, entry->size,
			   offset, size);
	}

	if (direction != entry->direction) {
		err_printk(dev, entry, "DMA-API: device driver syncs "
			   "DMA memory with different direction "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[direction]);
	}

	if (entry->direction == DMA_BIDIRECTIONAL)
		goto out;

	if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
	    !(direction == DMA_TO_DEVICE))
		err_printk(dev, entry, "DMA-API: device driver syncs "
			   "device read-only DMA memory for cpu "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[direction]);

	if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
	    !(direction == DMA_FROM_DEVICE))
		err_printk(dev, entry, "DMA-API: device driver syncs "
			   "device write-only DMA memory to device "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[direction]);

out:
	put_hash_bucket(bucket, &flags);
}

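/*
 * Example of a sync bug the function above catches (hypothetical
 * driver code): a DMA_TO_DEVICE mapping is read-only for the device,
 * so after
 *
 *	addr = dma_map_single(dev, buf, size, DMA_TO_DEVICE);
 *
 * a call like
 *
 *	dma_sync_single_for_cpu(dev, addr, size, DMA_FROM_DEVICE);
 *
 * is flagged twice: once for the direction mismatch and once because
 * there can be no new data from the device for the cpu to look at.
 */
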
void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
			size_t size, int direction, dma_addr_t dma_addr,
			bool map_single)
{
	struct dma_debug_entry *entry;

	if (unlikely(global_disable))
		return;

	if (unlikely(dma_mapping_error(dev, dma_addr)))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->dev       = dev;
	entry->type      = dma_debug_page;
	entry->paddr     = page_to_phys(page) + offset;
	entry->dev_addr  = dma_addr;
	entry->size      = size;
	entry->direction = direction;

	if (map_single) {
		entry->type = dma_debug_single;
		check_for_stack(dev, page_address(page) + offset);
	}

	add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_map_page);

void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
			  size_t size, int direction, bool map_single)
{
	struct dma_debug_entry ref = {
		.type           = dma_debug_page,
		.dev            = dev,
		.dev_addr       = addr,
		.size           = size,
		.direction      = direction,
	};

	if (unlikely(global_disable))
		return;

	if (map_single)
		ref.type = dma_debug_single;

	check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_unmap_page);

void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
		      int nents, int mapped_ents, int direction)
{
	struct dma_debug_entry *entry;
	struct scatterlist *s;
	int i;

	if (unlikely(global_disable))
		return;

	for_each_sg(sg, s, mapped_ents, i) {
		entry = dma_entry_alloc();
		if (!entry)
			return;

		entry->type           = dma_debug_sg;
		entry->dev            = dev;
		entry->paddr          = sg_phys(s);
		entry->size           = s->length;
		entry->dev_addr       = s->dma_address;
		entry->direction      = direction;
		entry->sg_call_ents   = nents;
		entry->sg_mapped_ents = mapped_ents;

		check_for_stack(dev, sg_virt(s));

		add_dma_entry(entry);
	}
}
EXPORT_SYMBOL(debug_dma_map_sg);

void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
			int nelems, int dir)
{
	struct dma_debug_entry *entry;
	struct scatterlist *s;
	int mapped_ents = 0, i;
	unsigned long flags;

	if (unlikely(global_disable))
		return;

	for_each_sg(sglist, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type           = dma_debug_sg,
			.dev            = dev,
			.paddr          = sg_phys(s),
			.dev_addr       = s->dma_address,
			.size           = s->length,
			.direction      = dir,
			.sg_call_ents   = 0,
		};

		if (mapped_ents && i >= mapped_ents)
			break;

		if (mapped_ents == 0) {
			struct hash_bucket *bucket;
			ref.sg_call_ents = nelems;
			bucket = get_hash_bucket(&ref, &flags);
			entry = hash_bucket_find(bucket, &ref);
			if (entry)
				mapped_ents = entry->sg_mapped_ents;
			put_hash_bucket(bucket, &flags);
		}

		check_unmap(&ref);
	}
}
EXPORT_SYMBOL(debug_dma_unmap_sg);

void debug_dma_alloc_coherent(struct device *dev, size_t size,
			      dma_addr_t dma_addr, void *virt)
{
	struct dma_debug_entry *entry;

	if (unlikely(global_disable))
		return;

	if (unlikely(virt == NULL))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->type      = dma_debug_coherent;
	entry->dev       = dev;
	entry->paddr     = virt_to_phys(virt);
	entry->size      = size;
	entry->dev_addr  = dma_addr;
	entry->direction = DMA_BIDIRECTIONAL;

	add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_alloc_coherent);

void debug_dma_free_coherent(struct device *dev, size_t size,
			     void *virt, dma_addr_t addr)
{
	struct dma_debug_entry ref = {
		.type           = dma_debug_coherent,
		.dev            = dev,
		.paddr          = virt_to_phys(virt),
		.dev_addr       = addr,
		.size           = size,
		.direction      = DMA_BIDIRECTIONAL,
	};

	if (unlikely(global_disable))
		return;

	check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_free_coherent);

void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
				   size_t size, int direction)
{
	if (unlikely(global_disable))
		return;

	check_sync(dev, dma_handle, size, 0, direction, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);

void debug_dma_sync_single_for_device(struct device *dev,
				      dma_addr_t dma_handle, size_t size,
				      int direction)
{
	if (unlikely(global_disable))
		return;

	check_sync(dev, dma_handle, size, 0, direction, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_device);

void debug_dma_sync_single_range_for_cpu(struct device *dev,
					 dma_addr_t dma_handle,
					 unsigned long offset, size_t size,
					 int direction)
{
	if (unlikely(global_disable))
		return;

	check_sync(dev, dma_handle, size, offset, direction, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);

void debug_dma_sync_single_range_for_device(struct device *dev,
					    dma_addr_t dma_handle,
					    unsigned long offset,
					    size_t size, int direction)
{
	if (unlikely(global_disable))
		return;

	check_sync(dev, dma_handle, size, offset, direction, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);

void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			       int nelems, int direction)
{
	struct scatterlist *s;
	int i;

	if (unlikely(global_disable))
		return;

	for_each_sg(sg, s, nelems, i) {
		check_sync(dev, s->dma_address, s->dma_length, 0,
			   direction, true);
	}
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);

void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				  int nelems, int direction)
{
	struct scatterlist *s;
	int i;

	if (unlikely(global_disable))
		return;

	for_each_sg(sg, s, nelems, i) {
		check_sync(dev, s->dma_address, s->dma_length, 0,
			   direction, false);
	}
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_device);