/*
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/stacktrace.h>
#include <linux/dma-debug.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <asm/sections.h>

#define HASH_SIZE       1024ULL
#define HASH_FN_SHIFT   13
#define HASH_FN_MASK    (HASH_SIZE - 1)

enum {
	dma_debug_single,
	dma_debug_page,
	dma_debug_sg,
	dma_debug_coherent,
};

#define DMA_DEBUG_STACKTRACE_ENTRIES 5

struct dma_debug_entry {
	struct list_head list;
	struct device    *dev;
	int              type;
	phys_addr_t      paddr;
	u64              dev_addr;
	u64              size;
	int              direction;
	int              sg_call_ents;
	int              sg_mapped_ents;
#ifdef CONFIG_STACKTRACE
	struct stack_trace stacktrace;
	unsigned long    st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
#endif
};

struct hash_bucket {
	struct list_head list;
	spinlock_t lock;
} ____cacheline_aligned_in_smp;

/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];
/* List of pre-allocated dma_debug_entry's */
static LIST_HEAD(free_entries);
/* Lock for the list above */
static DEFINE_SPINLOCK(free_entries_lock);

/* Global disable flag - will be set in case of an error */
static bool global_disable __read_mostly;

/* Global error count */
static u32 error_count;

/* Global error show enable */
static u32 show_all_errors __read_mostly;
/* Number of errors to show */
static u32 show_num_errors = 1;

static u32 num_free_entries;
static u32 min_free_entries;

/* number of preallocated entries requested by kernel cmdline */
static u32 req_entries;

/* debugfs dentry's for the stuff above */
static struct dentry *dma_debug_dent        __read_mostly;
static struct dentry *global_disable_dent   __read_mostly;
static struct dentry *error_count_dent      __read_mostly;
static struct dentry *show_all_errors_dent  __read_mostly;
static struct dentry *show_num_errors_dent  __read_mostly;
static struct dentry *num_free_entries_dent __read_mostly;
static struct dentry *min_free_entries_dent __read_mostly;

static const char *type2name[4] = { "single", "page",
				    "scatter-gather", "coherent" };

static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
				   "DMA_FROM_DEVICE", "DMA_NONE" };

/*
 * The access to some variables in this macro is racy. We can't use atomic_t
 * here because all these variables are exported to debugfs. Some of them are
 * even writeable. This is also the reason why a lock won't help much. But
 * anyway, the races are no big deal. Here is why:
 *
 *   error_count: the addition is racy, but the worst thing that can happen is
 *                that we don't count some errors
 *   show_num_errors: the subtraction is racy. Also no big deal because in
 *                    worst case this will result in one warning more in the
 *                    system log than the user configured. This variable is
 *                    writeable via debugfs.
 */
static inline void dump_entry_trace(struct dma_debug_entry *entry)
{
#ifdef CONFIG_STACKTRACE
	if (entry) {
		printk(KERN_WARNING "Mapped at:\n");
		print_stack_trace(&entry->stacktrace, 0);
	}
#endif
}

#define err_printk(dev, entry, format, arg...) do {		\
		error_count += 1;				\
		if (show_all_errors || show_num_errors > 0) {	\
			WARN(1, "%s %s: " format,		\
			     dev_driver_string(dev),		\
			     dev_name(dev), ## arg);		\
			dump_entry_trace(entry);		\
		}						\
		if (!show_all_errors && show_num_errors > 0)	\
			show_num_errors -= 1;			\
	} while (0)

/*
 * Hash related functions
 *
 * Every DMA-API request is saved into a struct dma_debug_entry. To
 * have quick access to these structs they are stored in a hash.
 */
static int hash_fn(struct dma_debug_entry *entry)
{
	/*
	 * The hash function is based on the dma address.
	 * We use bits 13-22 of the dma address as the index into the hash.
	 */
	return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}

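/*
 * Worked example (illustration only): with HASH_FN_SHIFT == 13 and
 * HASH_FN_MASK == 0x3ff, a device address of 0x12345000 hashes to
 * (0x12345000 >> 13) & 0x3ff = 0x91a2 & 0x3ff = 0x1a2, i.e. bucket 418.
 * Mappings whose device addresses share bits 13-22 land in the same bucket
 * and are told apart in hash_bucket_find() by an exact dev_addr and device
 * match.
 */
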
/*
 * Request exclusive access to a hash bucket for a given dma_debug_entry.
 */
static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
					   unsigned long *flags)
{
	int idx = hash_fn(entry);
	unsigned long __flags;

	spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
	*flags = __flags;
	return &dma_entry_hash[idx];
}

/*
 * Give up exclusive access to the hash bucket
 */
static void put_hash_bucket(struct hash_bucket *bucket,
			    unsigned long *flags)
{
	unsigned long __flags = *flags;

	spin_unlock_irqrestore(&bucket->lock, __flags);
}

/*
 * Search for a given entry in the hash bucket list
 */
static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
						struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;

	list_for_each_entry(entry, &bucket->list, list) {
		if ((entry->dev_addr == ref->dev_addr) &&
		    (entry->dev == ref->dev))
			return entry;
	}

	return NULL;
}

/*
 * Add an entry to a hash bucket
 */
static void hash_bucket_add(struct hash_bucket *bucket,
			    struct dma_debug_entry *entry)
{
	list_add_tail(&entry->list, &bucket->list);
}

/*
 * Remove entry from a hash bucket list
 */
static void hash_bucket_del(struct dma_debug_entry *entry)
{
	list_del(&entry->list);
}

/*
 * Dump mapping entries for debugging purposes
 */
void debug_dma_dump_mappings(struct device *dev)
{
	int idx;

	for (idx = 0; idx < HASH_SIZE; idx++) {
		struct hash_bucket *bucket = &dma_entry_hash[idx];
		struct dma_debug_entry *entry;
		unsigned long flags;

		spin_lock_irqsave(&bucket->lock, flags);

		list_for_each_entry(entry, &bucket->list, list) {
			if (!dev || dev == entry->dev) {
				dev_info(entry->dev,
					 "%s idx %d P=%Lx D=%Lx L=%Lx %s\n",
					 type2name[entry->type], idx,
					 (unsigned long long)entry->paddr,
					 entry->dev_addr, entry->size,
					 dir2name[entry->direction]);
			}
		}

		spin_unlock_irqrestore(&bucket->lock, flags);
	}
}
EXPORT_SYMBOL(debug_dma_dump_mappings);

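/*
 * Usage sketch (hypothetical caller, not part of this file): code that
 * handles DMA faults can correlate a faulting address with the currently
 * active mappings:
 *
 *	debug_dma_dump_mappings(dev);	// mappings of one device
 *	debug_dma_dump_mappings(NULL);	// mappings of all devices
 */
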
/*
 * Wrapper function for adding an entry to the hash.
 * This function takes care of locking itself.
 */
static void add_dma_entry(struct dma_debug_entry *entry)
{
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(entry, &flags);
	hash_bucket_add(bucket, entry);
	put_hash_bucket(bucket, &flags);
}

/* struct dma_debug_entry allocator
 *
 * The next two functions implement the allocator for
 * struct dma_debug_entry.
 */
static struct dma_debug_entry *dma_entry_alloc(void)
{
	struct dma_debug_entry *entry = NULL;
	unsigned long flags;

	spin_lock_irqsave(&free_entries_lock, flags);

	if (list_empty(&free_entries)) {
		printk(KERN_ERR "DMA-API: debugging out of memory "
		       "- disabling\n");
		global_disable = true;
		goto out;
	}

	entry = list_entry(free_entries.next, struct dma_debug_entry, list);
	list_del(&entry->list);
	memset(entry, 0, sizeof(*entry));

#ifdef CONFIG_STACKTRACE
	entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
	entry->stacktrace.entries = entry->st_entries;
	entry->stacktrace.skip = 2;
	save_stack_trace(&entry->stacktrace);
#endif
	num_free_entries -= 1;
	if (num_free_entries < min_free_entries)
		min_free_entries = num_free_entries;

out:
	spin_unlock_irqrestore(&free_entries_lock, flags);

	return entry;
}

static void dma_entry_free(struct dma_debug_entry *entry)
{
	unsigned long flags;

	/*
	 * add to beginning of the list - this way the entries are
	 * more likely cache hot when they are reallocated.
	 */
	spin_lock_irqsave(&free_entries_lock, flags);
	list_add(&entry->list, &free_entries);
	num_free_entries += 1;
	spin_unlock_irqrestore(&free_entries_lock, flags);
}

/*
 * DMA-API debugging init code
 *
 * The init code does two things:
 *   1. Initialize core data structures
 *   2. Preallocate a given number of dma_debug_entry structs
 */

static int prealloc_memory(u32 num_entries)
{
	struct dma_debug_entry *entry, *next_entry;
	int i;

	for (i = 0; i < num_entries; ++i) {
		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry)
			goto out_err;

		list_add_tail(&entry->list, &free_entries);
	}

	num_free_entries = num_entries;
	min_free_entries = num_entries;

	printk(KERN_INFO "DMA-API: preallocated %d debug entries\n",
	       num_entries);

	return 0;

out_err:

	list_for_each_entry_safe(entry, next_entry, &free_entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	return -ENOMEM;
}

351
Joerg Roedel788dcfa2009-01-09 13:13:27 +0100352static int dma_debug_fs_init(void)
353{
354 dma_debug_dent = debugfs_create_dir("dma-api", NULL);
355 if (!dma_debug_dent) {
356 printk(KERN_ERR "DMA-API: can not create debugfs directory\n");
357 return -ENOMEM;
358 }
359
360 global_disable_dent = debugfs_create_bool("disabled", 0444,
361 dma_debug_dent,
362 (u32 *)&global_disable);
363 if (!global_disable_dent)
364 goto out_err;
365
366 error_count_dent = debugfs_create_u32("error_count", 0444,
367 dma_debug_dent, &error_count);
368 if (!error_count_dent)
369 goto out_err;
370
371 show_all_errors_dent = debugfs_create_u32("all_errors", 0644,
372 dma_debug_dent,
373 &show_all_errors);
374 if (!show_all_errors_dent)
375 goto out_err;
376
377 show_num_errors_dent = debugfs_create_u32("num_errors", 0644,
378 dma_debug_dent,
379 &show_num_errors);
380 if (!show_num_errors_dent)
381 goto out_err;
382
383 num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444,
384 dma_debug_dent,
385 &num_free_entries);
386 if (!num_free_entries_dent)
387 goto out_err;
388
389 min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444,
390 dma_debug_dent,
391 &min_free_entries);
392 if (!min_free_entries_dent)
393 goto out_err;
394
395 return 0;
396
397out_err:
398 debugfs_remove_recursive(dma_debug_dent);
399
400 return -ENOMEM;
401}
402
403
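/*
 * The files created above appear below the debugfs mount point, usually
 * /sys/kernel/debug.  A possible inspection session (illustrative shell
 * commands, assuming debugfs is mounted there):
 *
 *	cat /sys/kernel/debug/dma-api/error_count
 *	cat /sys/kernel/debug/dma-api/num_free_entries
 *	echo 1 > /sys/kernel/debug/dma-api/all_errors
 */
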
/*
 * Let the architectures decide how many entries should be preallocated.
 */
void dma_debug_init(u32 num_entries)
{
	int i;

	if (global_disable)
		return;

	for (i = 0; i < HASH_SIZE; ++i) {
		INIT_LIST_HEAD(&dma_entry_hash[i].list);
		spin_lock_init(&dma_entry_hash[i].lock);
	}

	if (dma_debug_fs_init() != 0) {
		printk(KERN_ERR "DMA-API: error creating debugfs entries "
		       "- disabling\n");
		global_disable = true;

		return;
	}

	if (req_entries)
		num_entries = req_entries;

	if (prealloc_memory(num_entries) != 0) {
		printk(KERN_ERR "DMA-API: debugging out of memory error "
		       "- disabled\n");
		global_disable = true;

		return;
	}

	printk(KERN_INFO "DMA-API: debugging enabled by kernel config\n");
}

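/*
 * Call sketch (hypothetical architecture code, names and values are
 * illustrative): an architecture enables the checks from its DMA init path
 * with a preallocation count of its choosing, which dma_debug_entries= on
 * the command line may override via req_entries above:
 *
 *	#define PREALLOC_DMA_DEBUG_ENTRIES	32768
 *
 *	void __init arch_dma_debug_setup(void)
 *	{
 *		dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
 *	}
 */
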
static __init int dma_debug_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (strncmp(str, "off", 3) == 0) {
		printk(KERN_INFO "DMA-API: debugging disabled on kernel "
		       "command line\n");
		global_disable = true;
	}

	return 0;
}

static __init int dma_debug_entries_cmdline(char *str)
{
	int res;

	if (!str)
		return -EINVAL;

	res = get_option(&str, &req_entries);

	if (!res)
		req_entries = 0;

	return 0;
}

__setup("dma_debug=", dma_debug_cmdline);
__setup("dma_debug_entries=", dma_debug_entries_cmdline);

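/*
 * Example kernel command line usage of the two options parsed above
 * (illustrative values):
 *
 *	dma_debug=off			disable all checks
 *	dma_debug_entries=65536		preallocate more debug entries
 */
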
static void check_unmap(struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	if (dma_mapping_error(ref->dev, ref->dev_addr))
		return;

	bucket = get_hash_bucket(ref, &flags);
	entry = hash_bucket_find(bucket, ref);

	if (!entry) {
		err_printk(ref->dev, NULL, "DMA-API: device driver tries "
			   "to free DMA memory it has not allocated "
			   "[device address=0x%016llx] [size=%llu bytes]\n",
			   ref->dev_addr, ref->size);
		goto out;
	}

	if (ref->size != entry->size) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA memory with different size "
			   "[device address=0x%016llx] [map size=%llu bytes] "
			   "[unmap size=%llu bytes]\n",
			   ref->dev_addr, entry->size, ref->size);
	}

	if (ref->type != entry->type) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA memory with wrong function "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped as %s] [unmapped as %s]\n",
			   ref->dev_addr, ref->size,
			   type2name[entry->type], type2name[ref->type]);
	} else if ((entry->type == dma_debug_coherent) &&
		   (ref->paddr != entry->paddr)) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA memory with different CPU address "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[cpu alloc address=%p] [cpu free address=%p]\n",
			   ref->dev_addr, ref->size,
			   (void *)entry->paddr, (void *)ref->paddr);
	}

	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
	    ref->sg_call_ents != entry->sg_call_ents) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA sg list with different entry count "
			   "[map count=%d] [unmap count=%d]\n",
			   entry->sg_call_ents, ref->sg_call_ents);
	}

	/*
	 * This may be no bug in reality - but most implementations of the
	 * DMA API don't handle this properly, so check for it here
	 */
	if (ref->direction != entry->direction) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA memory with different direction "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [unmapped with %s]\n",
			   ref->dev_addr, ref->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);
	}

	hash_bucket_del(entry);
	dma_entry_free(entry);

out:
	put_hash_bucket(bucket, &flags);
}

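/*
 * Example of a driver bug check_unmap() catches (hypothetical driver code):
 * unmapping with a size different from the one used at map time triggers
 * the "different size" report above:
 *
 *	dma_addr_t d = dma_map_single(dev, buf, 4096, DMA_TO_DEVICE);
 *	...
 *	dma_unmap_single(dev, d, 2048, DMA_TO_DEVICE);	// size mismatch
 */
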
static void check_for_stack(struct device *dev, void *addr)
{
	if (object_is_on_stack(addr))
		err_printk(dev, NULL, "DMA-API: device driver maps memory "
			   "from stack [addr=%p]\n", addr);
}

static inline bool overlap(void *addr, u64 size, void *start, void *end)
{
	void *addr2 = (char *)addr + size;

	return ((addr >= start && addr < end) ||
		(addr2 >= start && addr2 < end) ||
		((addr < start) && (addr2 >= end)));
}

static void check_for_illegal_area(struct device *dev, void *addr, u64 size)
{
	if (overlap(addr, size, _text, _etext) ||
	    overlap(addr, size, __start_rodata, __end_rodata))
		err_printk(dev, NULL, "DMA-API: device driver maps "
			   "memory from kernel text or rodata "
			   "[addr=%p] [size=%llu]\n", addr, size);
}

static void check_sync(struct device *dev, dma_addr_t addr,
		       u64 size, u64 offset, int direction, bool to_cpu)
{
	struct dma_debug_entry ref = {
		.dev            = dev,
		.dev_addr       = addr,
		.size           = size,
		.direction      = direction,
	};
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(&ref, &flags);

	entry = hash_bucket_find(bucket, &ref);

	if (!entry) {
		err_printk(dev, NULL, "DMA-API: device driver tries "
			   "to sync DMA memory it has not allocated "
			   "[device address=0x%016llx] [size=%llu bytes]\n",
			   addr, size);
		goto out;
	}

	if ((offset + size) > entry->size) {
		err_printk(dev, entry, "DMA-API: device driver syncs"
			   " DMA memory outside allocated range "
			   "[device address=0x%016llx] "
			   "[allocation size=%llu bytes] [sync offset=%llu] "
			   "[sync size=%llu]\n", entry->dev_addr, entry->size,
			   offset, size);
	}

	if (direction != entry->direction) {
		err_printk(dev, entry, "DMA-API: device driver syncs "
			   "DMA memory with different direction "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[direction]);
	}

	if (entry->direction == DMA_BIDIRECTIONAL)
		goto out;

	if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
		      !(direction == DMA_TO_DEVICE))
		err_printk(dev, entry, "DMA-API: device driver syncs "
			   "device read-only DMA memory for cpu "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[direction]);

	if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
		       !(direction == DMA_FROM_DEVICE))
		err_printk(dev, entry, "DMA-API: device driver syncs "
			   "device write-only DMA memory to device "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[direction]);

out:
	put_hash_bucket(bucket, &flags);
}

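/*
 * Example of a sync bug this catches (hypothetical driver code): syncing a
 * buffer with a direction other than the one it was mapped with triggers
 * the "different direction" report, and, because the mapping is device
 * read-only, the "device read-only DMA memory for cpu" report as well:
 *
 *	dma_addr_t d = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	...
 *	dma_sync_single_for_cpu(dev, d, len, DMA_FROM_DEVICE);
 */
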
void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
			size_t size, int direction, dma_addr_t dma_addr,
			bool map_single)
{
	struct dma_debug_entry *entry;

	if (unlikely(global_disable))
		return;

	if (unlikely(dma_mapping_error(dev, dma_addr)))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->dev       = dev;
	entry->type      = dma_debug_page;
	entry->paddr     = page_to_phys(page) + offset;
	entry->dev_addr  = dma_addr;
	entry->size      = size;
	entry->direction = direction;

	if (map_single) {
		void *addr = ((char *)page_address(page)) + offset;

		entry->type = dma_debug_single;
		check_for_stack(dev, addr);
		check_for_illegal_area(dev, addr, size);
	}

	add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_map_page);

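/*
 * Sketch of the expected call site (architecture dma-mapping code, not part
 * of this file): the wrapper performs the real mapping first and reports it
 * here afterwards, so only successful mappings are tracked.  map_single is
 * true when the wrapper implements dma_map_single(), which enables the
 * stack and text/rodata checks above:
 *
 *	addr = <arch-specific page mapping operation>;
 *	debug_dma_map_page(dev, page, offset, size, dir, addr, false);
 *	return addr;
 */
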
void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
			  size_t size, int direction, bool map_single)
{
	struct dma_debug_entry ref = {
		.type           = dma_debug_page,
		.dev            = dev,
		.dev_addr       = addr,
		.size           = size,
		.direction      = direction,
	};

	if (unlikely(global_disable))
		return;

	if (map_single)
		ref.type = dma_debug_single;

	check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_unmap_page);

void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
		      int nents, int mapped_ents, int direction)
{
	struct dma_debug_entry *entry;
	struct scatterlist *s;
	int i;

	if (unlikely(global_disable))
		return;

	for_each_sg(sg, s, mapped_ents, i) {
		entry = dma_entry_alloc();
		if (!entry)
			return;

		entry->type           = dma_debug_sg;
		entry->dev            = dev;
		entry->paddr          = sg_phys(s);
		entry->size           = s->length;
		entry->dev_addr       = s->dma_address;
		entry->direction      = direction;
		entry->sg_call_ents   = nents;
		entry->sg_mapped_ents = mapped_ents;

		check_for_stack(dev, sg_virt(s));
		check_for_illegal_area(dev, sg_virt(s), s->length);

		add_dma_entry(entry);
	}
}
EXPORT_SYMBOL(debug_dma_map_sg);

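/*
 * Note the two counts recorded above: sg_call_ents is the nents the driver
 * passed to dma_map_sg(), sg_mapped_ents the possibly smaller count the
 * mapping actually produced.  A hypothetical call:
 *
 *	mapped = dma_map_sg(dev, sglist, 16, DMA_FROM_DEVICE);
 *		-> debug_dma_map_sg(dev, sglist, 16, mapped, DMA_FROM_DEVICE);
 *
 * check_unmap() later complains if the list is unmapped with an entry count
 * different from the 16 it was mapped with.
 */
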
void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
			int nelems, int dir)
{
	struct dma_debug_entry *entry;
	struct scatterlist *s;
	int mapped_ents = 0, i;
	unsigned long flags;

	if (unlikely(global_disable))
		return;

	for_each_sg(sglist, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type           = dma_debug_sg,
			.dev            = dev,
			.paddr          = sg_phys(s),
			.dev_addr       = s->dma_address,
			.size           = s->length,
			.direction      = dir,
			.sg_call_ents   = 0,
		};

		if (mapped_ents && i >= mapped_ents)
			break;

		if (mapped_ents == 0) {
			struct hash_bucket *bucket;
			ref.sg_call_ents = nelems;
			bucket = get_hash_bucket(&ref, &flags);
			entry = hash_bucket_find(bucket, &ref);
			if (entry)
				mapped_ents = entry->sg_mapped_ents;
			put_hash_bucket(bucket, &flags);
		}

		check_unmap(&ref);
	}
}
EXPORT_SYMBOL(debug_dma_unmap_sg);

void debug_dma_alloc_coherent(struct device *dev, size_t size,
			      dma_addr_t dma_addr, void *virt)
{
	struct dma_debug_entry *entry;

	if (unlikely(global_disable))
		return;

	if (unlikely(virt == NULL))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->type      = dma_debug_coherent;
	entry->dev       = dev;
	entry->paddr     = virt_to_phys(virt);
	entry->size      = size;
	entry->dev_addr  = dma_addr;
	entry->direction = DMA_BIDIRECTIONAL;

	add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_alloc_coherent);

void debug_dma_free_coherent(struct device *dev, size_t size,
			     void *virt, dma_addr_t addr)
{
	struct dma_debug_entry ref = {
		.type           = dma_debug_coherent,
		.dev            = dev,
		.paddr          = virt_to_phys(virt),
		.dev_addr       = addr,
		.size           = size,
		.direction      = DMA_BIDIRECTIONAL,
	};

	if (unlikely(global_disable))
		return;

	check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_free_coherent);

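/*
 * Example of a coherent-allocation bug the two functions above catch
 * (hypothetical driver code): freeing with a size or CPU address different
 * from the allocation is reported by check_unmap() via the entry added in
 * debug_dma_alloc_coherent():
 *
 *	virt = dma_alloc_coherent(dev, 8192, &handle, GFP_KERNEL);
 *	...
 *	dma_free_coherent(dev, 4096, virt, handle);	// size mismatch
 */
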
void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
				   size_t size, int direction)
{
	if (unlikely(global_disable))
		return;

	check_sync(dev, dma_handle, size, 0, direction, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);

void debug_dma_sync_single_for_device(struct device *dev,
				      dma_addr_t dma_handle, size_t size,
				      int direction)
{
	if (unlikely(global_disable))
		return;

	check_sync(dev, dma_handle, size, 0, direction, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_device);

void debug_dma_sync_single_range_for_cpu(struct device *dev,
					 dma_addr_t dma_handle,
					 unsigned long offset, size_t size,
					 int direction)
{
	if (unlikely(global_disable))
		return;

	check_sync(dev, dma_handle, size, offset, direction, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);

void debug_dma_sync_single_range_for_device(struct device *dev,
					    dma_addr_t dma_handle,
					    unsigned long offset,
					    size_t size, int direction)
{
	if (unlikely(global_disable))
		return;

	check_sync(dev, dma_handle, size, offset, direction, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);

void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			       int nelems, int direction)
{
	struct scatterlist *s;
	int i;

	if (unlikely(global_disable))
		return;

	for_each_sg(sg, s, nelems, i) {
		check_sync(dev, s->dma_address, s->dma_length, 0,
			   direction, true);
	}
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);

void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				  int nelems, int direction)
{
	struct scatterlist *s;
	int i;

	if (unlikely(global_disable))
		return;

	for_each_sg(sg, s, nelems, i) {
		check_sync(dev, s->dma_address, s->dma_length, 0,
			   direction, false);
	}
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_device);