/*
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/stacktrace.h>
#include <linux/dma-debug.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <asm/sections.h>

#define HASH_SIZE       1024ULL
#define HASH_FN_SHIFT   13
#define HASH_FN_MASK    (HASH_SIZE - 1)

enum {
	dma_debug_single,
	dma_debug_page,
	dma_debug_sg,
	dma_debug_coherent,
};

enum map_err_types {
	MAP_ERR_CHECK_NOT_APPLICABLE,
	MAP_ERR_NOT_CHECKED,
	MAP_ERR_CHECKED,
};

#define DMA_DEBUG_STACKTRACE_ENTRIES 5

/**
 * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping
 * @list: node on pre-allocated free_entries list
 * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
 * @type: single, page, sg, coherent
 * @pfn: page frame of the start address
 * @offset: offset of mapping relative to pfn
 * @dev_addr: dma address returned by the mapping function
 * @size: length of the mapping
 * @direction: enum dma_data_direction
 * @sg_call_ents: 'nents' from dma_map_sg
 * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
 * @map_err_type: track whether dma_mapping_error() was checked
 * @stacktrace: support backtraces when a violation is detected
 */
struct dma_debug_entry {
	struct list_head   list;
	struct device      *dev;
	int                type;
	unsigned long      pfn;
	size_t             offset;
	u64                dev_addr;
	u64                size;
	int                direction;
	int                sg_call_ents;
	int                sg_mapped_ents;
	enum map_err_types map_err_type;
#ifdef CONFIG_STACKTRACE
	struct stack_trace stacktrace;
	unsigned long      st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
#endif
};

typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);

struct hash_bucket {
	struct list_head list;
	spinlock_t lock;
} ____cacheline_aligned_in_smp;

/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];
/* List of pre-allocated dma_debug_entry's */
static LIST_HEAD(free_entries);
/* Lock for the list above */
static DEFINE_SPINLOCK(free_entries_lock);

/* Global disable flag - will be set in case of an error */
static u32 global_disable __read_mostly;

/* Global error count */
static u32 error_count;

/* Global error show enable */
static u32 show_all_errors __read_mostly;
/* Number of errors to show */
static u32 show_num_errors = 1;

static u32 num_free_entries;
static u32 min_free_entries;
static u32 nr_total_entries;

/* number of preallocated entries requested by kernel cmdline */
static u32 req_entries;

/* debugfs dentry's for the stuff above */
static struct dentry *dma_debug_dent        __read_mostly;
static struct dentry *global_disable_dent   __read_mostly;
static struct dentry *error_count_dent      __read_mostly;
static struct dentry *show_all_errors_dent  __read_mostly;
static struct dentry *show_num_errors_dent  __read_mostly;
static struct dentry *num_free_entries_dent __read_mostly;
static struct dentry *min_free_entries_dent __read_mostly;
static struct dentry *filter_dent           __read_mostly;

/* per-driver filter related state */

#define NAME_MAX_LEN	64

static char  current_driver_name[NAME_MAX_LEN] __read_mostly;
static struct device_driver *current_driver    __read_mostly;

static DEFINE_RWLOCK(driver_name_lock);

static const char *const maperr2str[] = {
	[MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
	[MAP_ERR_NOT_CHECKED] = "dma map error not checked",
	[MAP_ERR_CHECKED] = "dma map error checked",
};

static const char *type2name[4] = { "single", "page",
				    "scatter-gather", "coherent" };

static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
				   "DMA_FROM_DEVICE", "DMA_NONE" };

/*
 * The access to some variables in this macro is racy. We can't use atomic_t
 * here because all these variables are exported to debugfs. Some of them are
 * even writeable. This is also the reason why a lock won't help much. But
 * anyway, the races are no big deal. Here is why:
 *
 *   error_count: the addition is racy, but the worst thing that can happen is
 *                that we don't count some errors
 *   show_num_errors: the subtraction is racy. Also no big deal because in
 *                    worst case this will result in one warning more in the
 *                    system log than the user configured. This variable is
 *                    writeable via debugfs.
 */
static inline void dump_entry_trace(struct dma_debug_entry *entry)
{
#ifdef CONFIG_STACKTRACE
	if (entry) {
		pr_warning("Mapped at:\n");
		print_stack_trace(&entry->stacktrace, 0);
	}
#endif
}

static bool driver_filter(struct device *dev)
{
	struct device_driver *drv;
	unsigned long flags;
	bool ret;

	/* driver filter off */
	if (likely(!current_driver_name[0]))
		return true;

	/* driver filter on and initialized */
	if (current_driver && dev && dev->driver == current_driver)
		return true;

	/* driver filter on, but we can't filter on a NULL device... */
	if (!dev)
		return false;

	if (current_driver || !current_driver_name[0])
		return false;

	/* driver filter on but not yet initialized */
	drv = dev->driver;
	if (!drv)
		return false;

	/* lock to protect against change of current_driver_name */
	read_lock_irqsave(&driver_name_lock, flags);

	ret = false;
	if (drv->name &&
	    strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
		current_driver = drv;
		ret = true;
	}

	read_unlock_irqrestore(&driver_name_lock, flags);

	return ret;
}

#define err_printk(dev, entry, format, arg...) do {			\
		error_count += 1;					\
		if (driver_filter(dev) &&				\
		    (show_all_errors || show_num_errors > 0)) {		\
			WARN(1, "%s %s: " format,			\
			     dev ? dev_driver_string(dev) : "NULL",	\
			     dev ? dev_name(dev) : "NULL", ## arg);	\
			dump_entry_trace(entry);			\
		}							\
		if (!show_all_errors && show_num_errors > 0)		\
			show_num_errors -= 1;				\
	} while (0)

/*
 * Hash related functions
 *
 * Every DMA-API request is saved into a struct dma_debug_entry. To
 * have quick access to these structs they are stored into a hash.
 */
static int hash_fn(struct dma_debug_entry *entry)
{
	/*
	 * Hash function is based on the dma address.
	 * With HASH_FN_SHIFT == 13 and a 10-bit HASH_FN_MASK this uses
	 * bits 13-22 of the address as the index into the hash.
	 */
	return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}

/*
 * Request exclusive access to a hash bucket for a given dma_debug_entry.
 */
static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
					   unsigned long *flags)
{
	int idx = hash_fn(entry);
	unsigned long __flags;

	spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
	*flags = __flags;
	return &dma_entry_hash[idx];
}

/*
 * Give up exclusive access to the hash bucket
 */
static void put_hash_bucket(struct hash_bucket *bucket,
			    unsigned long *flags)
{
	unsigned long __flags = *flags;

	spin_unlock_irqrestore(&bucket->lock, __flags);
}

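/*
 * Two matchers for hash lookups: exact_match requires the same device
 * and an identical bus address; containing_match accepts any entry on
 * the same device whose [dev_addr, dev_addr + size) range fully covers
 * the reference, which is what the dma_sync_* checks need.
 */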
static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
{
	return ((a->dev_addr == b->dev_addr) &&
		(a->dev == b->dev)) ? true : false;
}

static bool containing_match(struct dma_debug_entry *a,
			     struct dma_debug_entry *b)
{
	if (a->dev != b->dev)
		return false;

	if ((b->dev_addr <= a->dev_addr) &&
	    ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
		return true;

	return false;
}

/*
 * Search a given entry in the hash bucket list
 */
static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
						  struct dma_debug_entry *ref,
						  match_fn match)
{
	struct dma_debug_entry *entry, *ret = NULL;
	int matches = 0, match_lvl, last_lvl = -1;

	list_for_each_entry(entry, &bucket->list, list) {
		if (!match(ref, entry))
			continue;

		/*
		 * Some drivers map the same physical address multiple
		 * times. Without a hardware IOMMU this results in the
		 * same device addresses being put into the dma-debug
		 * hash multiple times too. This can result in false
		 * positives being reported. Therefore we implement a
		 * best-fit algorithm here which returns the entry from
		 * the hash which best fits the reference value
		 * instead of the first fit.
		 */
		matches += 1;
		match_lvl = 0;
		entry->size         == ref->size         ? ++match_lvl : 0;
		entry->type         == ref->type         ? ++match_lvl : 0;
		entry->direction    == ref->direction    ? ++match_lvl : 0;
		entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;

		if (match_lvl == 4) {
			/* perfect-fit - return the result */
			return entry;
		} else if (match_lvl > last_lvl) {
			/*
			 * We found an entry that fits better than the
			 * previous one or it is the 1st match.
			 */
			last_lvl = match_lvl;
			ret      = entry;
		}
	}

	/*
	 * If we have multiple matches but no perfect-fit, just return
	 * NULL.
	 */
	ret = (matches == 1) ? ret : NULL;

	return ret;
}

static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
						 struct dma_debug_entry *ref)
{
	return __hash_bucket_find(bucket, ref, exact_match);
}

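/*
 * A mapping that merely contains the reference address may start at an
 * address that hashes to an earlier bucket. Walk backwards through the
 * buckets, re-locking as we go, until either a containing entry is
 * found or we have gone past the device's maximum segment size.
 */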
static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
						   struct dma_debug_entry *ref,
						   unsigned long *flags)
{
	unsigned int max_range = dma_get_max_seg_size(ref->dev);
	struct dma_debug_entry *entry, index = *ref;
	unsigned int range = 0;

	while (range <= max_range) {
		entry = __hash_bucket_find(*bucket, &index, containing_match);

		if (entry)
			return entry;

		/*
		 * Nothing found, go back a hash bucket
		 */
		put_hash_bucket(*bucket, flags);
		range          += (1 << HASH_FN_SHIFT);
		index.dev_addr -= (1 << HASH_FN_SHIFT);
		*bucket = get_hash_bucket(&index, flags);
	}

	return NULL;
}

/*
 * Add an entry to a hash bucket
 */
static void hash_bucket_add(struct hash_bucket *bucket,
			    struct dma_debug_entry *entry)
{
	list_add_tail(&entry->list, &bucket->list);
}

/*
 * Remove entry from a hash bucket list
 */
static void hash_bucket_del(struct dma_debug_entry *entry)
{
	list_del(&entry->list);
}

static unsigned long long phys_addr(struct dma_debug_entry *entry)
{
	return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
}

/*
 * Dump mapping entries for debugging purposes
 */
void debug_dma_dump_mappings(struct device *dev)
{
	int idx;

	for (idx = 0; idx < HASH_SIZE; idx++) {
		struct hash_bucket *bucket = &dma_entry_hash[idx];
		struct dma_debug_entry *entry;
		unsigned long flags;

		spin_lock_irqsave(&bucket->lock, flags);

		list_for_each_entry(entry, &bucket->list, list) {
			if (!dev || dev == entry->dev) {
				dev_info(entry->dev,
					 "%s idx %d P=%Lx N=%lx D=%Lx L=%Lx %s %s\n",
					 type2name[entry->type], idx,
					 phys_addr(entry), entry->pfn,
					 entry->dev_addr, entry->size,
					 dir2name[entry->direction],
					 maperr2str[entry->map_err_type]);
			}
		}

		spin_unlock_irqrestore(&bucket->lock, flags);
	}
}
EXPORT_SYMBOL(debug_dma_dump_mappings);

/*
 * For each mapping (initial cacheline in the case of
 * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
 * scatterlist, or the cacheline specified in dma_map_single) insert
 * into this tree using the cacheline as the key. At
 * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry.  If
 * the entry already exists at insertion time add a tag as a reference
 * count for the overlapping mappings.  For now, the overlap tracking
 * just ensures that 'unmaps' balance 'maps' before marking the
 * cacheline idle, but we should also be flagging overlaps as an API
 * violation.
 *
 * Memory usage is mostly constrained by the maximum number of available
 * dma-debug entries in that we need a free dma_debug_entry before
 * inserting into the tree.  In the case of dma_map_page and
 * dma_alloc_coherent there is only one dma_debug_entry and one
 * dma_active_cacheline entry to track per event.  dma_map_sg(), on the
 * other hand, consumes a single dma_debug_entry, but inserts 'nents'
 * entries into the tree.
 *
 * At any time debug_dma_assert_idle() can be called to trigger a
 * warning if any cachelines in the given page are in the active set.
 */
static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT);
static DEFINE_SPINLOCK(radix_lock);
#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
#define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)

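/*
 * Convert a mapping to its first cacheline number: scale the pfn by the
 * number of cachelines per page and add the cacheline offset within the
 * page. The result is a global, physically indexed cacheline index.
 */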
static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
{
	return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
		(entry->offset >> L1_CACHE_SHIFT);
}

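/*
 * The overlap counter for a cacheline is not stored in the radix tree
 * item itself but is encoded, bit by bit, in the tree's per-entry tag
 * bits. With RADIX_TREE_MAX_TAGS tags this counts overlaps up to
 * ACTIVE_CACHELINE_MAX_OVERLAP before saturating.
 */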
static int active_cacheline_read_overlap(phys_addr_t cln)
{
	int overlap = 0, i;

	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
		if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
			overlap |= 1 << i;
	return overlap;
}

static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
{
	int i;

	if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
		return overlap;

	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
		if (overlap & 1 << i)
			radix_tree_tag_set(&dma_active_cacheline, cln, i);
		else
			radix_tree_tag_clear(&dma_active_cacheline, cln, i);

	return overlap;
}

static void active_cacheline_inc_overlap(phys_addr_t cln)
{
	int overlap = active_cacheline_read_overlap(cln);

	overlap = active_cacheline_set_overlap(cln, ++overlap);

	/* If we overflowed the overlap counter then we're potentially
	 * leaking dma-mappings.  Otherwise, if maps and unmaps are
	 * balanced then this overflow may cause false negatives in
	 * debug_dma_assert_idle() as the cacheline may be marked idle
	 * prematurely.
	 */
	WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
		  "DMA-API: exceeded %d overlapping mappings of cacheline %pa\n",
		  ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
}

static int active_cacheline_dec_overlap(phys_addr_t cln)
{
	int overlap = active_cacheline_read_overlap(cln);

	return active_cacheline_set_overlap(cln, --overlap);
}

static int active_cacheline_insert(struct dma_debug_entry *entry)
{
	phys_addr_t cln = to_cacheline_number(entry);
	unsigned long flags;
	int rc;

	/* If the device is not writing memory then we don't have any
	 * concerns about the cpu consuming stale data.  This mitigates
	 * legitimate usages of overlapping mappings.
	 */
	if (entry->direction == DMA_TO_DEVICE)
		return 0;

	spin_lock_irqsave(&radix_lock, flags);
	rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
	if (rc == -EEXIST)
		active_cacheline_inc_overlap(cln);
	spin_unlock_irqrestore(&radix_lock, flags);

	return rc;
}

static void active_cacheline_remove(struct dma_debug_entry *entry)
{
	phys_addr_t cln = to_cacheline_number(entry);
	unsigned long flags;

	/* ...mirror the insert case */
	if (entry->direction == DMA_TO_DEVICE)
		return;

	spin_lock_irqsave(&radix_lock, flags);
	/* since we are counting overlaps the final put of the
	 * cacheline will occur when the overlap count is 0.
	 * active_cacheline_dec_overlap() returns -1 in that case
	 */
	if (active_cacheline_dec_overlap(cln) < 0)
		radix_tree_delete(&dma_active_cacheline, cln);
	spin_unlock_irqrestore(&radix_lock, flags);
}

/**
 * debug_dma_assert_idle() - assert that a page is not undergoing dma
 * @page: page to lookup in the dma_active_cacheline tree
 *
 * Place a call to this routine in cases where the cpu touching the page
 * before the dma completes (page is dma_unmapped) will lead to data
 * corruption.
 */
void debug_dma_assert_idle(struct page *page)
{
	static struct dma_debug_entry *ents[CACHELINES_PER_PAGE];
	struct dma_debug_entry *entry = NULL;
	void **results = (void **) &ents;
	unsigned int nents, i;
	unsigned long flags;
	phys_addr_t cln;

	if (!page)
		return;

	cln = (phys_addr_t) page_to_pfn(page) << CACHELINE_PER_PAGE_SHIFT;
	spin_lock_irqsave(&radix_lock, flags);
	nents = radix_tree_gang_lookup(&dma_active_cacheline, results, cln,
				       CACHELINES_PER_PAGE);
	for (i = 0; i < nents; i++) {
		phys_addr_t ent_cln = to_cacheline_number(ents[i]);

		if (ent_cln == cln) {
			entry = ents[i];
			break;
		} else if (ent_cln >= cln + CACHELINES_PER_PAGE)
			break;
	}
	spin_unlock_irqrestore(&radix_lock, flags);

	if (!entry)
		return;

	cln = to_cacheline_number(entry);
	err_printk(entry->dev, entry,
		   "DMA-API: cpu touching an active dma mapped cacheline [cln=%pa]\n",
		   &cln);
}

/*
 * Wrapper function for adding an entry to the hash.
 * This function takes care of locking itself.
 */
static void add_dma_entry(struct dma_debug_entry *entry)
{
	struct hash_bucket *bucket;
	unsigned long flags;
	int rc;

	bucket = get_hash_bucket(entry, &flags);
	hash_bucket_add(bucket, entry);
	put_hash_bucket(bucket, &flags);

	rc = active_cacheline_insert(entry);
	if (rc == -ENOMEM) {
		pr_err("DMA-API: cacheline tracking ENOMEM, dma-debug disabled\n");
		global_disable = true;
	}

	/* TODO: report -EEXIST errors here as overlapping mappings are
	 * not supported by the DMA API
	 */
}

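/* Pop an entry off the free list; the caller must hold free_entries_lock. */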
static struct dma_debug_entry *__dma_entry_alloc(void)
{
	struct dma_debug_entry *entry;

	entry = list_entry(free_entries.next, struct dma_debug_entry, list);
	list_del(&entry->list);
	memset(entry, 0, sizeof(*entry));

	num_free_entries -= 1;
	if (num_free_entries < min_free_entries)
		min_free_entries = num_free_entries;

	return entry;
}

/* struct dma_entry allocator
 *
 * The next two functions implement the allocator for
 * struct dma_debug_entries.
 */
static struct dma_debug_entry *dma_entry_alloc(void)
{
	struct dma_debug_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(&free_entries_lock, flags);

	if (list_empty(&free_entries)) {
		pr_err("DMA-API: debugging out of memory - disabling\n");
		global_disable = true;
		spin_unlock_irqrestore(&free_entries_lock, flags);
		return NULL;
	}

	entry = __dma_entry_alloc();

	spin_unlock_irqrestore(&free_entries_lock, flags);

#ifdef CONFIG_STACKTRACE
	entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
	entry->stacktrace.entries = entry->st_entries;
	entry->stacktrace.skip = 2;
	save_stack_trace(&entry->stacktrace);
#endif

	return entry;
}

static void dma_entry_free(struct dma_debug_entry *entry)
{
	unsigned long flags;

	active_cacheline_remove(entry);

	/*
	 * add to beginning of the list - this way the entries are
	 * more likely cache hot when they are reallocated.
	 */
	spin_lock_irqsave(&free_entries_lock, flags);
	list_add(&entry->list, &free_entries);
	num_free_entries += 1;
	spin_unlock_irqrestore(&free_entries_lock, flags);
}

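/*
 * Grow or shrink the pool of preallocated entries at runtime. Returns 0
 * if the pool now holds exactly num_entries entries, 1 if the request
 * could not be satisfied completely (e.g. an allocation failed).
 */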
int dma_debug_resize_entries(u32 num_entries)
{
	int i, delta, ret = 0;
	unsigned long flags;
	struct dma_debug_entry *entry;
	LIST_HEAD(tmp);

	spin_lock_irqsave(&free_entries_lock, flags);

	if (nr_total_entries < num_entries) {
		delta = num_entries - nr_total_entries;

		spin_unlock_irqrestore(&free_entries_lock, flags);

		for (i = 0; i < delta; i++) {
			entry = kzalloc(sizeof(*entry), GFP_KERNEL);
			if (!entry)
				break;

			list_add_tail(&entry->list, &tmp);
		}

		spin_lock_irqsave(&free_entries_lock, flags);

		list_splice(&tmp, &free_entries);
		nr_total_entries += i;
		num_free_entries += i;
	} else {
		delta = nr_total_entries - num_entries;

		for (i = 0; i < delta && !list_empty(&free_entries); i++) {
			entry = __dma_entry_alloc();
			kfree(entry);
		}

		nr_total_entries -= i;
	}

	if (nr_total_entries != num_entries)
		ret = 1;

	spin_unlock_irqrestore(&free_entries_lock, flags);

	return ret;
}
EXPORT_SYMBOL(dma_debug_resize_entries);

/*
 * DMA-API debugging init code
 *
 * The init code does two things:
 *   1. Initialize core data structures
 *   2. Preallocate a given number of dma_debug_entry structs
 */

static int prealloc_memory(u32 num_entries)
{
	struct dma_debug_entry *entry, *next_entry;
	int i;

	for (i = 0; i < num_entries; ++i) {
		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry)
			goto out_err;

		list_add_tail(&entry->list, &free_entries);
	}

	num_free_entries = num_entries;
	min_free_entries = num_entries;

	pr_info("DMA-API: preallocated %d debug entries\n", num_entries);

	return 0;

out_err:

	list_for_each_entry_safe(entry, next_entry, &free_entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	return -ENOMEM;
}

static ssize_t filter_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	char buf[NAME_MAX_LEN + 1];
	unsigned long flags;
	int len;

	if (!current_driver_name[0])
		return 0;

	/*
	 * We can't copy to userspace directly because current_driver_name can
	 * only be read under the driver_name_lock with irqs disabled. So
	 * create a temporary copy first.
	 */
	read_lock_irqsave(&driver_name_lock, flags);
	len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
	read_unlock_irqrestore(&driver_name_lock, flags);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t filter_write(struct file *file, const char __user *userbuf,
			    size_t count, loff_t *ppos)
{
	char buf[NAME_MAX_LEN];
	unsigned long flags;
	size_t len;
	int i;

	/*
	 * We can't copy from userspace directly. Access to
	 * current_driver_name is protected with a write_lock with irqs
	 * disabled. Since copy_from_user can fault and may sleep we
	 * need to copy to a temporary buffer first
	 */
	len = min(count, (size_t)(NAME_MAX_LEN - 1));
	if (copy_from_user(buf, userbuf, len))
		return -EFAULT;

	buf[len] = 0;

	write_lock_irqsave(&driver_name_lock, flags);

	/*
	 * Now handle the string we got from userspace very carefully.
	 * The rules are:
	 * - only use the first token we got
	 * - token delimiter is everything looking like a space
	 *   character (' ', '\n', '\t' ...)
	 *
	 */
	if (!isalnum(buf[0])) {
		/*
		 * If the first character userspace gave us is not
		 * alphanumerical then assume the filter should be
		 * switched off.
		 */
		if (current_driver_name[0])
			pr_info("DMA-API: switching off dma-debug driver filter\n");
		current_driver_name[0] = 0;
		current_driver = NULL;
		goto out_unlock;
	}

	/*
	 * Now parse out the first token and use it as the name for the
	 * driver to filter for.
	 */
	for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
		current_driver_name[i] = buf[i];
		if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
			break;
	}
	current_driver_name[i] = 0;
	current_driver = NULL;

	pr_info("DMA-API: enable driver filter for driver [%s]\n",
		current_driver_name);

out_unlock:
	write_unlock_irqrestore(&driver_name_lock, flags);

	return count;
}

static const struct file_operations filter_fops = {
	.read  = filter_read,
	.write = filter_write,
	.llseek = default_llseek,
};

static int dma_debug_fs_init(void)
{
	dma_debug_dent = debugfs_create_dir("dma-api", NULL);
	if (!dma_debug_dent) {
		pr_err("DMA-API: can not create debugfs directory\n");
		return -ENOMEM;
	}

	global_disable_dent = debugfs_create_bool("disabled", 0444,
						  dma_debug_dent,
						  &global_disable);
	if (!global_disable_dent)
		goto out_err;

	error_count_dent = debugfs_create_u32("error_count", 0444,
					      dma_debug_dent, &error_count);
	if (!error_count_dent)
		goto out_err;

	show_all_errors_dent = debugfs_create_u32("all_errors", 0644,
						  dma_debug_dent,
						  &show_all_errors);
	if (!show_all_errors_dent)
		goto out_err;

	show_num_errors_dent = debugfs_create_u32("num_errors", 0644,
						  dma_debug_dent,
						  &show_num_errors);
	if (!show_num_errors_dent)
		goto out_err;

	num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444,
						   dma_debug_dent,
						   &num_free_entries);
	if (!num_free_entries_dent)
		goto out_err;

	min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444,
						   dma_debug_dent,
						   &min_free_entries);
	if (!min_free_entries_dent)
		goto out_err;

	filter_dent = debugfs_create_file("driver_filter", 0644,
					  dma_debug_dent, NULL, &filter_fops);
	if (!filter_dent)
		goto out_err;

	return 0;

out_err:
	debugfs_remove_recursive(dma_debug_dent);

	return -ENOMEM;
}

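/*
 * Count the dma-debug entries that still belong to a device; called when
 * a driver is unbound. Takes every bucket lock with interrupts disabled
 * and hands back one of the leaked entries via @out_entry for reporting.
 */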
static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
{
	struct dma_debug_entry *entry;
	unsigned long flags;
	int count = 0, i;

	local_irq_save(flags);

	for (i = 0; i < HASH_SIZE; ++i) {
		spin_lock(&dma_entry_hash[i].lock);
		list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
			if (entry->dev == dev) {
				count += 1;
				*out_entry = entry;
			}
		}
		spin_unlock(&dma_entry_hash[i].lock);
	}

	local_irq_restore(flags);

	return count;
}

static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
{
	struct device *dev = data;
	struct dma_debug_entry *uninitialized_var(entry);
	int count;

	if (global_disable)
		return 0;

	switch (action) {
	case BUS_NOTIFY_UNBOUND_DRIVER:
		count = device_dma_allocations(dev, &entry);
		if (count == 0)
			break;
		err_printk(dev, entry, "DMA-API: device driver has pending "
			   "DMA allocations while released from device "
			   "[count=%d]\n"
			   "One of leaked entries details: "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [mapped as %s]\n",
			   count, entry->dev_addr, entry->size,
			   dir2name[entry->direction], type2name[entry->type]);
		break;
	default:
		break;
	}

	return 0;
}

void dma_debug_add_bus(struct bus_type *bus)
{
	struct notifier_block *nb;

	if (global_disable)
		return;

	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
	if (nb == NULL) {
		pr_err("dma_debug_add_bus: out of memory\n");
		return;
	}

	nb->notifier_call = dma_debug_device_change;

	bus_register_notifier(bus, nb);
}

/*
 * Let the architectures decide how many entries should be preallocated.
 */
void dma_debug_init(u32 num_entries)
{
	int i;

	if (global_disable)
		return;

	for (i = 0; i < HASH_SIZE; ++i) {
		INIT_LIST_HEAD(&dma_entry_hash[i].list);
		spin_lock_init(&dma_entry_hash[i].lock);
	}

	if (dma_debug_fs_init() != 0) {
		pr_err("DMA-API: error creating debugfs entries - disabling\n");
		global_disable = true;

		return;
	}

	if (req_entries)
		num_entries = req_entries;

	if (prealloc_memory(num_entries) != 0) {
		pr_err("DMA-API: debugging out of memory error - disabled\n");
		global_disable = true;

		return;
	}

	nr_total_entries = num_free_entries;

	pr_info("DMA-API: debugging enabled by kernel config\n");
}

static __init int dma_debug_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (strncmp(str, "off", 3) == 0) {
		pr_info("DMA-API: debugging disabled on kernel command line\n");
		global_disable = true;
	}

	return 0;
}

static __init int dma_debug_entries_cmdline(char *str)
{
	int res;

	if (!str)
		return -EINVAL;

	res = get_option(&str, &req_entries);

	if (!res)
		req_entries = 0;

	return 0;
}

__setup("dma_debug=", dma_debug_cmdline);
__setup("dma_debug_entries=", dma_debug_entries_cmdline);

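/*
 * Validate an unmap request against the recorded mapping: the entry must
 * exist, and size, type, CPU address, scatterlist entry count, direction
 * and a prior dma_mapping_error() check are all verified before the
 * entry is released.
 */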
static void check_unmap(struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(ref, &flags);
	entry = bucket_find_exact(bucket, ref);

	if (!entry) {
		/* must drop lock before calling dma_mapping_error */
		put_hash_bucket(bucket, &flags);

		if (dma_mapping_error(ref->dev, ref->dev_addr)) {
			err_printk(ref->dev, NULL,
				   "DMA-API: device driver tries to free an "
				   "invalid DMA memory address\n");
		} else {
			err_printk(ref->dev, NULL,
				   "DMA-API: device driver tries to free DMA "
				   "memory it has not allocated [device "
				   "address=0x%016llx] [size=%llu bytes]\n",
				   ref->dev_addr, ref->size);
		}
		return;
	}

	if (ref->size != entry->size) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA memory with different size "
			   "[device address=0x%016llx] [map size=%llu bytes] "
			   "[unmap size=%llu bytes]\n",
			   ref->dev_addr, entry->size, ref->size);
	}

	if (ref->type != entry->type) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA memory with wrong function "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped as %s] [unmapped as %s]\n",
			   ref->dev_addr, ref->size,
			   type2name[entry->type], type2name[ref->type]);
	} else if ((entry->type == dma_debug_coherent) &&
		   (phys_addr(ref) != phys_addr(entry))) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA memory with different CPU address "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[cpu alloc address=0x%016llx] "
			   "[cpu free address=0x%016llx]",
			   ref->dev_addr, ref->size,
			   phys_addr(entry),
			   phys_addr(ref));
	}

	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
	    ref->sg_call_ents != entry->sg_call_ents) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA sg list with different entry count "
			   "[map count=%d] [unmap count=%d]\n",
			   entry->sg_call_ents, ref->sg_call_ents);
	}

	/*
	 * This may be no bug in reality - but most implementations of the
	 * DMA API don't handle this properly, so check for it here
	 */
	if (ref->direction != entry->direction) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA memory with different direction "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [unmapped with %s]\n",
			   ref->dev_addr, ref->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);
	}

	if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
		err_printk(ref->dev, entry,
			   "DMA-API: device driver failed to check map error "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped as %s]",
			   ref->dev_addr, ref->size,
			   type2name[entry->type]);
	}

	hash_bucket_del(entry);
	dma_entry_free(entry);

	put_hash_bucket(bucket, &flags);
}

static void check_for_stack(struct device *dev, void *addr)
{
	if (object_is_on_stack(addr))
		err_printk(dev, NULL, "DMA-API: device driver maps memory from "
			   "stack [addr=%p]\n", addr);
}

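/* Return true if [addr, addr + len) intersects the range [start, end). */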
static inline bool overlap(void *addr, unsigned long len, void *start, void *end)
{
	unsigned long a1 = (unsigned long)addr;
	unsigned long b1 = a1 + len;
	unsigned long a2 = (unsigned long)start;
	unsigned long b2 = (unsigned long)end;

	return !(b1 <= a2 || a1 >= b2);
}

static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
{
	if (overlap(addr, len, _text, _etext) ||
	    overlap(addr, len, __start_rodata, __end_rodata))
		err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
}

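/*
 * Validate a dma_sync_* call: the synced range must lie inside a known
 * mapping (found via bucket_find_contain), and the sync direction must
 * be compatible with the direction the memory was mapped with.
 */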
static void check_sync(struct device *dev,
		       struct dma_debug_entry *ref,
		       bool to_cpu)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(ref, &flags);

	entry = bucket_find_contain(&bucket, ref, &flags);

	if (!entry) {
		err_printk(dev, NULL, "DMA-API: device driver tries "
			   "to sync DMA memory it has not allocated "
			   "[device address=0x%016llx] [size=%llu bytes]\n",
			   (unsigned long long)ref->dev_addr, ref->size);
		goto out;
	}

	if (ref->size > entry->size) {
		err_printk(dev, entry, "DMA-API: device driver syncs"
			   " DMA memory outside allocated range "
			   "[device address=0x%016llx] "
			   "[allocation size=%llu bytes] "
			   "[sync offset+size=%llu]\n",
			   entry->dev_addr, entry->size,
			   ref->size);
	}

	if (entry->direction == DMA_BIDIRECTIONAL)
		goto out;

	if (ref->direction != entry->direction) {
		err_printk(dev, entry, "DMA-API: device driver syncs "
			   "DMA memory with different direction "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);
	}

	if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
	    !(ref->direction == DMA_TO_DEVICE))
		err_printk(dev, entry, "DMA-API: device driver syncs "
			   "device read-only DMA memory for cpu "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);

	if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
	    !(ref->direction == DMA_FROM_DEVICE))
		err_printk(dev, entry, "DMA-API: device driver syncs "
			   "device write-only DMA memory to device "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);

out:
	put_hash_bucket(bucket, &flags);
}

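/*
 * Record a new page/single mapping. Nothing is recorded when dma-debug
 * is disabled or when the mapping itself already failed; for lowmem
 * pages the CPU address is also checked against the stack and against
 * kernel text/rodata.
 */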
void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
			size_t size, int direction, dma_addr_t dma_addr,
			bool map_single)
{
	struct dma_debug_entry *entry;

	if (unlikely(global_disable))
		return;

	if (dma_mapping_error(dev, dma_addr))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->dev       = dev;
	entry->type      = dma_debug_page;
	entry->pfn       = page_to_pfn(page);
	entry->offset    = offset;
	entry->dev_addr  = dma_addr;
	entry->size      = size;
	entry->direction = direction;
	entry->map_err_type = MAP_ERR_NOT_CHECKED;

	if (map_single)
		entry->type = dma_debug_single;

	if (!PageHighMem(page)) {
		void *addr = page_address(page) + offset;

		check_for_stack(dev, addr);
		check_for_illegal_area(dev, addr, size);
	}

	add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_map_page);

void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_debug_entry ref;
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	if (unlikely(global_disable))
		return;

	ref.dev = dev;
	ref.dev_addr = dma_addr;
	bucket = get_hash_bucket(&ref, &flags);

	list_for_each_entry(entry, &bucket->list, list) {
		if (!exact_match(&ref, entry))
			continue;

		/*
		 * The same physical address can be mapped multiple
		 * times. Without a hardware IOMMU this results in the
		 * same device addresses being put into the dma-debug
		 * hash multiple times too. This can result in false
		 * positives being reported. Therefore we mark as
		 * checked the first entry from the hash which fits the
		 * reference value and is not currently listed as being
		 * checked.
		 */
		if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
			entry->map_err_type = MAP_ERR_CHECKED;
			break;
		}
	}

	put_hash_bucket(bucket, &flags);
}
EXPORT_SYMBOL(debug_dma_mapping_error);

void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
			  size_t size, int direction, bool map_single)
{
	struct dma_debug_entry ref = {
		.type           = dma_debug_page,
		.dev            = dev,
		.dev_addr       = addr,
		.size           = size,
		.direction      = direction,
	};

	if (unlikely(global_disable))
		return;

	if (map_single)
		ref.type = dma_debug_single;

	check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_unmap_page);

void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
		      int nents, int mapped_ents, int direction)
{
	struct dma_debug_entry *entry;
	struct scatterlist *s;
	int i;

	if (unlikely(global_disable))
		return;

	for_each_sg(sg, s, mapped_ents, i) {
		entry = dma_entry_alloc();
		if (!entry)
			return;

		entry->type           = dma_debug_sg;
		entry->dev            = dev;
		entry->pfn            = page_to_pfn(sg_page(s));
		entry->offset         = s->offset;
		entry->size           = sg_dma_len(s);
		entry->dev_addr       = sg_dma_address(s);
		entry->direction      = direction;
		entry->sg_call_ents   = nents;
		entry->sg_mapped_ents = mapped_ents;

		if (!PageHighMem(sg_page(s))) {
			check_for_stack(dev, sg_virt(s));
			check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
		}

		add_dma_entry(entry);
	}
}
EXPORT_SYMBOL(debug_dma_map_sg);

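/*
 * Look up the scatterlist entry matching @ref and return how many
 * elements the hardware actually mapped for it, or 0 if the mapping
 * is unknown. Used below to stop the unmap and sync loops at the
 * right point.
 */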
static int get_nr_mapped_entries(struct device *dev,
				 struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;
	int mapped_ents;

	bucket      = get_hash_bucket(ref, &flags);
	entry       = bucket_find_exact(bucket, ref);
	mapped_ents = 0;

	if (entry)
		mapped_ents = entry->sg_mapped_ents;
	put_hash_bucket(bucket, &flags);

	return mapped_ents;
}

void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
			int nelems, int dir)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(global_disable))
		return;

	for_each_sg(sglist, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type           = dma_debug_sg,
			.dev            = dev,
			.pfn            = page_to_pfn(sg_page(s)),
			.offset         = s->offset,
			.dev_addr       = sg_dma_address(s),
			.size           = sg_dma_len(s),
			.direction      = dir,
			.sg_call_ents   = nelems,
		};

		if (mapped_ents && i >= mapped_ents)
			break;

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		check_unmap(&ref);
	}
}
EXPORT_SYMBOL(debug_dma_unmap_sg);

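/*
 * Coherent allocations are tracked as bidirectional mappings. A
 * driver-side sketch (illustrative only):
 *
 *	void *cpu = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);
 *	...
 *	dma_free_coherent(dev, size, cpu, handle);
 *
 * The size and handle passed to the free must match the allocation,
 * or check_unmap() reports a violation.
 */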
void debug_dma_alloc_coherent(struct device *dev, size_t size,
			      dma_addr_t dma_addr, void *virt)
{
	struct dma_debug_entry *entry;

	if (unlikely(global_disable))
		return;

	if (unlikely(virt == NULL))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->type      = dma_debug_coherent;
	entry->dev       = dev;
	entry->pfn       = page_to_pfn(virt_to_page(virt));
	entry->offset    = offset_in_page(virt);
	entry->size      = size;
	entry->dev_addr  = dma_addr;
	entry->direction = DMA_BIDIRECTIONAL;

	add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_alloc_coherent);

void debug_dma_free_coherent(struct device *dev, size_t size,
			     void *virt, dma_addr_t addr)
{
	struct dma_debug_entry ref = {
		.type           = dma_debug_coherent,
		.dev            = dev,
		.pfn            = page_to_pfn(virt_to_page(virt)),
		.offset         = offset_in_page(virt),
		.dev_addr       = addr,
		.size           = size,
		.direction      = DMA_BIDIRECTIONAL,
	};

	if (unlikely(global_disable))
		return;

	check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_free_coherent);

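/*
 * Sync hooks: check_sync() verifies that the synced region lies inside
 * an active mapping and that the sync direction is compatible with the
 * mapping's direction, e.g. for a driver call like
 *
 *	dma_sync_single_for_cpu(dev, handle, size, DMA_FROM_DEVICE);
 */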
void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
				   size_t size, int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(global_disable))
		return;

	ref.type         = dma_debug_single;
	ref.dev          = dev;
	ref.dev_addr     = dma_handle;
	ref.size         = size;
	ref.direction    = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);

void debug_dma_sync_single_for_device(struct device *dev,
				      dma_addr_t dma_handle, size_t size,
				      int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(global_disable))
		return;

	ref.type         = dma_debug_single;
	ref.dev          = dev;
	ref.dev_addr     = dma_handle;
	ref.size         = size;
	ref.direction    = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_device);

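/*
 * The range variants sync bytes [offset, offset + size) of a mapping
 * that starts at dma_handle, so the reference size below is set to
 * offset + size: the span from the start of the mapping to the end of
 * the synced window that check_sync() must find inside the entry.
 */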
void debug_dma_sync_single_range_for_cpu(struct device *dev,
					 dma_addr_t dma_handle,
					 unsigned long offset, size_t size,
					 int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(global_disable))
		return;

	ref.type         = dma_debug_single;
	ref.dev          = dev;
	ref.dev_addr     = dma_handle;
	ref.size         = offset + size;
	ref.direction    = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);

void debug_dma_sync_single_range_for_device(struct device *dev,
					    dma_addr_t dma_handle,
					    unsigned long offset,
					    size_t size, int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(global_disable))
		return;

	ref.type         = dma_debug_single;
	ref.dev          = dev;
	ref.dev_addr     = dma_handle;
	ref.size         = offset + size;
	ref.direction    = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);

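/*
 * As in debug_dma_unmap_sg() above, only the elements that were
 * actually mapped are checked: the first iteration looks up the
 * recorded mapped_ents and the loop stops there.
 */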
void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			       int nelems, int direction)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(global_disable))
		return;

	for_each_sg(sg, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type           = dma_debug_sg,
			.dev            = dev,
			.pfn            = page_to_pfn(sg_page(s)),
			.offset         = s->offset,
			.dev_addr       = sg_dma_address(s),
			.size           = sg_dma_len(s),
			.direction      = direction,
			.sg_call_ents   = nelems,
		};

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		if (i >= mapped_ents)
			break;

		check_sync(dev, &ref, true);
	}
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);

void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				  int nelems, int direction)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(global_disable))
		return;

	for_each_sg(sg, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type           = dma_debug_sg,
			.dev            = dev,
			.pfn            = page_to_pfn(sg_page(s)),
			.offset         = s->offset,
			.dev_addr       = sg_dma_address(s),
			.size           = sg_dma_len(s),
			.direction      = direction,
			.sg_call_ents   = nelems,
		};
		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		if (i >= mapped_ents)
			break;

		check_sync(dev, &ref, false);
	}
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_device);

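/*
 * Boot-time filter, set on the kernel command line (illustrative
 * driver name):
 *
 *	dma_debug_driver=e1000e
 *
 * Warnings are then reported only for the named driver.
 */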
static int __init dma_debug_driver_setup(char *str)
{
	int i;

	for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
		current_driver_name[i] = *str;
		if (*str == 0)
			break;
	}

	if (current_driver_name[0])
		pr_info("DMA-API: enable driver filter for driver [%s]\n",
			current_driver_name);

	return 1;
}
__setup("dma_debug_driver=", dma_debug_driver_setup);