/*
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/sched/task_stack.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/sched/task.h>
#include <linux/stacktrace.h>
#include <linux/dma-debug.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <asm/sections.h>

#define HASH_SIZE       1024ULL
#define HASH_FN_SHIFT   13
#define HASH_FN_MASK    (HASH_SIZE - 1)

/* allow architectures to override this if absolutely required */
#ifndef PREALLOC_DMA_DEBUG_ENTRIES
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
#endif

enum {
        dma_debug_single,
        dma_debug_page,
        dma_debug_sg,
        dma_debug_coherent,
        dma_debug_resource,
};

enum map_err_types {
        MAP_ERR_CHECK_NOT_APPLICABLE,
        MAP_ERR_NOT_CHECKED,
        MAP_ERR_CHECKED,
};

#define DMA_DEBUG_STACKTRACE_ENTRIES 5

/**
 * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping
 * @list: node on pre-allocated free_entries list
 * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
 * @type: single, page, sg, coherent
 * @pfn: page frame of the start address
 * @offset: offset of mapping relative to pfn
 * @dev_addr: device address returned by the mapping function
 * @size: length of the mapping
 * @direction: enum dma_data_direction
 * @sg_call_ents: 'nents' from dma_map_sg
 * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
 * @map_err_type: track whether dma_mapping_error() was checked
 * @stacktrace: support backtraces when a violation is detected
 */
struct dma_debug_entry {
        struct list_head list;
        struct device    *dev;
        int              type;
        unsigned long    pfn;
        size_t           offset;
        u64              dev_addr;
        u64              size;
        int              direction;
        int              sg_call_ents;
        int              sg_mapped_ents;
        enum map_err_types  map_err_type;
#ifdef CONFIG_STACKTRACE
        struct stack_trace stacktrace;
        unsigned long    st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
#endif
};

typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);

struct hash_bucket {
        struct list_head list;
        spinlock_t lock;
} ____cacheline_aligned_in_smp;

/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];
/* List of pre-allocated dma_debug_entry's */
static LIST_HEAD(free_entries);
/* Lock for the list above */
static DEFINE_SPINLOCK(free_entries_lock);

/* Global disable flag - will be set in case of an error */
static bool global_disable __read_mostly;

/* Early initialization disable flag, set at the end of dma_debug_init */
static bool dma_debug_initialized __read_mostly;

static inline bool dma_debug_disabled(void)
{
        return global_disable || !dma_debug_initialized;
}

/* Global error count */
static u32 error_count;

/* Global error show enable */
static u32 show_all_errors __read_mostly;
/* Number of errors to show */
static u32 show_num_errors = 1;

static u32 num_free_entries;
static u32 min_free_entries;
static u32 nr_total_entries;

/* number of preallocated entries requested by kernel cmdline */
static u32 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;

/* debugfs dentry's for the stuff above */
static struct dentry *dma_debug_dent        __read_mostly;
static struct dentry *global_disable_dent   __read_mostly;
static struct dentry *error_count_dent      __read_mostly;
static struct dentry *show_all_errors_dent  __read_mostly;
static struct dentry *show_num_errors_dent  __read_mostly;
static struct dentry *num_free_entries_dent __read_mostly;
static struct dentry *min_free_entries_dent __read_mostly;
static struct dentry *filter_dent           __read_mostly;

/* per-driver filter related state */

#define NAME_MAX_LEN    64

static char                  current_driver_name[NAME_MAX_LEN] __read_mostly;
static struct device_driver *current_driver                    __read_mostly;

static DEFINE_RWLOCK(driver_name_lock);

static const char *const maperr2str[] = {
        [MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
        [MAP_ERR_NOT_CHECKED] = "dma map error not checked",
        [MAP_ERR_CHECKED] = "dma map error checked",
};

static const char *type2name[5] = { "single", "page",
                                    "scatter-gather", "coherent",
                                    "resource" };

static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
                                   "DMA_FROM_DEVICE", "DMA_NONE" };

/*
 * The access to some variables in this macro is racy. We can't use atomic_t
 * here because all these variables are exported to debugfs. Some of them are
 * even writeable. This is also the reason why a lock won't help much. But
 * anyway, the races are no big deal. Here is why:
 *
 *   error_count: the addition is racy, but the worst thing that can happen is
 *                that we don't count some errors
 *   show_num_errors: the subtraction is racy. Also no big deal because in
 *                    worst case this will result in one warning more in the
 *                    system log than the user configured. This variable is
 *                    writeable via debugfs.
 */
static inline void dump_entry_trace(struct dma_debug_entry *entry)
{
#ifdef CONFIG_STACKTRACE
        if (entry) {
                pr_warning("Mapped at:\n");
                print_stack_trace(&entry->stacktrace, 0);
        }
#endif
}

static bool driver_filter(struct device *dev)
{
        struct device_driver *drv;
        unsigned long flags;
        bool ret;

        /* driver filter off */
        if (likely(!current_driver_name[0]))
                return true;

        /* driver filter on and initialized */
        if (current_driver && dev && dev->driver == current_driver)
                return true;

        /* driver filter on, but we can't filter on a NULL device... */
        if (!dev)
                return false;

        if (current_driver || !current_driver_name[0])
                return false;

        /* driver filter on but not yet initialized */
        drv = dev->driver;
        if (!drv)
                return false;

        /* lock to protect against change of current_driver_name */
        read_lock_irqsave(&driver_name_lock, flags);

        ret = false;
        if (drv->name &&
            strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
                current_driver = drv;
                ret = true;
        }

        read_unlock_irqrestore(&driver_name_lock, flags);

        return ret;
}

#define err_printk(dev, entry, format, arg...) do {                     \
                error_count += 1;                                       \
                if (driver_filter(dev) &&                               \
                    (show_all_errors || show_num_errors > 0)) {         \
                        WARN(1, "%s %s: " format,                       \
                             dev ? dev_driver_string(dev) : "NULL",     \
                             dev ? dev_name(dev) : "NULL", ## arg);     \
                        dump_entry_trace(entry);                        \
                }                                                       \
                if (!show_all_errors && show_num_errors > 0)            \
                        show_num_errors -= 1;                           \
        } while (0)

/*
 * Hash related functions
 *
 * Every DMA-API request is saved into a struct dma_debug_entry. To
 * have quick access to these structs they are stored into a hash.
 */
static int hash_fn(struct dma_debug_entry *entry)
{
        /*
         * Hash function is based on the dma address.
         * We use bits 13-22 of the address as the index into the hash.
         */
        return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}

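/*
 * Worked example (editorial illustration, values hypothetical): with
 * HASH_FN_SHIFT = 13 and HASH_FN_MASK = 0x3ff, a dev_addr of 0x12345678
 * hashes to (0x12345678 >> 13) & 0x3ff = 0x91a2 & 0x3ff = 0x1a2, i.e.
 * bucket dma_entry_hash[0x1a2]. All addresses inside the same 8 KB
 * (1 << HASH_FN_SHIFT) window therefore land in the same bucket.
 */
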
/*
 * Request exclusive access to a hash bucket for a given dma_debug_entry.
 */
static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
                                           unsigned long *flags)
        __acquires(&dma_entry_hash[idx].lock)
{
        int idx = hash_fn(entry);
        unsigned long __flags;

        spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
        *flags = __flags;
        return &dma_entry_hash[idx];
}

/*
 * Give up exclusive access to the hash bucket
 */
static void put_hash_bucket(struct hash_bucket *bucket,
                            unsigned long *flags)
        __releases(&bucket->lock)
{
        unsigned long __flags = *flags;

        spin_unlock_irqrestore(&bucket->lock, __flags);
}

static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
{
        return (a->dev_addr == b->dev_addr) && (a->dev == b->dev);
}

static bool containing_match(struct dma_debug_entry *a,
                             struct dma_debug_entry *b)
{
        if (a->dev != b->dev)
                return false;

        if ((b->dev_addr <= a->dev_addr) &&
            ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
                return true;

        return false;
}

/*
 * Search a given entry in the hash bucket list
 */
static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
                                                  struct dma_debug_entry *ref,
                                                  match_fn match)
{
        struct dma_debug_entry *entry, *ret = NULL;
        int matches = 0, match_lvl, last_lvl = -1;

        list_for_each_entry(entry, &bucket->list, list) {
                if (!match(ref, entry))
                        continue;

                /*
                 * Some drivers map the same physical address multiple
                 * times. Without a hardware IOMMU this results in the
                 * same device addresses being put into the dma-debug
                 * hash multiple times too. This can result in false
                 * positives being reported. Therefore we implement a
                 * best-fit algorithm here which returns the entry from
                 * the hash which fits best to the reference value
                 * instead of the first-fit.
                 */
                matches += 1;
                match_lvl = 0;
                entry->size         == ref->size         ? ++match_lvl : 0;
                entry->type         == ref->type         ? ++match_lvl : 0;
                entry->direction    == ref->direction    ? ++match_lvl : 0;
                entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;

                if (match_lvl == 4) {
                        /* perfect-fit - return the result */
                        return entry;
                } else if (match_lvl > last_lvl) {
                        /*
                         * We found an entry that fits better than the
                         * previous one or it is the 1st match.
                         */
                        last_lvl = match_lvl;
                        ret = entry;
                }
        }

        /*
         * If we have multiple matches but no perfect-fit, just return
         * NULL.
         */
        ret = (matches == 1) ? ret : NULL;

        return ret;
}

static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
                                                 struct dma_debug_entry *ref)
{
        return __hash_bucket_find(bucket, ref, exact_match);
}

static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
                                                   struct dma_debug_entry *ref,
                                                   unsigned long *flags)
{
        unsigned int max_range = dma_get_max_seg_size(ref->dev);
        struct dma_debug_entry *entry, index = *ref;
        unsigned int range = 0;

        while (range <= max_range) {
                entry = __hash_bucket_find(*bucket, ref, containing_match);

                if (entry)
                        return entry;

                /*
                 * Nothing found, go back a hash bucket
                 */
                put_hash_bucket(*bucket, flags);
                range          += (1 << HASH_FN_SHIFT);
                index.dev_addr -= (1 << HASH_FN_SHIFT);
                *bucket = get_hash_bucket(&index, flags);
        }

        return NULL;
}

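/*
 * Editorial note (hypothetical numbers): a sync call may reference an
 * address in the middle of a mapping, so the containing entry can hash
 * to an earlier bucket than the reference. bucket_find_contain() walks
 * backwards one 8 KB hash window at a time: if ref->dev_addr is
 * 0x10002000 and the containing mapping starts at 0x0fffe000, the entry
 * sits two windows back and is found on the third iteration. The walk
 * is bounded by dma_get_max_seg_size(), the largest mapping the device
 * admits.
 */
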
/*
 * Add an entry to a hash bucket
 */
static void hash_bucket_add(struct hash_bucket *bucket,
                            struct dma_debug_entry *entry)
{
        list_add_tail(&entry->list, &bucket->list);
}

/*
 * Remove entry from a hash bucket list
 */
static void hash_bucket_del(struct dma_debug_entry *entry)
{
        list_del(&entry->list);
}

static unsigned long long phys_addr(struct dma_debug_entry *entry)
{
        if (entry->type == dma_debug_resource)
                return __pfn_to_phys(entry->pfn) + entry->offset;

        return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
}

/*
 * Dump mapping entries for debugging purposes
 */
void debug_dma_dump_mappings(struct device *dev)
{
        int idx;

        for (idx = 0; idx < HASH_SIZE; idx++) {
                struct hash_bucket *bucket = &dma_entry_hash[idx];
                struct dma_debug_entry *entry;
                unsigned long flags;

                spin_lock_irqsave(&bucket->lock, flags);

                list_for_each_entry(entry, &bucket->list, list) {
                        if (!dev || dev == entry->dev) {
                                dev_info(entry->dev,
                                         "%s idx %d P=%Lx N=%lx D=%Lx L=%Lx %s %s\n",
                                         type2name[entry->type], idx,
                                         phys_addr(entry), entry->pfn,
                                         entry->dev_addr, entry->size,
                                         dir2name[entry->direction],
                                         maperr2str[entry->map_err_type]);
                        }
                }

                spin_unlock_irqrestore(&bucket->lock, flags);
        }
}

/*
 * For each mapping (initial cacheline in the case of
 * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
 * scatterlist, or the cacheline specified in dma_map_single) insert
 * into this tree using the cacheline as the key. At
 * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If
 * the entry already exists at insertion time add a tag as a reference
 * count for the overlapping mappings. For now, the overlap tracking
 * just ensures that 'unmaps' balance 'maps' before marking the
 * cacheline idle, but we should also be flagging overlaps as an API
 * violation.
 *
 * Memory usage is mostly constrained by the maximum number of available
 * dma-debug entries in that we need a free dma_debug_entry before
 * inserting into the tree. In the case of dma_map_page and
 * dma_alloc_coherent there is only one dma_debug_entry and one
 * dma_active_cacheline entry to track per event. dma_map_sg(), on the
 * other hand, consumes a single dma_debug_entry, but inserts 'nents'
 * entries into the tree.
 *
 * At any time debug_dma_assert_idle() can be called to trigger a
 * warning if any cachelines in the given page are in the active set.
 */
static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT);
static DEFINE_SPINLOCK(radix_lock);
#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
#define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)

static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
{
        return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
                (entry->offset >> L1_CACHE_SHIFT);
}

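/*
 * Worked example (editorial illustration, hypothetical values): with
 * 4 KB pages (PAGE_SHIFT = 12) and 64-byte cachelines (L1_CACHE_SHIFT = 6),
 * CACHELINE_PER_PAGE_SHIFT is 6, so each page holds 64 trackable
 * cachelines. An entry with pfn = 0x1234 and offset = 0x80 maps to
 * cacheline number (0x1234 << 6) + (0x80 >> 6) = 0x48d00 + 2 = 0x48d02.
 */
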
static int active_cacheline_read_overlap(phys_addr_t cln)
{
        int overlap = 0, i;

        for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
                if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
                        overlap |= 1 << i;
        return overlap;
}

static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
{
        int i;

        if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
                return overlap;

        for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
                if (overlap & 1 << i)
                        radix_tree_tag_set(&dma_active_cacheline, cln, i);
                else
                        radix_tree_tag_clear(&dma_active_cacheline, cln, i);

        return overlap;
}

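/*
 * Editorial note: the overlap count is not stored as an integer field;
 * it is encoded bit-by-bit in the radix tree's per-entry tags, with tag
 * i holding bit i of the counter. Assuming the usual RADIX_TREE_MAX_TAGS
 * of 3, the counter saturates at ACTIVE_CACHELINE_MAX_OVERLAP =
 * (1 << 3) - 1 = 7 simultaneous overlapping mappings per cacheline.
 */
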
static void active_cacheline_inc_overlap(phys_addr_t cln)
{
        int overlap = active_cacheline_read_overlap(cln);

        overlap = active_cacheline_set_overlap(cln, ++overlap);

        /* If we overflowed the overlap counter then we're potentially
         * leaking dma-mappings. Otherwise, if maps and unmaps are
         * balanced then this overflow may cause false negatives in
         * debug_dma_assert_idle() as the cacheline may be marked idle
         * prematurely.
         */
        WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
                  "DMA-API: exceeded %d overlapping mappings of cacheline %pa\n",
                  ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
}

static int active_cacheline_dec_overlap(phys_addr_t cln)
{
        int overlap = active_cacheline_read_overlap(cln);

        return active_cacheline_set_overlap(cln, --overlap);
}

static int active_cacheline_insert(struct dma_debug_entry *entry)
{
        phys_addr_t cln = to_cacheline_number(entry);
        unsigned long flags;
        int rc;

        /* If the device is not writing memory then we don't have any
         * concerns about the cpu consuming stale data. This mitigates
         * legitimate usages of overlapping mappings.
         */
        if (entry->direction == DMA_TO_DEVICE)
                return 0;

        spin_lock_irqsave(&radix_lock, flags);
        rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
        if (rc == -EEXIST)
                active_cacheline_inc_overlap(cln);
        spin_unlock_irqrestore(&radix_lock, flags);

        return rc;
}

static void active_cacheline_remove(struct dma_debug_entry *entry)
{
        phys_addr_t cln = to_cacheline_number(entry);
        unsigned long flags;

        /* ...mirror the insert case */
        if (entry->direction == DMA_TO_DEVICE)
                return;

        spin_lock_irqsave(&radix_lock, flags);
        /* since we are counting overlaps the final put of the
         * cacheline will occur when the overlap count is 0.
         * active_cacheline_dec_overlap() returns -1 in that case
         */
        if (active_cacheline_dec_overlap(cln) < 0)
                radix_tree_delete(&dma_active_cacheline, cln);
        spin_unlock_irqrestore(&radix_lock, flags);
}

/**
 * debug_dma_assert_idle() - assert that a page is not undergoing dma
 * @page: page to lookup in the dma_active_cacheline tree
 *
 * Place a call to this routine in cases where the cpu touching the page
 * before the dma completes (page is dma_unmapped) will lead to data
 * corruption.
 */
void debug_dma_assert_idle(struct page *page)
{
        static struct dma_debug_entry *ents[CACHELINES_PER_PAGE];
        struct dma_debug_entry *entry = NULL;
        void **results = (void **) &ents;
        unsigned int nents, i;
        unsigned long flags;
        phys_addr_t cln;

        if (dma_debug_disabled())
                return;

        if (!page)
                return;

        cln = (phys_addr_t) page_to_pfn(page) << CACHELINE_PER_PAGE_SHIFT;
        spin_lock_irqsave(&radix_lock, flags);
        nents = radix_tree_gang_lookup(&dma_active_cacheline, results, cln,
                                       CACHELINES_PER_PAGE);
        for (i = 0; i < nents; i++) {
                phys_addr_t ent_cln = to_cacheline_number(ents[i]);

                if (ent_cln == cln) {
                        entry = ents[i];
                        break;
                } else if (ent_cln >= cln + CACHELINES_PER_PAGE)
                        break;
        }
        spin_unlock_irqrestore(&radix_lock, flags);

        if (!entry)
                return;

        cln = to_cacheline_number(entry);
        err_printk(entry->dev, entry,
                   "DMA-API: cpu touching an active dma mapped cacheline [cln=%pa]\n",
                   &cln);
}

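/*
 * Usage sketch (editorial, hypothetical call site): code that is about
 * to let the cpu write to a page can assert that no device still owns
 * any cacheline in it:
 *
 *      debug_dma_assert_idle(page);
 *
 * This warns (via err_printk) if any cacheline of 'page' is an active
 * non-DMA_TO_DEVICE mapping; a stub makes the call vanish when
 * CONFIG_DMA_API_DEBUG is not set.
 */
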
/*
 * Wrapper function for adding an entry to the hash.
 * This function takes care of locking itself.
 */
static void add_dma_entry(struct dma_debug_entry *entry)
{
        struct hash_bucket *bucket;
        unsigned long flags;
        int rc;

        bucket = get_hash_bucket(entry, &flags);
        hash_bucket_add(bucket, entry);
        put_hash_bucket(bucket, &flags);

        rc = active_cacheline_insert(entry);
        if (rc == -ENOMEM) {
                pr_err("DMA-API: cacheline tracking ENOMEM, dma-debug disabled\n");
                global_disable = true;
        }

        /* TODO: report -EEXIST errors here as overlapping mappings are
         * not supported by the DMA API
         */
}

static struct dma_debug_entry *__dma_entry_alloc(void)
{
        struct dma_debug_entry *entry;

        entry = list_entry(free_entries.next, struct dma_debug_entry, list);
        list_del(&entry->list);
        memset(entry, 0, sizeof(*entry));

        num_free_entries -= 1;
        if (num_free_entries < min_free_entries)
                min_free_entries = num_free_entries;

        return entry;
}

/* struct dma_entry allocator
 *
 * The next two functions implement the allocator for
 * struct dma_debug_entries.
 */
static struct dma_debug_entry *dma_entry_alloc(void)
{
        struct dma_debug_entry *entry;
        unsigned long flags;

        spin_lock_irqsave(&free_entries_lock, flags);

        if (list_empty(&free_entries)) {
                global_disable = true;
                spin_unlock_irqrestore(&free_entries_lock, flags);
                pr_err("DMA-API: debugging out of memory - disabling\n");
                return NULL;
        }

        entry = __dma_entry_alloc();

        spin_unlock_irqrestore(&free_entries_lock, flags);

#ifdef CONFIG_STACKTRACE
        entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
        entry->stacktrace.entries = entry->st_entries;
        entry->stacktrace.skip = 2;
        save_stack_trace(&entry->stacktrace);
#endif

        return entry;
}

static void dma_entry_free(struct dma_debug_entry *entry)
{
        unsigned long flags;

        active_cacheline_remove(entry);

        /*
         * add to beginning of the list - this way the entries are
         * more likely cache hot when they are reallocated.
         */
        spin_lock_irqsave(&free_entries_lock, flags);
        list_add(&entry->list, &free_entries);
        num_free_entries += 1;
        spin_unlock_irqrestore(&free_entries_lock, flags);
}

int dma_debug_resize_entries(u32 num_entries)
{
        int i, delta, ret = 0;
        unsigned long flags;
        struct dma_debug_entry *entry;
        LIST_HEAD(tmp);

        spin_lock_irqsave(&free_entries_lock, flags);

        if (nr_total_entries < num_entries) {
                delta = num_entries - nr_total_entries;

                spin_unlock_irqrestore(&free_entries_lock, flags);

                for (i = 0; i < delta; i++) {
                        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
                        if (!entry)
                                break;

                        list_add_tail(&entry->list, &tmp);
                }

                spin_lock_irqsave(&free_entries_lock, flags);

                list_splice(&tmp, &free_entries);
                nr_total_entries += i;
                num_free_entries += i;
        } else {
                delta = nr_total_entries - num_entries;

                for (i = 0; i < delta && !list_empty(&free_entries); i++) {
                        entry = __dma_entry_alloc();
                        kfree(entry);
                }

                nr_total_entries -= i;
        }

        if (nr_total_entries != num_entries)
                ret = 1;

        spin_unlock_irqrestore(&free_entries_lock, flags);

        return ret;
}

/*
 * DMA-API debugging init code
 *
 * The init code does two things:
 *   1. Initialize core data structures
 *   2. Preallocate a given number of dma_debug_entry structs
 */

static int prealloc_memory(u32 num_entries)
{
        struct dma_debug_entry *entry, *next_entry;
        int i;

        for (i = 0; i < num_entries; ++i) {
                entry = kzalloc(sizeof(*entry), GFP_KERNEL);
                if (!entry)
                        goto out_err;

                list_add_tail(&entry->list, &free_entries);
        }

        num_free_entries = num_entries;
        min_free_entries = num_entries;

        pr_info("DMA-API: preallocated %d debug entries\n", num_entries);

        return 0;

out_err:

        list_for_each_entry_safe(entry, next_entry, &free_entries, list) {
                list_del(&entry->list);
                kfree(entry);
        }

        return -ENOMEM;
}

static ssize_t filter_read(struct file *file, char __user *user_buf,
                           size_t count, loff_t *ppos)
{
        char buf[NAME_MAX_LEN + 1];
        unsigned long flags;
        int len;

        if (!current_driver_name[0])
                return 0;

        /*
         * We can't copy to userspace directly because current_driver_name can
         * only be read under the driver_name_lock with irqs disabled. So
         * create a temporary copy first.
         */
        read_lock_irqsave(&driver_name_lock, flags);
        len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
        read_unlock_irqrestore(&driver_name_lock, flags);

        return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t filter_write(struct file *file, const char __user *userbuf,
                            size_t count, loff_t *ppos)
{
        char buf[NAME_MAX_LEN];
        unsigned long flags;
        size_t len;
        int i;

        /*
         * We can't copy from userspace directly. Access to
         * current_driver_name is protected with a write_lock with irqs
         * disabled. Since copy_from_user can fault and may sleep we
         * need to copy to a temporary buffer first.
         */
        len = min(count, (size_t)(NAME_MAX_LEN - 1));
        if (copy_from_user(buf, userbuf, len))
                return -EFAULT;

        buf[len] = 0;

        write_lock_irqsave(&driver_name_lock, flags);

        /*
         * Now handle the string we got from userspace very carefully.
         * The rules are:
         * - only use the first token we got
         * - token delimiter is everything looking like a space
         *   character (' ', '\n', '\t' ...)
         */
        if (!isalnum(buf[0])) {
                /*
                 * If the first character userspace gave us is not
                 * alphanumerical then assume the filter should be
                 * switched off.
                 */
                if (current_driver_name[0])
                        pr_info("DMA-API: switching off dma-debug driver filter\n");
                current_driver_name[0] = 0;
                current_driver = NULL;
                goto out_unlock;
        }

        /*
         * Now parse out the first token and use it as the name for the
         * driver to filter for.
         */
        for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
                current_driver_name[i] = buf[i];
                if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
                        break;
        }
        current_driver_name[i] = 0;
        current_driver = NULL;

        pr_info("DMA-API: enable driver filter for driver [%s]\n",
                current_driver_name);

out_unlock:
        write_unlock_irqrestore(&driver_name_lock, flags);

        return count;
}

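/*
 * Usage sketch (editorial; the debugfs mount point shown is the
 * conventional one and may differ on a given system):
 *
 *      # limit dma-debug error reports to one driver
 *      echo "e1000e" > /sys/kernel/debug/dma-api/driver_filter
 *      # switch the filter off again (first char is not alphanumeric)
 *      echo "" > /sys/kernel/debug/dma-api/driver_filter
 */
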
static const struct file_operations filter_fops = {
        .read   = filter_read,
        .write  = filter_write,
        .llseek = default_llseek,
};

static int dma_debug_fs_init(void)
{
        dma_debug_dent = debugfs_create_dir("dma-api", NULL);
        if (!dma_debug_dent) {
                pr_err("DMA-API: can not create debugfs directory\n");
                return -ENOMEM;
        }

        global_disable_dent = debugfs_create_bool("disabled", 0444,
                                                  dma_debug_dent,
                                                  &global_disable);
        if (!global_disable_dent)
                goto out_err;

        error_count_dent = debugfs_create_u32("error_count", 0444,
                                              dma_debug_dent, &error_count);
        if (!error_count_dent)
                goto out_err;

        show_all_errors_dent = debugfs_create_u32("all_errors", 0644,
                                                  dma_debug_dent,
                                                  &show_all_errors);
        if (!show_all_errors_dent)
                goto out_err;

        show_num_errors_dent = debugfs_create_u32("num_errors", 0644,
                                                  dma_debug_dent,
                                                  &show_num_errors);
        if (!show_num_errors_dent)
                goto out_err;

        num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444,
                                                   dma_debug_dent,
                                                   &num_free_entries);
        if (!num_free_entries_dent)
                goto out_err;

        min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444,
                                                   dma_debug_dent,
                                                   &min_free_entries);
        if (!min_free_entries_dent)
                goto out_err;

        filter_dent = debugfs_create_file("driver_filter", 0644,
                                          dma_debug_dent, NULL, &filter_fops);
        if (!filter_dent)
                goto out_err;

        return 0;

out_err:
        debugfs_remove_recursive(dma_debug_dent);

        return -ENOMEM;
}

static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
{
        struct dma_debug_entry *entry;
        unsigned long flags;
        int count = 0, i;

        for (i = 0; i < HASH_SIZE; ++i) {
                spin_lock_irqsave(&dma_entry_hash[i].lock, flags);
                list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
                        if (entry->dev == dev) {
                                count += 1;
                                *out_entry = entry;
                        }
                }
                spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags);
        }

        return count;
}

static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
{
        struct device *dev = data;
        struct dma_debug_entry *uninitialized_var(entry);
        int count;

        if (dma_debug_disabled())
                return 0;

        switch (action) {
        case BUS_NOTIFY_UNBOUND_DRIVER:
                count = device_dma_allocations(dev, &entry);
                if (count == 0)
                        break;
                err_printk(dev, entry, "DMA-API: device driver has pending "
                                "DMA allocations while released from device "
                                "[count=%d]\n"
                                "One of leaked entries details: "
                                "[device address=0x%016llx] [size=%llu bytes] "
                                "[mapped with %s] [mapped as %s]\n",
                        count, entry->dev_addr, entry->size,
                        dir2name[entry->direction], type2name[entry->type]);
                break;
        default:
                break;
        }

        return 0;
}

void dma_debug_add_bus(struct bus_type *bus)
{
        struct notifier_block *nb;

        if (dma_debug_disabled())
                return;

        nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
        if (nb == NULL) {
                pr_err("dma_debug_add_bus: out of memory\n");
                return;
        }

        nb->notifier_call = dma_debug_device_change;

        bus_register_notifier(bus, nb);
}

static int dma_debug_init(void)
{
        int i;

        /* Do not use dma_debug_initialized here, since we really want to be
         * called to set dma_debug_initialized
         */
        if (global_disable)
                return 0;

        for (i = 0; i < HASH_SIZE; ++i) {
                INIT_LIST_HEAD(&dma_entry_hash[i].list);
                spin_lock_init(&dma_entry_hash[i].lock);
        }

        if (dma_debug_fs_init() != 0) {
                pr_err("DMA-API: error creating debugfs entries - disabling\n");
                global_disable = true;

                return 0;
        }

        if (prealloc_memory(nr_prealloc_entries) != 0) {
                pr_err("DMA-API: debugging out of memory error - disabled\n");
                global_disable = true;

                return 0;
        }

        nr_total_entries = num_free_entries;

        dma_debug_initialized = true;

        pr_info("DMA-API: debugging enabled by kernel config\n");
        return 0;
}
core_initcall(dma_debug_init);

static __init int dma_debug_cmdline(char *str)
{
        if (!str)
                return -EINVAL;

        if (strncmp(str, "off", 3) == 0) {
                pr_info("DMA-API: debugging disabled on kernel command line\n");
                global_disable = true;
        }

        return 0;
}

static __init int dma_debug_entries_cmdline(char *str)
{
        if (!str)
                return -EINVAL;
        if (!get_option(&str, &nr_prealloc_entries))
                nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
        return 0;
}

__setup("dma_debug=", dma_debug_cmdline);
__setup("dma_debug_entries=", dma_debug_entries_cmdline);

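/*
 * Usage sketch (editorial): both knobs are early-boot kernel parameters,
 * e.g. on the command line
 *
 *      dma_debug=off                 disable dma-debug at boot
 *      dma_debug_entries=65536       resize the preallocated entry pool
 *
 * Values for dma_debug_entries are parsed by get_option(); anything
 * unparseable falls back to PREALLOC_DMA_DEBUG_ENTRIES.
 */
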
static void check_unmap(struct dma_debug_entry *ref)
{
        struct dma_debug_entry *entry;
        struct hash_bucket *bucket;
        unsigned long flags;

        bucket = get_hash_bucket(ref, &flags);
        entry = bucket_find_exact(bucket, ref);

        if (!entry) {
                /* must drop lock before calling dma_mapping_error */
                put_hash_bucket(bucket, &flags);

                if (dma_mapping_error(ref->dev, ref->dev_addr)) {
                        err_printk(ref->dev, NULL,
                                   "DMA-API: device driver tries to free an "
                                   "invalid DMA memory address\n");
                } else {
                        err_printk(ref->dev, NULL,
                                   "DMA-API: device driver tries to free DMA "
                                   "memory it has not allocated [device "
                                   "address=0x%016llx] [size=%llu bytes]\n",
                                   ref->dev_addr, ref->size);
                }
                return;
        }

        if (ref->size != entry->size) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA memory with different size "
                           "[device address=0x%016llx] [map size=%llu bytes] "
                           "[unmap size=%llu bytes]\n",
                           ref->dev_addr, entry->size, ref->size);
        }

        if (ref->type != entry->type) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA memory with wrong function "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped as %s] [unmapped as %s]\n",
                           ref->dev_addr, ref->size,
                           type2name[entry->type], type2name[ref->type]);
        } else if ((entry->type == dma_debug_coherent) &&
                   (phys_addr(ref) != phys_addr(entry))) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA memory with different CPU address "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[cpu alloc address=0x%016llx] "
                           "[cpu free address=0x%016llx]",
                           ref->dev_addr, ref->size,
                           phys_addr(entry),
                           phys_addr(ref));
        }

        if (ref->sg_call_ents && ref->type == dma_debug_sg &&
            ref->sg_call_ents != entry->sg_call_ents) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA sg list with different entry count "
                           "[map count=%d] [unmap count=%d]\n",
                           entry->sg_call_ents, ref->sg_call_ents);
        }

        /*
         * This may be no bug in reality - but most implementations of the
         * DMA API don't handle this properly, so check for it here
         */
        if (ref->direction != entry->direction) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA memory with different direction "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped with %s] [unmapped with %s]\n",
                           ref->dev_addr, ref->size,
                           dir2name[entry->direction],
                           dir2name[ref->direction]);
        }

        /*
         * Drivers should use dma_mapping_error() to check the returned
         * addresses of dma_map_single() and dma_map_page().
         * If not, print this warning message. See Documentation/DMA-API.txt.
         */
        if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
                err_printk(ref->dev, entry,
                           "DMA-API: device driver failed to check map error "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped as %s]",
                           ref->dev_addr, ref->size,
                           type2name[entry->type]);
        }

        hash_bucket_del(entry);
        dma_entry_free(entry);

        put_hash_bucket(bucket, &flags);
}

static void check_for_stack(struct device *dev,
                            struct page *page, size_t offset)
{
        void *addr;
        struct vm_struct *stack_vm_area = task_stack_vm_area(current);

        if (!stack_vm_area) {
                /* Stack is direct-mapped. */
                if (PageHighMem(page))
                        return;
                addr = page_address(page) + offset;
                if (object_is_on_stack(addr))
                        err_printk(dev, NULL, "DMA-API: device driver maps memory from stack [addr=%p]\n", addr);
        } else {
                /* Stack is vmalloced. */
                int i;

                for (i = 0; i < stack_vm_area->nr_pages; i++) {
                        if (page != stack_vm_area->pages[i])
                                continue;

                        addr = (u8 *)current->stack + i * PAGE_SIZE + offset;
                        err_printk(dev, NULL, "DMA-API: device driver maps memory from stack [probable addr=%p]\n", addr);
                        break;
                }
        }
}

static inline bool overlap(void *addr, unsigned long len, void *start, void *end)
{
        unsigned long a1 = (unsigned long)addr;
        unsigned long b1 = a1 + len;
        unsigned long a2 = (unsigned long)start;
        unsigned long b2 = (unsigned long)end;

        return !(b1 <= a2 || a1 >= b2);
}

static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
{
        if (overlap(addr, len, _stext, _etext) ||
            overlap(addr, len, __start_rodata, __end_rodata))
                err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
}

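/*
 * Editorial note: overlap() treats both ranges as half-open intervals
 * [a1, b1) and [a2, b2). Two such intervals are disjoint exactly when
 * one ends before the other begins, hence the negated
 * (b1 <= a2 || a1 >= b2).
 */
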
static void check_sync(struct device *dev,
                       struct dma_debug_entry *ref,
                       bool to_cpu)
{
        struct dma_debug_entry *entry;
        struct hash_bucket *bucket;
        unsigned long flags;

        bucket = get_hash_bucket(ref, &flags);

        entry = bucket_find_contain(&bucket, ref, &flags);

        if (!entry) {
                err_printk(dev, NULL, "DMA-API: device driver tries "
                           "to sync DMA memory it has not allocated "
                           "[device address=0x%016llx] [size=%llu bytes]\n",
                           (unsigned long long)ref->dev_addr, ref->size);
                goto out;
        }

        if (ref->size > entry->size) {
                err_printk(dev, entry, "DMA-API: device driver syncs"
                           " DMA memory outside allocated range "
                           "[device address=0x%016llx] "
                           "[allocation size=%llu bytes] "
                           "[sync offset+size=%llu]\n",
                           entry->dev_addr, entry->size,
                           ref->size);
        }

        if (entry->direction == DMA_BIDIRECTIONAL)
                goto out;

        if (ref->direction != entry->direction) {
                err_printk(dev, entry, "DMA-API: device driver syncs "
                           "DMA memory with different direction "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped with %s] [synced with %s]\n",
                           (unsigned long long)ref->dev_addr, entry->size,
                           dir2name[entry->direction],
                           dir2name[ref->direction]);
        }

        if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
            !(ref->direction == DMA_TO_DEVICE))
                err_printk(dev, entry, "DMA-API: device driver syncs "
                           "device read-only DMA memory for cpu "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped with %s] [synced with %s]\n",
                           (unsigned long long)ref->dev_addr, entry->size,
                           dir2name[entry->direction],
                           dir2name[ref->direction]);

        if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
            !(ref->direction == DMA_FROM_DEVICE))
                err_printk(dev, entry, "DMA-API: device driver syncs "
                           "device write-only DMA memory to device "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped with %s] [synced with %s]\n",
                           (unsigned long long)ref->dev_addr, entry->size,
                           dir2name[entry->direction],
                           dir2name[ref->direction]);

        if (ref->sg_call_ents && ref->type == dma_debug_sg &&
            ref->sg_call_ents != entry->sg_call_ents) {
                err_printk(ref->dev, entry, "DMA-API: device driver syncs "
                           "DMA sg list with different entry count "
                           "[map count=%d] [sync count=%d]\n",
                           entry->sg_call_ents, ref->sg_call_ents);
        }

out:
        put_hash_bucket(bucket, &flags);
}

static void check_sg_segment(struct device *dev, struct scatterlist *sg)
{
#ifdef CONFIG_DMA_API_DEBUG_SG
	unsigned int max_seg = dma_get_max_seg_size(dev);
	u64 start, end, boundary = dma_get_seg_boundary(dev);

	/*
	 * Either the driver forgot to set dma_parms appropriately, or
	 * whoever generated the list forgot to check them.
	 */
	if (sg->length > max_seg)
		err_printk(dev, NULL, "DMA-API: mapping sg segment longer than device claims to support [len=%u] [max=%u]\n",
			   sg->length, max_seg);
	/*
	 * In some cases this could potentially be the DMA API
	 * implementation's fault, but it would usually imply that
	 * the scatterlist was built inappropriately to begin with.
	 */
	start = sg_dma_address(sg);
	end = start + sg_dma_len(sg) - 1;
	if ((start ^ end) & ~boundary)
		err_printk(dev, NULL, "DMA-API: mapping sg segment across boundary [start=0x%016llx] [end=0x%016llx] [boundary=0x%016llx]\n",
			   start, end, boundary);
#endif
}

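/*
 * Entry point wired up behind dma_map_page()/dma_map_single(). A typical
 * driver-side sequence this hook shadows looks roughly like:
 *
 *	dma_addr_t addr = dma_map_page(dev, page, 0, PAGE_SIZE,
 *				       DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, addr))
 *		return -ENOMEM;
 *
 * The entry recorded here is matched up again by debug_dma_unmap_page().
 */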
void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
			size_t size, int direction, dma_addr_t dma_addr,
			bool map_single)
{
	struct dma_debug_entry *entry;

	if (unlikely(dma_debug_disabled()))
		return;

	if (dma_mapping_error(dev, dma_addr))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->dev          = dev;
	entry->type         = dma_debug_page;
	entry->pfn          = page_to_pfn(page);
	entry->offset       = offset;
	entry->dev_addr     = dma_addr;
	entry->size         = size;
	entry->direction    = direction;
	entry->map_err_type = MAP_ERR_NOT_CHECKED;

	if (map_single)
		entry->type = dma_debug_single;

	check_for_stack(dev, page, offset);

	if (!PageHighMem(page)) {
		void *addr = page_address(page) + offset;

		check_for_illegal_area(dev, addr, size);
	}

	add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_map_page);

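/*
 * Called via dma_mapping_error(): flips the matching entry from
 * MAP_ERR_NOT_CHECKED to MAP_ERR_CHECKED so that check_unmap() can later
 * complain about drivers that never inspected the return value of a
 * mapping call. A driver satisfies the check simply by doing:
 *
 *	if (dma_mapping_error(dev, addr))
 *		goto err;	(illustrative error path)
 */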
void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_debug_entry ref;
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.dev = dev;
	ref.dev_addr = dma_addr;
	bucket = get_hash_bucket(&ref, &flags);

	list_for_each_entry(entry, &bucket->list, list) {
		if (!exact_match(&ref, entry))
			continue;

		/*
		 * The same physical address can be mapped multiple
		 * times. Without a hardware IOMMU this results in the
		 * same device addresses being put into the dma-debug
		 * hash multiple times too. This can result in false
		 * positives being reported. Therefore we implement a
		 * best-fit algorithm here which updates the first entry
		 * from the hash which fits the reference value and is
		 * not currently listed as being checked.
		 */
		if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
			entry->map_err_type = MAP_ERR_CHECKED;
			break;
		}
	}

	put_hash_bucket(bucket, &flags);
}
EXPORT_SYMBOL(debug_dma_mapping_error);

void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
			  size_t size, int direction, bool map_single)
{
	struct dma_debug_entry ref = {
		.type      = dma_debug_page,
		.dev       = dev,
		.dev_addr  = addr,
		.size      = size,
		.direction = direction,
	};

	if (unlikely(dma_debug_disabled()))
		return;

	if (map_single)
		ref.type = dma_debug_single;

	check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_unmap_page);

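/*
 * Scatter-gather mappings allocate one entry per *mapped* segment, since
 * an IOMMU may coalesce the list. The shadowed driver call is roughly:
 *
 *	int count = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *	if (count == 0)
 *		return -EIO;	(illustrative error handling)
 *
 * where count (mapped_ents here) may be smaller than nents.
 */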
void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
		      int nents, int mapped_ents, int direction)
{
	struct dma_debug_entry *entry;
	struct scatterlist *s;
	int i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, mapped_ents, i) {
		entry = dma_entry_alloc();
		if (!entry)
			return;

		entry->type           = dma_debug_sg;
		entry->dev            = dev;
		entry->pfn            = page_to_pfn(sg_page(s));
		entry->offset         = s->offset;
		entry->size           = sg_dma_len(s);
		entry->dev_addr       = sg_dma_address(s);
		entry->direction      = direction;
		entry->sg_call_ents   = nents;
		entry->sg_mapped_ents = mapped_ents;

		check_for_stack(dev, sg_page(s), s->offset);

		if (!PageHighMem(sg_page(s))) {
			check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
		}

		check_sg_segment(dev, s);

		add_dma_entry(entry);
	}
}
EXPORT_SYMBOL(debug_dma_map_sg);

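/*
 * Helper for the sg unmap/sync paths: look up the first entry of a
 * scatterlist in the hash and report how many segments were actually
 * mapped, so callers know when to stop walking a partially-mapped list.
 */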
static int get_nr_mapped_entries(struct device *dev,
				 struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;
	int mapped_ents;

	bucket      = get_hash_bucket(ref, &flags);
	entry       = bucket_find_exact(bucket, ref);
	mapped_ents = 0;

	if (entry)
		mapped_ents = entry->sg_mapped_ents;
	put_hash_bucket(bucket, &flags);

	return mapped_ents;
}

void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
			int nelems, int dir)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sglist, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type         = dma_debug_sg,
			.dev          = dev,
			.pfn          = page_to_pfn(sg_page(s)),
			.offset       = s->offset,
			.dev_addr     = sg_dma_address(s),
			.size         = sg_dma_len(s),
			.direction    = dir,
			.sg_call_ents = nelems,
		};

		if (mapped_ents && i >= mapped_ents)
			break;

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		check_unmap(&ref);
	}
}
EXPORT_SYMBOL(debug_dma_unmap_sg);

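/*
 * Coherent allocations are tracked with a direction of DMA_BIDIRECTIONAL
 * and may live in either the linear map or vmalloc space. The shadowed
 * driver-side pair is roughly:
 *
 *	void *cpu = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);
 *	...
 *	dma_free_coherent(dev, size, cpu, handle);
 */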
void debug_dma_alloc_coherent(struct device *dev, size_t size,
			      dma_addr_t dma_addr, void *virt)
{
	struct dma_debug_entry *entry;

	if (unlikely(dma_debug_disabled()))
		return;

	if (unlikely(virt == NULL))
		return;

	/* handle vmalloc and linear addresses */
	if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->type      = dma_debug_coherent;
	entry->dev       = dev;
	entry->offset    = offset_in_page(virt);
	entry->size      = size;
	entry->dev_addr  = dma_addr;
	entry->direction = DMA_BIDIRECTIONAL;

	if (is_vmalloc_addr(virt))
		entry->pfn = vmalloc_to_pfn(virt);
	else
		entry->pfn = page_to_pfn(virt_to_page(virt));

	add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_alloc_coherent);

void debug_dma_free_coherent(struct device *dev, size_t size,
			     void *virt, dma_addr_t addr)
{
	struct dma_debug_entry ref = {
		.type      = dma_debug_coherent,
		.dev       = dev,
		.offset    = offset_in_page(virt),
		.dev_addr  = addr,
		.size      = size,
		.direction = DMA_BIDIRECTIONAL,
	};

	/* handle vmalloc and linear addresses */
	if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
		return;

	if (is_vmalloc_addr(virt))
		ref.pfn = vmalloc_to_pfn(virt);
	else
		ref.pfn = page_to_pfn(virt_to_page(virt));

	if (unlikely(dma_debug_disabled()))
		return;

	check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_free_coherent);

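/*
 * Resource mappings cover raw physical address ranges (e.g. another
 * device's MMIO window) rather than struct pages, so there is no kernel
 * virtual address to sanity-check. The shadowed call is roughly:
 *
 *	dma_addr_t addr = dma_map_resource(dev, phys_addr, size,
 *					   DMA_BIDIRECTIONAL, 0);
 */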
void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
			    int direction, dma_addr_t dma_addr)
{
	struct dma_debug_entry *entry;

	if (unlikely(dma_debug_disabled()))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->type         = dma_debug_resource;
	entry->dev          = dev;
	entry->pfn          = PHYS_PFN(addr);
	entry->offset       = offset_in_page(addr);
	entry->size         = size;
	entry->dev_addr     = dma_addr;
	entry->direction    = direction;
	entry->map_err_type = MAP_ERR_NOT_CHECKED;

	add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_map_resource);

void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
			      size_t size, int direction)
{
	struct dma_debug_entry ref = {
		.type      = dma_debug_resource,
		.dev       = dev,
		.dev_addr  = dma_addr,
		.size      = size,
		.direction = direction,
	};

	if (unlikely(dma_debug_disabled()))
		return;

	check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_unmap_resource);

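/*
 * The sync hooks below build a reference entry and hand it to
 * check_sync(), which verifies that the synced range lies inside an
 * active mapping with a compatible direction. The driver-side pattern,
 * sketched for a device-to-cpu transfer, is:
 *
 *	dma_sync_single_for_cpu(dev, addr, size, DMA_FROM_DEVICE);
 *	... cpu reads the buffer ...
 *	dma_sync_single_for_device(dev, addr, size, DMA_FROM_DEVICE);
 */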
void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
				   size_t size, int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.type         = dma_debug_single;
	ref.dev          = dev;
	ref.dev_addr     = dma_handle;
	ref.size         = size;
	ref.direction    = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);

void debug_dma_sync_single_for_device(struct device *dev,
				      dma_addr_t dma_handle, size_t size,
				      int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.type         = dma_debug_single;
	ref.dev          = dev;
	ref.dev_addr     = dma_handle;
	ref.size         = size;
	ref.direction    = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_device);

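/*
 * For the *_range variants the reference size is recorded as
 * offset + size, so the containment check in check_sync() still works:
 * a sync of [offset, offset + size) is valid only if offset + size does
 * not exceed the size of the underlying mapping.
 */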
void debug_dma_sync_single_range_for_cpu(struct device *dev,
					 dma_addr_t dma_handle,
					 unsigned long offset, size_t size,
					 int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.type         = dma_debug_single;
	ref.dev          = dev;
	ref.dev_addr     = dma_handle;
	ref.size         = offset + size;
	ref.direction    = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);

void debug_dma_sync_single_range_for_device(struct device *dev,
					    dma_addr_t dma_handle,
					    unsigned long offset,
					    size_t size, int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.type         = dma_debug_single;
	ref.dev          = dev;
	ref.dev_addr     = dma_handle;
	ref.size         = offset + size;
	ref.direction    = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);

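/*
 * sg syncs walk at most the number of segments that were actually
 * mapped (queried on the first iteration), mirroring debug_dma_unmap_sg;
 * trailing unmapped segments of a coalesced list are skipped.
 */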
void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			       int nelems, int direction)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type         = dma_debug_sg,
			.dev          = dev,
			.pfn          = page_to_pfn(sg_page(s)),
			.offset       = s->offset,
			.dev_addr     = sg_dma_address(s),
			.size         = sg_dma_len(s),
			.direction    = direction,
			.sg_call_ents = nelems,
		};

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		if (i >= mapped_ents)
			break;

		check_sync(dev, &ref, true);
	}
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);

void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				  int nelems, int direction)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type         = dma_debug_sg,
			.dev          = dev,
			.pfn          = page_to_pfn(sg_page(s)),
			.offset       = s->offset,
			.dev_addr     = sg_dma_address(s),
			.size         = sg_dma_len(s),
			.direction    = direction,
			.sg_call_ents = nelems,
		};

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		if (i >= mapped_ents)
			break;

		check_sync(dev, &ref, false);
	}
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_device);

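/*
 * Boot-time driver filter: restricts dma-debug error reports to a single
 * driver by name. For example, appending
 *
 *	dma_debug_driver=e1000e
 *
 * to the kernel command line (driver name illustrative) limits reports
 * to that driver.
 */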
static int __init dma_debug_driver_setup(char *str)
{
	int i;

	for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
		current_driver_name[i] = *str;
		if (*str == 0)
			break;
	}

	if (current_driver_name[0])
		pr_info("DMA-API: enable driver filter for driver [%s]\n",
			current_driver_name);

	return 1;
}
__setup("dma_debug_driver=", dma_debug_driver_setup);