/*
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/dma-debug.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/slab.h>

#define HASH_SIZE       1024ULL
#define HASH_FN_SHIFT   13
#define HASH_FN_MASK    (HASH_SIZE - 1)

/* types of DMA mappings tracked by dma-debug */
enum {
	dma_debug_single,
	dma_debug_page,
	dma_debug_sg,
	dma_debug_coherent,
};

struct dma_debug_entry {
	struct list_head list;           /* entry in a hash bucket or the free list */
	struct device    *dev;           /* device the mapping belongs to */
	int              type;           /* one of the dma_debug_* types above */
	phys_addr_t      paddr;          /* CPU physical address of the mapping */
	u64              dev_addr;       /* DMA (bus) address seen by the device */
	u64              size;           /* size of the mapping in bytes */
	int              direction;     /* DMA data direction */
	int              sg_call_ents;   /* nents passed to dma_map_sg() */
	int              sg_mapped_ents; /* nents actually mapped by dma_map_sg() */
};

struct hash_bucket {
	struct list_head list;
	spinlock_t lock;
} __cacheline_aligned_in_smp;

/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];
/* List of pre-allocated dma_debug_entry's */
static LIST_HEAD(free_entries);
/* Lock for the list above */
static DEFINE_SPINLOCK(free_entries_lock);

/* Global disable flag - will be set in case of an error */
static bool global_disable __read_mostly;

static u32 num_free_entries;
static u32 min_free_entries;

/* number of preallocated entries requested by kernel cmdline */
static u32 req_entries;

/*
 * Hash related functions
 *
 * Every DMA-API request is saved into a struct dma_debug_entry. To
 * have quick access to these structs they are stored into a hash.
 */
static int hash_fn(struct dma_debug_entry *entry)
{
	/*
	 * The hash function is based on the dma address.
	 * With HASH_FN_SHIFT == 13 and HASH_FN_MASK == 0x3ff, bits 13-22
	 * of the dma address are used as the index into the hash.
	 */
	return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}
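/*
 * Worked example (illustrative value, not taken from real hardware): a
 * dev_addr of 0x12345000 hashes to bucket
 * (0x12345000 >> 13) & 0x3ff = 0x91a2 & 0x3ff = 0x1a2.
 */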

/*
 * Request exclusive access to a hash bucket for a given dma_debug_entry.
 */
static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
					   unsigned long *flags)
{
	int idx = hash_fn(entry);
	unsigned long __flags;

	spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
	*flags = __flags;
	return &dma_entry_hash[idx];
}

/*
 * Give up exclusive access to the hash bucket
 */
static void put_hash_bucket(struct hash_bucket *bucket,
			    unsigned long *flags)
{
	unsigned long __flags = *flags;

	spin_unlock_irqrestore(&bucket->lock, __flags);
}

/*
 * Search a given entry in the hash bucket list
 */
static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
						struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;

	list_for_each_entry(entry, &bucket->list, list) {
		if ((entry->dev_addr == ref->dev_addr) &&
		    (entry->dev == ref->dev))
			return entry;
	}

	return NULL;
}

/*
 * Add an entry to a hash bucket
 */
static void hash_bucket_add(struct hash_bucket *bucket,
			    struct dma_debug_entry *entry)
{
	list_add_tail(&entry->list, &bucket->list);
}

/*
 * Remove entry from a hash bucket list
 */
static void hash_bucket_del(struct dma_debug_entry *entry)
{
	list_del(&entry->list);
}

/*
 * Wrapper function for adding an entry to the hash.
 * This function takes care of locking itself.
 */
static void add_dma_entry(struct dma_debug_entry *entry)
{
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(entry, &flags);
	hash_bucket_add(bucket, entry);
	put_hash_bucket(bucket, &flags);
}
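/*
 * The matching removal path is not implemented at this stage. A sketch
 * of how one would be built from the helpers above (hypothetical code,
 * for illustration only):
 *
 *	bucket = get_hash_bucket(ref, &flags);
 *	entry  = hash_bucket_find(bucket, ref);
 *	if (entry)
 *		hash_bucket_del(entry);
 *	put_hash_bucket(bucket, &flags);
 */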

/* struct dma_debug_entry allocator
 *
 * The next two functions implement the allocator for
 * struct dma_debug_entries.
 */
static struct dma_debug_entry *dma_entry_alloc(void)
{
	struct dma_debug_entry *entry = NULL;
	unsigned long flags;

	spin_lock_irqsave(&free_entries_lock, flags);

	if (list_empty(&free_entries)) {
		printk(KERN_ERR "DMA-API: debugging out of memory "
				"- disabling\n");
		global_disable = true;
		goto out;
	}

	entry = list_entry(free_entries.next, struct dma_debug_entry, list);
	list_del(&entry->list);
	memset(entry, 0, sizeof(*entry));

	num_free_entries -= 1;
	if (num_free_entries < min_free_entries)
		min_free_entries = num_free_entries;

out:
	spin_unlock_irqrestore(&free_entries_lock, flags);

	return entry;
}

static void dma_entry_free(struct dma_debug_entry *entry)
{
	unsigned long flags;

	/*
	 * add to beginning of the list - this way the entries are
	 * more likely cache hot when they are reallocated.
	 */
	spin_lock_irqsave(&free_entries_lock, flags);
	list_add(&entry->list, &free_entries);
	num_free_entries += 1;
	spin_unlock_irqrestore(&free_entries_lock, flags);
}
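/*
 * Sketch of the intended use of this allocator (hypothetical code, the
 * DMA-API hooks themselves are not part of this file yet): a mapping
 * event allocates and fills an entry and publishes it in the hash, the
 * matching unmap event removes it again and returns it to the free list:
 *
 *	entry = dma_entry_alloc();
 *	if (!entry)
 *		return;
 *	entry->dev      = dev;
 *	entry->type     = dma_debug_page;
 *	entry->dev_addr = dev_addr;
 *	entry->size     = size;
 *	add_dma_entry(entry);
 *	...
 *	dma_entry_free(entry);
 */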
204
Joerg Roedel6bf07872009-01-09 12:54:42 +0100205/*
206 * DMA-API debugging init code
207 *
208 * The init code does two things:
209 * 1. Initialize core data structures
210 * 2. Preallocate a given number of dma_debug_entry structs
211 */
212
213static int prealloc_memory(u32 num_entries)
214{
215 struct dma_debug_entry *entry, *next_entry;
216 int i;
217
218 for (i = 0; i < num_entries; ++i) {
219 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
220 if (!entry)
221 goto out_err;
222
223 list_add_tail(&entry->list, &free_entries);
224 }
225
226 num_free_entries = num_entries;
227 min_free_entries = num_entries;
228
229 printk(KERN_INFO "DMA-API: preallocated %d debug entries\n",
230 num_entries);
231
232 return 0;
233
234out_err:
235
236 list_for_each_entry_safe(entry, next_entry, &free_entries, list) {
237 list_del(&entry->list);
238 kfree(entry);
239 }
240
241 return -ENOMEM;
242}
243
244/*
245 * Let the architectures decide how many entries should be preallocated.
246 */
void dma_debug_init(u32 num_entries)
{
	int i;

	if (global_disable)
		return;

	for (i = 0; i < HASH_SIZE; ++i) {
		INIT_LIST_HEAD(&dma_entry_hash[i].list);
		spin_lock_init(&dma_entry_hash[i].lock);
	}

	if (req_entries)
		num_entries = req_entries;

	if (prealloc_memory(num_entries) != 0) {
		printk(KERN_ERR "DMA-API: debugging out of memory error "
				"- disabled\n");
		global_disable = true;

		return;
	}

	printk(KERN_INFO "DMA-API: debugging enabled by kernel config\n");
}
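/*
 * Example call site in architecture code (hypothetical, the constant
 * name and value are chosen by the architecture):
 *
 *	#define PREALLOC_DMA_DEBUG_ENTRIES	4096
 *
 *	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
 */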

static __init int dma_debug_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (strncmp(str, "off", 3) == 0) {
		printk(KERN_INFO "DMA-API: debugging disabled on kernel "
				 "command line\n");
		global_disable = true;
	}

	return 0;
}

static __init int dma_debug_entries_cmdline(char *str)
{
	int res;

	if (!str)
		return -EINVAL;

	res = get_option(&str, &req_entries);

	if (!res)
		req_entries = 0;

	return 0;
}

__setup("dma_debug=", dma_debug_cmdline);
__setup("dma_debug_entries=", dma_debug_entries_cmdline);
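/*
 * Kernel command line usage of the two handlers above:
 *
 *	dma_debug=off              disable DMA-API debugging completely
 *	dma_debug_entries=<count>  override the number of preallocated entries
 */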