/*
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/dma-debug.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/list.h>

#define HASH_SIZE	1024ULL
#define HASH_FN_SHIFT	13
#define HASH_FN_MASK	(HASH_SIZE - 1)

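/*
 * Derived values, for reference: HASH_SIZE is 1024 buckets, so
 * HASH_FN_MASK comes out to 0x3ff - the low 10 bits of the shifted
 * dma address select the bucket.
 */
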
enum {
	dma_debug_single,
	dma_debug_page,
	dma_debug_sg,
	dma_debug_coherent,
};

struct dma_debug_entry {
	struct list_head list;
	struct device *dev;
	int type;		/* one of the dma_debug_* values above */
	phys_addr_t paddr;	/* cpu physical address of the mapping */
	u64 dev_addr;		/* dma address as seen by the device */
	u64 size;
	int direction;		/* DMA_TO_DEVICE, DMA_FROM_DEVICE, ... */
	int sg_call_ents;	/* nents passed to dma_map_sg() */
	int sg_mapped_ents;	/* number of entries actually mapped */
};

struct hash_bucket {
	struct list_head list;
	spinlock_t lock;
} __cacheline_aligned_in_smp;	/* keep bucket locks in separate cachelines */

/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];
/* List of pre-allocated dma_debug_entry's */
static LIST_HEAD(free_entries);
/* Lock for the list above */
static DEFINE_SPINLOCK(free_entries_lock);

/* Global disable flag - will be set in case of an error */
static bool global_disable __read_mostly;

static u32 num_free_entries;
static u32 min_free_entries;

/*
 * Hash related functions
 *
 * Every DMA-API request is saved into a struct dma_debug_entry. To
 * have quick access to these structs they are stored in a hash table.
 */
static int hash_fn(struct dma_debug_entry *entry)
{
	/*
	 * The hash function is based on the dma address.
	 * We use bits 13-22 here as the index into the hash table.
	 */
	return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}

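/*
 * Worked example: dev_addr 0x12345678 hashes to
 * (0x12345678 >> 13) & 0x3ff = 0x1a2, i.e. bucket 418.
 */
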
/*
 * Request exclusive access to a hash bucket for a given dma_debug_entry.
 */
static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
					   unsigned long *flags)
{
	int idx = hash_fn(entry);
	unsigned long __flags;

	spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
	*flags = __flags;
	return &dma_entry_hash[idx];
}

/*
 * Give up exclusive access to the hash bucket
 */
static void put_hash_bucket(struct hash_bucket *bucket,
			    unsigned long *flags)
{
	unsigned long __flags = *flags;

	spin_unlock_irqrestore(&bucket->lock, __flags);
}

/*
 * Search for a given entry in the hash bucket list
 */
static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
						struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;

	list_for_each_entry(entry, &bucket->list, list) {
		if ((entry->dev_addr == ref->dev_addr) &&
		    (entry->dev == ref->dev))
			return entry;
	}

	return NULL;
}

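/*
 * A typical lookup, sketched for illustration only - the caller must
 * hold the bucket lock, which get_hash_bucket() acquires:
 *
 *	struct hash_bucket *bucket;
 *	unsigned long flags;
 *
 *	bucket = get_hash_bucket(ref, &flags);
 *	entry  = hash_bucket_find(bucket, ref);
 *	... inspect or remove the entry ...
 *	put_hash_bucket(bucket, &flags);
 */
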
/*
 * Add an entry to a hash bucket
 */
static void hash_bucket_add(struct hash_bucket *bucket,
			    struct dma_debug_entry *entry)
{
	list_add_tail(&entry->list, &bucket->list);
}

/*
 * Remove entry from a hash bucket list
 */
static void hash_bucket_del(struct dma_debug_entry *entry)
{
	list_del(&entry->list);
}

/*
 * Wrapper function for adding an entry to the hash.
 * This function takes care of locking itself.
 */
static void add_dma_entry(struct dma_debug_entry *entry)
{
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(entry, &flags);
	hash_bucket_add(bucket, entry);
	put_hash_bucket(bucket, &flags);
}

/* struct dma_debug_entry allocator
 *
 * The next two functions implement the allocator for
 * struct dma_debug_entries.
 */
static struct dma_debug_entry *dma_entry_alloc(void)
{
	struct dma_debug_entry *entry = NULL;
	unsigned long flags;

	spin_lock_irqsave(&free_entries_lock, flags);

	if (list_empty(&free_entries)) {
		printk(KERN_ERR "DMA-API: debugging out of memory "
				"- disabling\n");
		global_disable = true;
		goto out;
	}

	entry = list_entry(free_entries.next, struct dma_debug_entry, list);
	list_del(&entry->list);
	memset(entry, 0, sizeof(*entry));

	num_free_entries -= 1;
	if (num_free_entries < min_free_entries)
		min_free_entries = num_free_entries;

out:
	spin_unlock_irqrestore(&free_entries_lock, flags);

	return entry;
}

static void dma_entry_free(struct dma_debug_entry *entry)
{
	unsigned long flags;

	/*
	 * Add to beginning of the list - this way the entries are
	 * more likely cache hot when they are reallocated.
	 */
	spin_lock_irqsave(&free_entries_lock, flags);
	list_add(&entry->list, &free_entries);
	num_free_entries += 1;
	spin_unlock_irqrestore(&free_entries_lock, flags);
}
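
/*
 * Sketch of an entry's life cycle, for illustration only - the actual
 * map/unmap hooks are not part of this excerpt:
 *
 *	entry = dma_entry_alloc();
 *	if (!entry)
 *		return;
 *	entry->dev      = dev;
 *	entry->dev_addr = dev_addr;
 *	add_dma_entry(entry);
 *
 *	... and when the mapping is torn down:
 *
 *	bucket = get_hash_bucket(ref, &flags);
 *	entry  = hash_bucket_find(bucket, ref);
 *	if (entry)
 *		hash_bucket_del(entry);
 *	put_hash_bucket(bucket, &flags);
 *	if (entry)
 *		dma_entry_free(entry);
 */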
200