/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_allocator.c 1349 2004-12-16 21:09:43Z roland $
 */

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/bitmap.h>

#include "mthca_dev.h"

/* Trivial bitmap-based allocator */
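/*
 * The low bits of each object number index the bitmap; alloc->top
 * supplies the bits above alloc->max (within alloc->mask) and is
 * stepped every time the search wraps around and every time an
 * object is freed, so a recycled bitmap slot is handed out under a
 * different object number than before.
 */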
u32 mthca_alloc(struct mthca_alloc *alloc)
{
	u32 obj;

	spin_lock(&alloc->lock);
	obj = find_next_zero_bit(alloc->table, alloc->max, alloc->last);
	if (obj >= alloc->max) {
		alloc->top = (alloc->top + alloc->max) & alloc->mask;
		obj = find_first_zero_bit(alloc->table, alloc->max);
	}

	if (obj < alloc->max) {
		set_bit(obj, alloc->table);
		obj |= alloc->top;
	} else
		obj = -1;

	spin_unlock(&alloc->lock);

	return obj;
}

void mthca_free(struct mthca_alloc *alloc, u32 obj)
{
	obj &= alloc->max - 1;
	spin_lock(&alloc->lock);
	clear_bit(obj, alloc->table);
	alloc->last = min(alloc->last, obj);
	alloc->top = (alloc->top + alloc->max) & alloc->mask;
	spin_unlock(&alloc->lock);
}

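/*
 * Usage sketch (illustrative, not taken from this file):
 *
 *	err = mthca_alloc_init(&table.alloc, num, mask, reserved);
 *	if (err)
 *		return err;
 *
 *	obj = mthca_alloc(&table.alloc);
 *	if (obj == -1)
 *		return -ENOMEM;
 *	...
 *	mthca_free(&table.alloc, obj);
 *	...
 *	mthca_alloc_cleanup(&table.alloc);
 *
 * "table", "num", "mask" and "reserved" are placeholders: num must
 * be a power of 2, the first "reserved" objects stay allocated
 * forever, and mask limits which high bits the rotating "top"
 * prefix may occupy.
 */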
int mthca_alloc_init(struct mthca_alloc *alloc, u32 num, u32 mask,
		     u32 reserved)
{
	int i;

	/* num must be a power of 2 */
	if (num != 1 << (ffs(num) - 1))
		return -EINVAL;

	alloc->last = 0;
	alloc->top  = 0;
	alloc->max  = num;
	alloc->mask = mask;
	spin_lock_init(&alloc->lock);
	alloc->table = kmalloc(BITS_TO_LONGS(num) * sizeof (long),
			       GFP_KERNEL);
	if (!alloc->table)
		return -ENOMEM;

	bitmap_zero(alloc->table, num);
	for (i = 0; i < reserved; ++i)
		set_bit(i, alloc->table);

	return 0;
}

void mthca_alloc_cleanup(struct mthca_alloc *alloc)
{
	kfree(alloc->table);
}

/*
 * Array of pointers with lazy allocation of leaf pages.  Callers of
 * _get, _set and _clear methods must use a lock or otherwise
 * serialize access to the array.
 */

#define MTHCA_ARRAY_MASK (PAGE_SIZE / sizeof (void *) - 1)

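/*
 * Each leaf page holds PAGE_SIZE / sizeof (void *) pointers, so
 * index maps to page (index * sizeof (void *)) >> PAGE_SHIFT at
 * offset (index & MTHCA_ARRAY_MASK); "used" counts the non-NULL
 * entries on a page so the page can be freed when the last one is
 * cleared.
 */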
void *mthca_array_get(struct mthca_array *array, int index)
{
	int p = (index * sizeof (void *)) >> PAGE_SHIFT;

	if (array->page_list[p].page)
		return array->page_list[p].page[index & MTHCA_ARRAY_MASK];
	else
		return NULL;
}

int mthca_array_set(struct mthca_array *array, int index, void *value)
{
	int p = (index * sizeof (void *)) >> PAGE_SHIFT;

	/* Allocate with GFP_ATOMIC because we'll be called with locks held. */
	if (!array->page_list[p].page)
		array->page_list[p].page = (void **) get_zeroed_page(GFP_ATOMIC);

	if (!array->page_list[p].page)
		return -ENOMEM;

	array->page_list[p].page[index & MTHCA_ARRAY_MASK] = value;
	++array->page_list[p].used;

	return 0;
}

void mthca_array_clear(struct mthca_array *array, int index)
{
	int p = (index * sizeof (void *)) >> PAGE_SHIFT;

	if (--array->page_list[p].used == 0) {
		free_page((unsigned long) array->page_list[p].page);
		array->page_list[p].page = NULL;
	} else
		array->page_list[p].page[index & MTHCA_ARRAY_MASK] = NULL;

	if (array->page_list[p].used < 0)
		pr_debug("Array %p index %d page %d with ref count %d < 0\n",
			 array, index, p, array->page_list[p].used);
}
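
/*
 * Usage sketch (illustrative): a consumer table mapping object
 * numbers to structures might, under its own lock, do roughly
 *
 *	err = mthca_array_set(&table.array, n, ptr);
 *	...
 *	ptr = mthca_array_get(&table.array, n);
 *	...
 *	mthca_array_clear(&table.array, n);
 *
 * "table", "n" and "ptr" are placeholders; the array itself does no
 * locking, and clearing the last entry on a leaf page frees that
 * page.
 */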

int mthca_array_init(struct mthca_array *array, int nent)
{
	int npage = (nent * sizeof (void *) + PAGE_SIZE - 1) / PAGE_SIZE;
	int i;

	array->page_list = kmalloc(npage * sizeof *array->page_list, GFP_KERNEL);
	if (!array->page_list)
		return -ENOMEM;

	for (i = 0; i < npage; ++i) {
		array->page_list[i].page = NULL;
		array->page_list[i].used = 0;
	}

	return 0;
}

void mthca_array_cleanup(struct mthca_array *array, int nent)
{
	int i;

	for (i = 0; i < (nent * sizeof (void *) + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
		free_page((unsigned long) array->page_list[i].page);

	kfree(array->page_list);
}

/*
 * Handling for queue buffers -- we allocate a bunch of memory and
 * register it in a memory region at HCA virtual address 0.  If the
 * requested size is > max_direct, we split the allocation into
 * multiple pages, so we don't require too much contiguous memory.
 */

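/*
 * Note on the direct case below: the memory region is registered
 * with page size 1 << shift, which requires the buffer's DMA address
 * to be aligned to that size.  If dma_alloc_coherent() returns a
 * less-aligned address, the while loop halves the registration page
 * size (doubling npages) until the alignment holds.
 */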
int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct,
		    union mthca_buf *buf, int *is_direct, struct mthca_pd *pd,
		    int hca_write, struct mthca_mr *mr)
{
	int err = -ENOMEM;
	int npages, shift;
	u64 *dma_list = NULL;
	dma_addr_t t;
	int i;

	if (size <= max_direct) {
		*is_direct = 1;
		npages     = 1;
		shift      = get_order(size) + PAGE_SHIFT;

		buf->direct.buf = dma_alloc_coherent(&dev->pdev->dev,
						     size, &t, GFP_KERNEL);
		if (!buf->direct.buf)
			return -ENOMEM;

		pci_unmap_addr_set(&buf->direct, mapping, t);

		memset(buf->direct.buf, 0, size);

		while (t & ((1 << shift) - 1)) {
			--shift;
			npages *= 2;
		}

		dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
		if (!dma_list)
			goto err_free;

		for (i = 0; i < npages; ++i)
			dma_list[i] = t + i * (1 << shift);
	} else {
		*is_direct = 0;
		npages     = (size + PAGE_SIZE - 1) / PAGE_SIZE;
		shift      = PAGE_SHIFT;

		dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
		if (!dma_list)
			return -ENOMEM;

		buf->page_list = kmalloc(npages * sizeof *buf->page_list,
					 GFP_KERNEL);
		if (!buf->page_list)
			goto err_out;

		for (i = 0; i < npages; ++i)
			buf->page_list[i].buf = NULL;

		for (i = 0; i < npages; ++i) {
			buf->page_list[i].buf =
				dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
						   &t, GFP_KERNEL);
			if (!buf->page_list[i].buf)
				goto err_free;

			dma_list[i] = t;
			pci_unmap_addr_set(&buf->page_list[i], mapping, t);

			memset(buf->page_list[i].buf, 0, PAGE_SIZE);
		}
	}

	err = mthca_mr_alloc_phys(dev, pd->pd_num,
				  dma_list, shift, npages,
				  0, size,
				  MTHCA_MPT_FLAG_LOCAL_READ |
				  (hca_write ? MTHCA_MPT_FLAG_LOCAL_WRITE : 0),
				  mr);
	if (err)
		goto err_free;

	kfree(dma_list);

	return 0;

err_free:
	mthca_buf_free(dev, size, buf, *is_direct, NULL);

err_out:
	kfree(dma_list);

	return err;
}
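
/*
 * Usage sketch (illustrative; names are placeholders): a queue
 * consumer might allocate and free its buffer as
 *
 *	err = mthca_buf_alloc(dev, size, max_direct, &q->queue,
 *			      &q->is_direct, pd, 1, &q->mr);
 *	if (err)
 *		return err;
 *	...
 *	mthca_buf_free(dev, size, &q->queue, q->is_direct, &q->mr);
 *
 * On success *is_direct says whether buf->direct or buf->page_list
 * is valid, and mr is registered for local read (plus local write
 * when hca_write is set).
 */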

void mthca_buf_free(struct mthca_dev *dev, int size, union mthca_buf *buf,
		    int is_direct, struct mthca_mr *mr)
{
	int i;

	if (mr)
		mthca_free_mr(dev, mr);

	if (is_direct)
		dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
				  pci_unmap_addr(&buf->direct, mapping));
	else {
		for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
			dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
					  buf->page_list[i].buf,
					  pci_unmap_addr(&buf->page_list[i],
							 mapping));
		kfree(buf->page_list);
	}
}