/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_allocator.c 1349 2004-12-16 21:09:43Z roland $
 */

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/bitmap.h>

#include "mthca_dev.h"

/* Trivial bitmap-based allocator */
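/*
 * Object numbers handed out are the bitmap index ORed with a rotating
 * "top" prefix: each time the bitmap search wraps (and on every free),
 * top advances by max and wraps under mask.  A freed index therefore
 * comes back with a different full object number for a while, which
 * presumably makes stale uses of old object numbers easier to spot.
 */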
u32 mthca_alloc(struct mthca_alloc *alloc)
{
        u32 obj;

        spin_lock(&alloc->lock);
        obj = find_next_zero_bit(alloc->table, alloc->max, alloc->last);
        if (obj >= alloc->max) {
                alloc->top = (alloc->top + alloc->max) & alloc->mask;
                obj = find_first_zero_bit(alloc->table, alloc->max);
        }

        if (obj < alloc->max) {
                set_bit(obj, alloc->table);
                obj |= alloc->top;
        } else
                obj = -1;

        spin_unlock(&alloc->lock);

        return obj;
}

void mthca_free(struct mthca_alloc *alloc, u32 obj)
{
        obj &= alloc->max - 1;

        spin_lock(&alloc->lock);
        clear_bit(obj, alloc->table);
        alloc->last = min(alloc->last, obj);
        alloc->top = (alloc->top + alloc->max) & alloc->mask;
        spin_unlock(&alloc->lock);
}

int mthca_alloc_init(struct mthca_alloc *alloc, u32 num, u32 mask,
                     u32 reserved)
{
        int i;

        /* num must be a power of 2 */
        if (num != 1 << (ffs(num) - 1))
                return -EINVAL;

        alloc->last = 0;
        alloc->top  = 0;
        alloc->max  = num;
        alloc->mask = mask;
        spin_lock_init(&alloc->lock);
        alloc->table = kmalloc(BITS_TO_LONGS(num) * sizeof (long),
                               GFP_KERNEL);
        if (!alloc->table)
                return -ENOMEM;

        bitmap_zero(alloc->table, num);
        for (i = 0; i < reserved; ++i)
                set_bit(i, alloc->table);

        return 0;
}

void mthca_alloc_cleanup(struct mthca_alloc *alloc)
{
        kfree(alloc->table);
}
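
/*
 * Usage sketch for the allocator above (illustrative only, not driver
 * code; the sizes and the "my_table" name are invented for the example):
 *
 *      struct mthca_alloc my_table;
 *      u32 obj;
 *
 *      // 256 objects, first 16 reserved; mask must cover the rotating
 *      // top bits as well as the index bits.
 *      if (mthca_alloc_init(&my_table, 256, 511, 16))
 *              return -ENOMEM;
 *
 *      obj = mthca_alloc(&my_table);   // -1 if the table is full
 *      if (obj != -1)
 *              mthca_free(&my_table, obj);
 *
 *      mthca_alloc_cleanup(&my_table);
 */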

/*
 * Array of pointers with lazy allocation of leaf pages.  Callers of
 * _get, _set and _clear methods must use a lock or otherwise
 * serialize access to the array.
 */
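
/*
 * An index selects leaf page p = (index * sizeof (void *)) >> PAGE_SHIFT
 * and slot index & (PAGE_SIZE / sizeof (void *) - 1) within that page,
 * so each leaf page holds PAGE_SIZE / sizeof (void *) consecutive
 * entries and pages are only allocated once something is stored there.
 */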

void *mthca_array_get(struct mthca_array *array, int index)
{
        int p = (index * sizeof (void *)) >> PAGE_SHIFT;

        if (array->page_list[p].page) {
                int i = index & (PAGE_SIZE / sizeof (void *) - 1);
                return array->page_list[p].page[i];
        } else
                return NULL;
}

int mthca_array_set(struct mthca_array *array, int index, void *value)
{
        int p = (index * sizeof (void *)) >> PAGE_SHIFT;

        /* Allocate with GFP_ATOMIC because we'll be called with locks held. */
        if (!array->page_list[p].page)
                array->page_list[p].page = (void **) get_zeroed_page(GFP_ATOMIC);

        if (!array->page_list[p].page)
                return -ENOMEM;

        array->page_list[p].page[index & (PAGE_SIZE / sizeof (void *) - 1)] =
                value;
        ++array->page_list[p].used;

        return 0;
}

void mthca_array_clear(struct mthca_array *array, int index)
{
        int p = (index * sizeof (void *)) >> PAGE_SHIFT;

        if (--array->page_list[p].used == 0) {
                free_page((unsigned long) array->page_list[p].page);
                array->page_list[p].page = NULL;
        } else
                /* Clear the stale entry so a later _get can't return it */
                array->page_list[p].page[index & (PAGE_SIZE /
                                                  sizeof (void *) - 1)] = NULL;

        if (array->page_list[p].used < 0)
                pr_debug("Array %p index %d page %d with ref count %d < 0\n",
                         array, index, p, array->page_list[p].used);
}

int mthca_array_init(struct mthca_array *array, int nent)
{
        int npage = (nent * sizeof (void *) + PAGE_SIZE - 1) / PAGE_SIZE;
        int i;

        array->page_list = kmalloc(npage * sizeof *array->page_list, GFP_KERNEL);
        if (!array->page_list)
                return -ENOMEM;

        for (i = 0; i < npage; ++i) {
                array->page_list[i].page = NULL;
                array->page_list[i].used = 0;
        }

        return 0;
}

void mthca_array_cleanup(struct mthca_array *array, int nent)
{
        int i;

        for (i = 0; i < (nent * sizeof (void *) + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
                free_page((unsigned long) array->page_list[i].page);

        kfree(array->page_list);
}
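
/*
 * Usage sketch for mthca_array (illustrative only; the QP-lookup use
 * and the names are invented for the example, and the caller must
 * provide its own locking as noted above):
 *
 *      struct mthca_array qps;
 *      void *my_qp;
 *
 *      if (mthca_array_init(&qps, num_qps))
 *              return -ENOMEM;
 *
 *      if (mthca_array_set(&qps, qpn, my_qp))  // safe with locks held
 *              goto err;
 *      my_qp = mthca_array_get(&qps, qpn);
 *      mthca_array_clear(&qps, qpn);
 *
 *      mthca_array_cleanup(&qps, num_qps);
 */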

/*
 * Handling for queue buffers -- we allocate a bunch of memory and
 * register it in a memory region at HCA virtual address 0.  If the
 * requested size is > max_direct, we split the allocation into
 * multiple pages, so we don't require too much contiguous memory.
 */
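
/*
 * On success the caller reaches a direct buffer through buf->direct.buf
 * and an indirect one through the PAGE_SIZE chunks at
 * buf->page_list[i].buf; *is_direct records which layout was chosen so
 * that mthca_buf_free() can undo it.
 */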

int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct,
                    union mthca_buf *buf, int *is_direct, struct mthca_pd *pd,
                    int hca_write, struct mthca_mr *mr)
{
        int err = -ENOMEM;
        int npages, shift;
        u64 *dma_list = NULL;
        dma_addr_t t;
        int i;

        if (size <= max_direct) {
                *is_direct = 1;
                npages     = 1;
                shift      = get_order(size) + PAGE_SHIFT;

                buf->direct.buf = dma_alloc_coherent(&dev->pdev->dev,
                                                     size, &t, GFP_KERNEL);
                if (!buf->direct.buf)
                        return -ENOMEM;

                pci_unmap_addr_set(&buf->direct, mapping, t);

                memset(buf->direct.buf, 0, size);

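                /*
                 * The DMA address may be less aligned than the buffer
                 * size; shrink the page size we report to the HCA (and
                 * double the page count) until it evenly divides t.
                 */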
                while (t & ((1 << shift) - 1)) {
                        --shift;
                        npages *= 2;
                }

                dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
                if (!dma_list)
                        goto err_free;

                for (i = 0; i < npages; ++i)
                        dma_list[i] = t + i * (1 << shift);
        } else {
                *is_direct = 0;
                npages     = (size + PAGE_SIZE - 1) / PAGE_SIZE;
                shift      = PAGE_SHIFT;

                dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
                if (!dma_list)
                        return -ENOMEM;

                buf->page_list = kmalloc(npages * sizeof *buf->page_list,
                                         GFP_KERNEL);
                if (!buf->page_list)
                        goto err_out;

                for (i = 0; i < npages; ++i)
                        buf->page_list[i].buf = NULL;

                for (i = 0; i < npages; ++i) {
                        buf->page_list[i].buf =
                                dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
                                                   &t, GFP_KERNEL);
                        if (!buf->page_list[i].buf)
                                goto err_free;

                        dma_list[i] = t;
                        pci_unmap_addr_set(&buf->page_list[i], mapping, t);

                        memset(buf->page_list[i].buf, 0, PAGE_SIZE);
                }
        }

        err = mthca_mr_alloc_phys(dev, pd->pd_num,
                                  dma_list, shift, npages,
                                  0, size,
                                  MTHCA_MPT_FLAG_LOCAL_READ |
                                  (hca_write ? MTHCA_MPT_FLAG_LOCAL_WRITE : 0),
                                  mr);
        if (err)
                goto err_free;

        kfree(dma_list);

        return 0;

err_free:
        mthca_buf_free(dev, size, buf, *is_direct, NULL);

err_out:
        kfree(dma_list);

        return err;
}

void mthca_buf_free(struct mthca_dev *dev, int size, union mthca_buf *buf,
                    int is_direct, struct mthca_mr *mr)
{
        int i;

        if (mr)
                mthca_free_mr(dev, mr);

        if (is_direct)
                dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
                                  pci_unmap_addr(&buf->direct, mapping));
        else {
                for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
                        dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
                                          buf->page_list[i].buf,
                                          pci_unmap_addr(&buf->page_list[i],
                                                         mapping));
                kfree(buf->page_list);
        }
}
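
/*
 * Usage sketch for the queue-buffer helpers (illustrative only; the
 * size, the max_direct threshold and the "my_pd" name are invented for
 * the example -- in the driver these buffers back CQs, QPs and EQs):
 *
 *      union mthca_buf buf;
 *      struct mthca_mr mr;
 *      int is_direct;
 *      int err;
 *
 *      err = mthca_buf_alloc(dev, 16384, 1 << 16, &buf, &is_direct,
 *                            &my_pd, 1, &mr);   // 1 => HCA may write
 *      if (err)
 *              return err;
 *      ...
 *      mthca_buf_free(dev, 16384, &buf, is_direct, &mr);
 */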