/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>
#include <linux/dma-attrs.h>

#include "uverbs.h"

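/*
 * Largest number of scatterlist entries we put in one chunk: as many
 * page_list entries as still fit in a single page alongside the
 * ib_umem_chunk header itself.
 */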
#define IB_UMEM_MAX_PAGE_CHUNK						\
	((PAGE_SIZE - offsetof(struct ib_umem_chunk, page_list)) /	\
	 ((void *) &((struct ib_umem_chunk *) 0)->page_list[1] -	\
	  (void *) &((struct ib_umem_chunk *) 0)->page_list[0]))

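/*
 * Unmap and unpin every page in the umem's chunk list, marking pages
 * dirty first if the region was mapped writable and @dirty is set,
 * then free the chunks themselves.
 */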
static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
{
	struct ib_umem_chunk *chunk, *tmp;
	int i;

	list_for_each_entry_safe(chunk, tmp, &umem->chunk_list, list) {
		ib_dma_unmap_sg(dev, chunk->page_list,
				chunk->nents, DMA_BIDIRECTIONAL);
		for (i = 0; i < chunk->nents; ++i) {
			struct page *page = sg_page(&chunk->page_list[i]);

			if (umem->writable && dirty)
				set_page_dirty_lock(page);
			put_page(page);
		}

		kfree(chunk);
	}
}

/**
 * ib_umem_get - Pin and DMA map userspace memory.
 * @context: userspace context to pin memory for
 * @addr: userspace virtual address to start at
 * @size: length of region to pin
 * @access: IB_ACCESS_xxx flags for memory being pinned
 * @dmasync: flush in-flight DMA when the memory region is written
 */
struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
			    size_t size, int access, int dmasync)
{
	struct ib_umem *umem;
	struct page **page_list;
	struct vm_area_struct **vma_list;
	struct ib_umem_chunk *chunk;
	unsigned long locked;
	unsigned long lock_limit;
	unsigned long cur_base;
	unsigned long npages;
	int ret;
	int off;
	int i;
	DEFINE_DMA_ATTRS(attrs);

	if (dmasync)
		dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);

	if (!can_do_mlock())
		return ERR_PTR(-EPERM);

	umem = kmalloc(sizeof *umem, GFP_KERNEL);
	if (!umem)
		return ERR_PTR(-ENOMEM);

	umem->context   = context;
	umem->length    = size;
	umem->offset    = addr & ~PAGE_MASK;
	umem->page_size = PAGE_SIZE;
	/*
	 * We ask for writable memory if any access flags other than
	 * "remote read" are set.  "Local write" and "remote write"
	 * obviously require write access.  "Remote atomic" can do
	 * things like fetch and add, which will modify memory, and
	 * "MW bind" can change permissions by binding a window.
	 */
	umem->writable  = !!(access & ~IB_ACCESS_REMOTE_READ);

	/* We assume the memory is from hugetlb until proved otherwise */
	umem->hugetlb   = 1;

	INIT_LIST_HEAD(&umem->chunk_list);

	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list) {
		kfree(umem);
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * if we can't alloc the vma_list, it's not so bad;
	 * just assume the memory is not hugetlb memory
	 */
	vma_list = (struct vm_area_struct **) __get_free_page(GFP_KERNEL);
	if (!vma_list)
		umem->hugetlb = 0;

	npages = PAGE_ALIGN(size + umem->offset) >> PAGE_SHIFT;

	down_write(&current->mm->mmap_sem);

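	/*
	 * Refuse to pin more pages than the process's RLIMIT_MEMLOCK
	 * allows, unless it has CAP_IPC_LOCK.
	 */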
	locked     = npages + current->mm->locked_vm;
	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;

	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
		ret = -ENOMEM;
		goto out;
	}

	cur_base = addr & PAGE_MASK;

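	/*
	 * page_list is a single page of struct page pointers, so pin the
	 * region in batches of at most PAGE_SIZE / sizeof(struct page *)
	 * pages and carve each batch into scatterlist chunks below.
	 */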
	ret = 0;
	while (npages) {
		ret = get_user_pages(current, current->mm, cur_base,
				     min_t(unsigned long, npages,
					   PAGE_SIZE / sizeof (struct page *)),
				     1, !umem->writable, page_list, vma_list);

		if (ret < 0)
			goto out;

		cur_base += ret * PAGE_SIZE;
		npages   -= ret;

		off = 0;

		while (ret) {
			chunk = kmalloc(sizeof *chunk + sizeof (struct scatterlist) *
					min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK),
					GFP_KERNEL);
			if (!chunk) {
				ret = -ENOMEM;
				goto out;
			}

			chunk->nents = min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK);
			sg_init_table(chunk->page_list, chunk->nents);
			for (i = 0; i < chunk->nents; ++i) {
				if (vma_list &&
				    !is_vm_hugetlb_page(vma_list[i + off]))
					umem->hugetlb = 0;
				sg_set_page(&chunk->page_list[i], page_list[i + off], PAGE_SIZE, 0);
			}

			chunk->nmap = ib_dma_map_sg_attrs(context->device,
							  &chunk->page_list[0],
							  chunk->nents,
							  DMA_BIDIRECTIONAL,
							  &attrs);
			if (chunk->nmap <= 0) {
				for (i = 0; i < chunk->nents; ++i)
					put_page(sg_page(&chunk->page_list[i]));
				kfree(chunk);

				ret = -ENOMEM;
				goto out;
			}

			ret -= chunk->nents;
			off += chunk->nents;
			list_add_tail(&chunk->list, &umem->chunk_list);
		}

		ret = 0;
	}

out:
	if (ret < 0) {
		__ib_umem_release(context->device, umem, 0);
		kfree(umem);
	} else
		current->mm->locked_vm = locked;

	up_write(&current->mm->mmap_sem);
	if (vma_list)
		free_page((unsigned long) vma_list);
	free_page((unsigned long) page_list);

	return ret < 0 ? ERR_PTR(ret) : umem;
}
EXPORT_SYMBOL(ib_umem_get);
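
/*
 * Illustrative sketch only (not part of this file): a driver's
 * reg_user_mr path might pin a user region roughly as follows; the
 * ucontext pointer, access flags and error handling shown here are
 * hypothetical and depend on the driver:
 *
 *	umem = ib_umem_get(pd->uobject->context, start, length,
 *			   mr_access_flags, 0);
 *	if (IS_ERR(umem))
 *		return ERR_PTR(PTR_ERR(umem));
 *	...
 *	ib_umem_release(umem);
 */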

static void ib_umem_account(struct work_struct *work)
{
	struct ib_umem *umem = container_of(work, struct ib_umem, work);

	down_write(&umem->mm->mmap_sem);
	umem->mm->locked_vm -= umem->diff;
	up_write(&umem->mm->mmap_sem);
	mmput(umem->mm);
	kfree(umem);
}

/**
 * ib_umem_release - release memory pinned with ib_umem_get
 * @umem: umem struct to release
 */
void ib_umem_release(struct ib_umem *umem)
{
	struct ib_ucontext *context = umem->context;
	struct mm_struct *mm;
	unsigned long diff;

	__ib_umem_release(umem->context->device, umem, 1);

	mm = get_task_mm(current);
	if (!mm) {
		kfree(umem);
		return;
	}

	diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT;

	/*
	 * We may be called with the mm's mmap_sem already held.  This
	 * can happen when a userspace munmap() is the call that drops
	 * the last reference to our file and calls our release
	 * method.  If there are memory regions to destroy, we'll end
	 * up here and not be able to take the mmap_sem.  In that case
	 * we defer the vm_locked accounting to the system workqueue.
	 */
	if (context->closing) {
		if (!down_write_trylock(&mm->mmap_sem)) {
			INIT_WORK(&umem->work, ib_umem_account);
			umem->mm   = mm;
			umem->diff = diff;

			schedule_work(&umem->work);
			return;
		}
	} else
		down_write(&mm->mmap_sem);

	current->mm->locked_vm -= diff;
	up_write(&mm->mmap_sem);
	mmput(mm);
	kfree(umem);
}
EXPORT_SYMBOL(ib_umem_release);

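/**
 * ib_umem_page_count - return the number of pages covered by a umem
 * @umem: umem struct returned by ib_umem_get
 *
 * Counts pages of umem->page_size across all DMA-mapped scatterlist
 * entries.
 */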
int ib_umem_page_count(struct ib_umem *umem)
{
	struct ib_umem_chunk *chunk;
	int shift;
	int i;
	int n;

	shift = ilog2(umem->page_size);

	n = 0;
	list_for_each_entry(chunk, &umem->chunk_list, list)
		for (i = 0; i < chunk->nmap; ++i)
			n += sg_dma_len(&chunk->page_list[i]) >> shift;

	return n;
}
EXPORT_SYMBOL(ib_umem_page_count);