/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <rdma/ib_umem_odp.h>

#include "uverbs.h"

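/*
 * Undo the work of ib_umem_get(): DMA-unmap the scatterlist, drop the
 * reference on each pinned page (marking it dirty first when 'dirty'
 * is set and the umem was writable, since the device may have DMA'd
 * into it), and free the scatterlist table.
 */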
static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
{
	struct scatterlist *sg;
	struct page *page;
	int i;

	if (umem->nmap > 0)
		ib_dma_unmap_sg(dev, umem->sg_head.sgl,
				umem->nmap,
				DMA_BIDIRECTIONAL);

	for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) {
		page = sg_page(sg);
		if (umem->writable && dirty)
			set_page_dirty_lock(page);
		put_page(page);
	}

	sg_free_table(&umem->sg_head);
}
69
Roland Dreierf7c6a7b2007-03-04 16:15:11 -080070/**
71 * ib_umem_get - Pin and DMA map userspace memory.
Shachar Raindel8ada2c12014-12-11 17:04:17 +020072 *
73 * If access flags indicate ODP memory, avoid pinning. Instead, stores
Haggai Eran882214e2014-12-11 17:04:18 +020074 * the mm for future page fault handling in conjunction with MMU notifiers.
Shachar Raindel8ada2c12014-12-11 17:04:17 +020075 *
Roland Dreierf7c6a7b2007-03-04 16:15:11 -080076 * @context: userspace context to pin memory for
77 * @addr: userspace virtual address to start at
78 * @size: length of region to pin
79 * @access: IB_ACCESS_xxx flags for memory being pinned
Arthur Kepnercb9fbc52008-04-29 01:00:34 -070080 * @dmasync: flush in-flight DMA when the memory region is written
Roland Dreierf7c6a7b2007-03-04 16:15:11 -080081 */
struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
			    size_t size, int access, int dmasync)
{
	struct ib_umem *umem;
	struct page **page_list;
	struct vm_area_struct **vma_list;
	unsigned long locked;
	unsigned long lock_limit;
	unsigned long cur_base;
	unsigned long npages;
	int ret;
	int i;
	unsigned long dma_attrs = 0;
	struct scatterlist *sg, *sg_list_start;
	int need_release = 0;
	unsigned int gup_flags = FOLL_WRITE;

	if (dmasync)
		dma_attrs |= DMA_ATTR_WRITE_BARRIER;

	if (!size)
		return ERR_PTR(-EINVAL);

	/*
	 * If the combination of the addr and size requested for this memory
	 * region causes an integer overflow, return error.
	 */
	if (((addr + size) < addr) ||
	    PAGE_ALIGN(addr + size) < (addr + size))
		return ERR_PTR(-EINVAL);

	if (!can_do_mlock())
		return ERR_PTR(-EPERM);

	umem = kzalloc(sizeof *umem, GFP_KERNEL);
	if (!umem)
		return ERR_PTR(-ENOMEM);

	umem->context   = context;
	umem->length    = size;
	umem->address   = addr;
	umem->page_size = PAGE_SIZE;
	umem->pid       = get_task_pid(current, PIDTYPE_PID);
	/*
	 * We ask for writable memory if any of the following
	 * access flags are set.  "Local write" and "remote write"
	 * obviously require write access.  "Remote atomic" can do
	 * things like fetch and add, which will modify memory, and
	 * "MW bind" can change permissions by binding a window.
	 */
	umem->writable  = !!(access &
		(IB_ACCESS_LOCAL_WRITE   | IB_ACCESS_REMOTE_WRITE |
		 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));

	if (access & IB_ACCESS_ON_DEMAND) {
		put_pid(umem->pid);
		ret = ib_umem_odp_get(context, umem);
		if (ret) {
			kfree(umem);
			return ERR_PTR(ret);
		}
		return umem;
	}

	umem->odp_data = NULL;

	/* We assume the memory is from hugetlb until proved otherwise */
	umem->hugetlb   = 1;

	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list) {
		put_pid(umem->pid);
		kfree(umem);
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * If we can't alloc the vma_list, it's not so bad;
	 * just assume the memory is not hugetlb memory.
	 */
	vma_list = (struct vm_area_struct **) __get_free_page(GFP_KERNEL);
	if (!vma_list)
		umem->hugetlb = 0;

	npages = ib_umem_num_pages(umem);

	down_write(&current->mm->mmap_sem);

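	/*
	 * Check the whole region against RLIMIT_MEMLOCK before pinning
	 * anything; current->mm->pinned_vm is only updated to the new
	 * total on the success path at "out:" below.
	 */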
	locked     = npages + current->mm->pinned_vm;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
		ret = -ENOMEM;
		goto out;
	}

	cur_base = addr & PAGE_MASK;

	if (npages == 0 || npages > UINT_MAX) {
		ret = -EINVAL;
		goto out;
	}

	ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL);
	if (ret)
		goto out;

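	/*
	 * Pages are always requested with FOLL_WRITE; for a umem that
	 * is not writable, FOLL_FORCE additionally lets
	 * get_user_pages() break copy-on-write so the pin can succeed
	 * even on a private mapping the user cannot write to.
	 */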
	if (!umem->writable)
		gup_flags |= FOLL_FORCE;

	need_release = 1;
	sg_list_start = umem->sg_head.sgl;

	while (npages) {
		ret = get_user_pages(cur_base,
				     min_t(unsigned long, npages,
					   PAGE_SIZE / sizeof (struct page *)),
				     gup_flags, page_list, vma_list);

		if (ret < 0)
			goto out;

		umem->npages += ret;
		cur_base += ret * PAGE_SIZE;
		npages   -= ret;

		for_each_sg(sg_list_start, sg, ret, i) {
			if (vma_list && !is_vm_hugetlb_page(vma_list[i]))
				umem->hugetlb = 0;

			sg_set_page(sg, page_list[i], PAGE_SIZE, 0);
		}

		/* Prepare for the next loop iteration. */
		sg_list_start = sg;
	}

	umem->nmap = ib_dma_map_sg_attrs(context->device,
					 umem->sg_head.sgl,
					 umem->npages,
					 DMA_BIDIRECTIONAL,
					 dma_attrs);

	if (umem->nmap <= 0) {
		ret = -ENOMEM;
		goto out;
	}

	ret = 0;

out:
	if (ret < 0) {
		if (need_release)
			__ib_umem_release(context->device, umem, 0);
		put_pid(umem->pid);
		kfree(umem);
	} else
		current->mm->pinned_vm = locked;

	up_write(&current->mm->mmap_sem);
	if (vma_list)
		free_page((unsigned long) vma_list);
	free_page((unsigned long) page_list);

	return ret < 0 ? ERR_PTR(ret) : umem;
}
EXPORT_SYMBOL(ib_umem_get);
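
/*
 * A minimal usage sketch (illustrative only; "pd", "start", "length"
 * and "access_flags" are assumed driver-side names, not part of this
 * file).  A driver's reg_user_mr verb would typically do:
 *
 *	struct ib_umem *umem;
 *
 *	umem = ib_umem_get(pd->uobject->context, start, length,
 *			   access_flags, 0);
 *	if (IS_ERR(umem))
 *		return ERR_CAST(umem);
 *
 * and pair it with ib_umem_release(umem) when the MR is destroyed.
 */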

/*
 * Work item callback used by ib_umem_release() to defer pinned_vm
 * accounting to the ib_wq workqueue when mmap_sem cannot be taken
 * directly.
 */
static void ib_umem_account(struct work_struct *work)
{
	struct ib_umem *umem = container_of(work, struct ib_umem, work);

	down_write(&umem->mm->mmap_sem);
	umem->mm->pinned_vm -= umem->diff;
	up_write(&umem->mm->mmap_sem);
	mmput(umem->mm);
	kfree(umem);
}

/**
 * ib_umem_release - release memory pinned with ib_umem_get
 * @umem: umem struct to release
 */
void ib_umem_release(struct ib_umem *umem)
{
	struct ib_ucontext *context = umem->context;
	struct mm_struct *mm;
	struct task_struct *task;
	unsigned long diff;

	if (umem->odp_data) {
		ib_umem_odp_release(umem);
		return;
	}

	__ib_umem_release(umem->context->device, umem, 1);

	task = get_pid_task(umem->pid, PIDTYPE_PID);
	put_pid(umem->pid);
	if (!task)
		goto out;
	mm = get_task_mm(task);
	put_task_struct(task);
	if (!mm)
		goto out;

	diff = ib_umem_num_pages(umem);

	/*
	 * We may be called with the mm's mmap_sem already held.  This
	 * can happen when a userspace munmap() is the call that drops
	 * the last reference to our file and calls our release
	 * method.  If there are memory regions to destroy, we'll end
	 * up here and not be able to take the mmap_sem.  In that case
	 * we defer the pinned_vm accounting to the system workqueue.
	 */
	if (context->closing) {
		if (!down_write_trylock(&mm->mmap_sem)) {
			INIT_WORK(&umem->work, ib_umem_account);
			umem->mm   = mm;
			umem->diff = diff;

			queue_work(ib_wq, &umem->work);
			return;
		}
	} else
		down_write(&mm->mmap_sem);

	mm->pinned_vm -= diff;
	up_write(&mm->mmap_sem);
	mmput(mm);
out:
	kfree(umem);
}
EXPORT_SYMBOL(ib_umem_release);
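
/**
 * ib_umem_page_count - number of device pages covered by a umem
 * @umem: umem to count pages in
 *
 * Sums the DMA-mapped scatterlist entry lengths in units of
 * umem->page_size.  For ODP umems, which have no pinned scatterlist,
 * this is simply the number of pages spanned by the region.
 */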
int ib_umem_page_count(struct ib_umem *umem)
{
	int shift;
	int i;
	int n;
	struct scatterlist *sg;

	if (umem->odp_data)
		return ib_umem_num_pages(umem);

	shift = ilog2(umem->page_size);

	n = 0;
	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
		n += sg_dma_len(sg) >> shift;

	return n;
}
EXPORT_SYMBOL(ib_umem_page_count);

/*
 * Copy from the given ib_umem's pages to the given buffer.
 *
 * umem - the umem to copy from
 * offset - offset to start copying from
 * dst - destination buffer
 * length - buffer length
 *
 * Returns 0 on success, or an error code.
 */
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		      size_t length)
{
	size_t end = offset + length;
	int ret;

	if (offset > umem->length || length > umem->length - offset) {
		pr_err("ib_umem_copy_from not in range. offset: %zd umem length: %zd end: %zd\n",
		       offset, umem->length, end);
		return -EINVAL;
	}

	ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->nmap, dst, length,
				 offset + ib_umem_offset(umem));

	if (ret < 0)
		return ret;
	else if (ret != length)
		return -EINVAL;
	else
		return 0;
}
EXPORT_SYMBOL(ib_umem_copy_from);
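
/*
 * For example (illustrative only; "my_hdr" is an assumed type, not
 * part of this file), a driver that lets userspace place a control
 * header at the start of a registered region could read it with:
 *
 *	struct my_hdr hdr;
 *	int err;
 *
 *	err = ib_umem_copy_from(&hdr, umem, 0, sizeof(hdr));
 *	if (err)
 *		return err;
 */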