/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <ttm/ttm_module.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>

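/*
 * Number of consecutive pages mapped per fault, starting at the
 * faulting page; see the speculative prefault loop in ttm_bo_vm_fault().
 */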
#define TTM_BO_VM_NUM_PREFAULT 16

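/*
 * ttm_bo_vm_fault_idle - quiesce a buffer object that is being moved
 *
 * If the buffer is flagged as moving, wait for the pipelined move to
 * complete. Returns 0 when the buffer is idle, VM_FAULT_RETRY when the
 * wait is done with mmap_sem released so the fault can be retried, or
 * VM_FAULT_SIGBUS / VM_FAULT_NOPAGE if the wait failed.
 */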
static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
				struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret = 0;

	spin_lock(&bdev->fence_lock);
	if (likely(!test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)))
		goto out_unlock;

	/*
	 * Quick non-stalling check for idle.
	 */
	ret = ttm_bo_wait(bo, false, false, true);
	if (likely(ret == 0))
		goto out_unlock;

	/*
	 * If possible, avoid waiting for GPU with mmap_sem
	 * held.
	 */
	if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
		ret = VM_FAULT_RETRY;
		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
			goto out_unlock;

		up_read(&vma->vm_mm->mmap_sem);
		(void) ttm_bo_wait(bo, false, true, false);
		goto out_unlock;
	}

	/*
	 * Ordinary wait.
	 */
	ret = ttm_bo_wait(bo, false, true, false);
	if (unlikely(ret != 0))
		ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
			VM_FAULT_NOPAGE;

out_unlock:
	spin_unlock(&bdev->fence_lock);
	return ret;
}

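/*
 * ttm_bo_vm_fault - .fault handler for TTM buffer-object mappings
 *
 * Trylocks the buffer reservation to avoid deadlocking against the
 * mmap_sem -> bo_reserve locking order, waits for pending moves, sets
 * up the page protection for the memory type and then prefaults up to
 * TTM_BO_VM_NUM_PREFAULT PTEs per fault.
 */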
static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long page_offset;
	unsigned long page_last;
	unsigned long pfn;
	struct ttm_tt *ttm = NULL;
	struct page *page;
	int ret;
	int i;
	unsigned long address = (unsigned long)vmf->virtual_address;
	int retval = VM_FAULT_NOPAGE;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];
	struct vm_area_struct cvma;

	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_sem and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after scheduling.
	 */

	ret = ttm_bo_reserve(bo, true, true, false, 0);
	if (unlikely(ret != 0)) {
		if (ret == -EBUSY)
			set_need_resched();
		return VM_FAULT_NOPAGE;
	}

	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
			set_need_resched();
			/* fall through */
		case -ERESTARTSYS:
			retval = VM_FAULT_NOPAGE;
			goto out_unlock;
		default:
			retval = VM_FAULT_SIGBUS;
			goto out_unlock;
		}
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */
	ret = ttm_bo_vm_fault_idle(bo, vma, vmf);
	if (unlikely(ret != 0)) {
		retval = ret;
		goto out_unlock;
	}

	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_NOPAGE;
		goto out_unlock;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
		drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
	page_last = vma_pages(vma) +
		drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;

	if (unlikely(page_offset >= bo->num_pages)) {
		retval = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	/*
	 * Make a local vma copy to modify the page_prot member
	 * and vm_flags if necessary. The vma parameter is protected
	 * by mmap_sem in write mode.
	 */
	cvma = *vma;
	cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);

	if (bo->mem.bus.is_iomem) {
		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
						cvma.vm_page_prot);
	} else {
		ttm = bo->ttm;
		if (!(bo->mem.placement & TTM_PL_FLAG_CACHED))
			cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
							cvma.vm_page_prot);

		/* Allocate all pages at once, the most common usage */
		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
			retval = VM_FAULT_OOM;
			goto out_io_unlock;
		}
	}

	/*
	 * Speculatively prefault a number of pages. Only error on
	 * first page.
	 */
	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
		if (bo->mem.bus.is_iomem)
			pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
		else {
			page = ttm->pages[page_offset];
			if (unlikely(!page && i == 0)) {
				retval = VM_FAULT_OOM;
				goto out_io_unlock;
			} else if (unlikely(!page)) {
				break;
			}
			pfn = page_to_pfn(page);
		}

		ret = vm_insert_mixed(&cvma, address, pfn);
		/*
		 * Somebody beat us to this PTE, or prefaulting hit
		 * an already populated PTE, or a prefaulting error
		 * occurred.
		 */
		if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
			break;
		else if (unlikely(ret != 0)) {
			retval =
			    (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
			goto out_io_unlock;
		}

		address += PAGE_SIZE;
		if (unlikely(++page_offset >= page_last))
			break;
	}
out_io_unlock:
	ttm_mem_io_unlock(man);
out_unlock:
	ttm_bo_unreserve(bo);
	return retval;
}

static void ttm_bo_vm_open(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data;

	(void)ttm_bo_reference(bo);
}

static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data;

	ttm_bo_unref(&bo);
	vma->vm_private_data = NULL;
}

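/*
 * VM operations installed on TTM buffer-object mappings; open/close
 * keep a buffer-object reference for the lifetime of each mapping.
 */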
static const struct vm_operations_struct ttm_bo_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close
};

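/*
 * ttm_bo_vm_lookup - look up the buffer object backing an mmap offset
 *
 * Resolves a page-based offset range to a buffer object via the device's
 * vma offset manager, taking a reference under the lookup lock so the
 * object cannot disappear before the caller owns the reference.
 */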
static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
						  unsigned long offset,
						  unsigned long pages)
{
	struct drm_vma_offset_node *node;
	struct ttm_buffer_object *bo = NULL;

	drm_vma_offset_lock_lookup(&bdev->vma_manager);

	node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
	if (likely(node)) {
		bo = container_of(node, struct ttm_buffer_object, vma_node);
		if (!kref_get_unless_zero(&bo->kref))
			bo = NULL;
	}

	drm_vma_offset_unlock_lookup(&bdev->vma_manager);

	if (!bo)
		pr_err("Could not find buffer object to map\n");

	return bo;
}

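/*
 * ttm_bo_mmap - mmap a TTM device-file offset range
 *
 * Looks up the buffer object at vma->vm_pgoff, checks access with the
 * driver's verify_access callback and wires up ttm_bo_vm_ops. On success
 * the looked-up reference is transferred to vma->vm_private_data.
 */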
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
		struct ttm_bo_device *bdev)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	int ret;

	bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
	if (unlikely(!bo))
		return -EINVAL;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}
	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	vma->vm_ops = &ttm_bo_vm_ops;

	/*
	 * Note: We're transferring the bo reference to
	 * vma->vm_private_data here.
	 */

	vma->vm_private_data = bo;
	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);

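/*
 * ttm_fbdev_mmap - mmap a pre-looked-up buffer object through an fbdev vma
 *
 * Unlike ttm_bo_mmap(), the buffer object is supplied by the caller, so
 * only offset zero is accepted and a new reference is taken explicitly.
 */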
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = ttm_bo_reference(bo);
	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);