/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/mem_encrypt.h>

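/*
 * Number of PTEs set up speculatively per fault, to amortize the cost of
 * a single fault across neighboring pages of the same mapping.
 */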
#define TTM_BO_VM_NUM_PREFAULT 16

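/*
 * Helper for ttm_bo_vm_fault(): wait for a pipelined move of @bo to
 * complete. Returns 0 when the buffer is idle, VM_FAULT_SIGBUS or
 * VM_FAULT_NOPAGE on a failed wait, or VM_FAULT_RETRY when the fault
 * should be retried; on the retry path without FAULT_FLAG_RETRY_NOWAIT
 * the wait happens with mmap_sem dropped and the BO is unreserved here.
 */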
static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
				struct vm_fault *vmf)
{
	int ret = 0;

	if (likely(!bo->moving))
		goto out_unlock;

	/*
	 * Quick non-stalling check for idle.
	 */
	if (dma_fence_is_signaled(bo->moving))
		goto out_clear;

	/*
	 * If possible, avoid waiting for GPU with mmap_sem
	 * held.
	 */
	if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
		ret = VM_FAULT_RETRY;
		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
			goto out_unlock;

		ttm_bo_reference(bo);
		up_read(&vmf->vma->vm_mm->mmap_sem);
		(void) dma_fence_wait(bo->moving, true);
		ttm_bo_unreserve(bo);
		ttm_bo_unref(&bo);
		goto out_unlock;
	}

	/*
	 * Ordinary wait.
	 */
	ret = dma_fence_wait(bo->moving, true);
	if (unlikely(ret != 0)) {
		ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
			VM_FAULT_NOPAGE;
		goto out_unlock;
	}

out_clear:
	dma_fence_put(bo->moving);
	bo->moving = NULL;

out_unlock:
	return ret;
}

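/*
 * Translate @page_offset within @bo to an io-memory pfn, preferring the
 * driver's io_mem_pfn hook and falling back to the linear
 * bus base + offset layout.
 */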
static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
				       unsigned long page_offset)
{
	struct ttm_bo_device *bdev = bo->bdev;

	if (bdev->driver->io_mem_pfn)
		return bdev->driver->io_mem_pfn(bo, page_offset);

	return ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT)
		+ page_offset;
}

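/*
 * Main fault handler: reserve the BO without blocking (retrying the
 * fault if that fails), then set up PTEs for up to
 * TTM_BO_VM_NUM_PREFAULT pages starting at the faulting address.
 */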
static int ttm_bo_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long page_offset;
	unsigned long page_last;
	unsigned long pfn;
	struct ttm_tt *ttm = NULL;
	struct page *page;
	int ret;
	int i;
	unsigned long address = vmf->address;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];
	struct vm_area_struct cvma;

	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_sem and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after waiting
	 * for the buffer to become unreserved.
	 */
	ret = ttm_bo_reserve(bo, true, true, NULL);
	if (unlikely(ret != 0)) {
		if (ret != -EBUSY)
			return VM_FAULT_NOPAGE;

		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
				ttm_bo_reference(bo);
				up_read(&vmf->vma->vm_mm->mmap_sem);
				(void) ttm_bo_wait_unreserved(bo);
				ttm_bo_unref(&bo);
			}

			return VM_FAULT_RETRY;
		}

		/*
		 * If we wanted to change the locking order to
		 * mmap_sem -> bo::reserve, we'd use a blocking reserve
		 * here instead of retrying the fault...
		 */
		return VM_FAULT_NOPAGE;
	}

	/*
	 * Refuse to fault imported pages. This should be handled
	 * (if at all) by redirecting mmap to the exporter.
	 */
	if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
		ret = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
		case -ERESTARTSYS:
			ret = VM_FAULT_NOPAGE;
			goto out_unlock;
		default:
			ret = VM_FAULT_SIGBUS;
			goto out_unlock;
		}
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */
	ret = ttm_bo_vm_fault_idle(bo, vmf);
	if (unlikely(ret != 0)) {
		if (ret == VM_FAULT_RETRY &&
		    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
			/* The BO has already been unreserved. */
			return ret;
		}

		goto out_unlock;
	}

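	/*
	 * Take the io_reserve lock of this memory type so that
	 * bo->mem.bus stays valid while PTEs are set up below.
	 */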
	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0)) {
		ret = VM_FAULT_NOPAGE;
		goto out_unlock;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (unlikely(ret != 0)) {
		ret = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
		vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
	page_last = vma_pages(vma) + vma->vm_pgoff -
		drm_vma_node_start(&bo->vma_node);

	if (unlikely(page_offset >= bo->num_pages)) {
		ret = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	/*
	 * Make a local vma copy to modify the page_prot member
	 * and vm_flags if necessary. Modifying the passed-in vma
	 * itself would require mmap_sem held in write mode, which
	 * the fault path does not hold.
	 */
	cvma = *vma;
	cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);

	if (bo->mem.bus.is_iomem) {
		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
						cvma.vm_page_prot);
	} else {
		struct ttm_operation_ctx ctx = {
			.interruptible = false,
			.no_wait_gpu = false,
			.flags = TTM_OPT_FLAG_FORCE_ALLOC
		};

		ttm = bo->ttm;
		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
						cvma.vm_page_prot);

		/* Allocate all pages at once, the most common usage. */
		if (ttm_tt_populate(ttm, &ctx)) {
			ret = VM_FAULT_OOM;
			goto out_io_unlock;
		}
	}

	/*
	 * Speculatively prefault a number of pages. Only error on
	 * the first page.
	 */
	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
		if (bo->mem.bus.is_iomem) {
			/* Iomem should not be marked encrypted */
			cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot);
			pfn = ttm_bo_io_mem_pfn(bo, page_offset);
		} else {
			page = ttm->pages[page_offset];
			if (unlikely(!page && i == 0)) {
				ret = VM_FAULT_OOM;
				goto out_io_unlock;
			} else if (unlikely(!page)) {
				break;
			}
			page->index = drm_vma_node_start(&bo->vma_node) +
				page_offset;
			pfn = page_to_pfn(page);
		}

		if (vma->vm_flags & VM_MIXEDMAP)
			ret = vm_insert_mixed(&cvma, address,
					__pfn_to_pfn_t(pfn, PFN_DEV));
		else
			ret = vm_insert_pfn(&cvma, address, pfn);

		/*
		 * Somebody beat us to this PTE, we prefaulted into an
		 * already populated PTE, or there was a prefaulting
		 * error.
		 */
		if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
			break;
		else if (unlikely(ret != 0)) {
			ret = (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
			goto out_io_unlock;
		}

		address += PAGE_SIZE;
		if (unlikely(++page_offset >= page_last))
			break;
	}
	ret = VM_FAULT_NOPAGE;
out_io_unlock:
	ttm_mem_io_unlock(man);
out_unlock:
	ttm_bo_unreserve(bo);
	return ret;
}

static void ttm_bo_vm_open(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data;

	WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);

	(void)ttm_bo_reference(bo);
}

static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;

	ttm_bo_unref(&bo);
	vma->vm_private_data = NULL;
}

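/*
 * Helper for ttm_bo_vm_access(): copy between @buf and the BO's backing
 * pages through short-lived ttm_bo_kmap() mappings, one page at a time.
 * Returns the number of bytes copied on success, negative errno on error.
 */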
static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
				 unsigned long offset,
				 uint8_t *buf, int len, int write)
{
	unsigned long page = offset >> PAGE_SHIFT;
	unsigned long bytes_left = len;
	int ret;

	/* Copy a page at a time, so that no extra virtual address
	 * mapping is needed.
	 */
	offset -= page << PAGE_SHIFT;
	do {
		unsigned long bytes = min(bytes_left, PAGE_SIZE - offset);
		struct ttm_bo_kmap_obj map;
		void *ptr;
		bool is_iomem;

		ret = ttm_bo_kmap(bo, page, 1, &map);
		if (ret)
			return ret;

		ptr = (uint8_t *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset;
		WARN_ON_ONCE(is_iomem);
		if (write)
			memcpy(ptr, buf, bytes);
		else
			memcpy(buf, ptr, bytes);
		ttm_bo_kunmap(&map);

		page++;
		buf += bytes;
		bytes_left -= bytes;
		offset = 0;
	} while (bytes_left);

	return len;
}

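/*
 * vm_operations_struct::access handler, used for example by ptrace and
 * /proc/<pid>/mem to peek and poke BO-backed mappings. System and TT
 * placements are handled by kmapping the pages; other placements are
 * delegated to the driver's access_memory hook, if present.
 */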
static int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
			    void *buf, int len, int write)
{
	unsigned long offset = (addr) - vma->vm_start;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	int ret;

	if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages)
		return -EIO;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (ret)
		return ret;

	switch (bo->mem.mem_type) {
	case TTM_PL_SYSTEM:
		if (unlikely(bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
			ret = ttm_tt_swapin(bo->ttm);
			if (unlikely(ret != 0))
				goto out; /* don't leak the reservation */
		}
		/* fall through */
	case TTM_PL_TT:
		ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write);
		break;
	default:
		if (bo->bdev->driver->access_memory)
			ret = bo->bdev->driver->access_memory(
				bo, offset, buf, len, write);
		else
			ret = -EIO;
	}

out:
	ttm_bo_unreserve(bo);

	return ret;
}

static const struct vm_operations_struct ttm_bo_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access
};

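/*
 * Look up the buffer object backing the mmap offset range
 * [@offset, @offset + @pages) in @bdev's vma manager and acquire a
 * reference. Returns NULL if no node covers the range or the object is
 * already on its way to destruction.
 */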
static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
						  unsigned long offset,
						  unsigned long pages)
{
	struct drm_vma_offset_node *node;
	struct ttm_buffer_object *bo = NULL;

	drm_vma_offset_lock_lookup(&bdev->vma_manager);

	node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
	if (likely(node)) {
		bo = container_of(node, struct ttm_buffer_object, vma_node);
		if (!kref_get_unless_zero(&bo->kref))
			bo = NULL;
	}

	drm_vma_offset_unlock_lookup(&bdev->vma_manager);

	if (!bo)
		pr_err("Could not find buffer object to map\n");

	return bo;
}

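/*
 * mmap entry point for TTM-backed device files: look up the BO from the
 * fake mmap offset, let the driver verify that @filp may access it, and
 * install ttm_bo_vm_ops on the vma.
 */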
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
		struct ttm_bo_device *bdev)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	int ret;

	bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
	if (unlikely(!bo))
		return -EINVAL;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}
	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	vma->vm_ops = &ttm_bo_vm_ops;

	/*
	 * Note: We're transferring the bo reference to
	 * vma->vm_private_data here.
	 */

	vma->vm_private_data = bo;

	/*
	 * We'd like to use VM_PFNMAP on shared mappings, where
	 * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
	 * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
	 * bad for performance. Until that has been sorted out, use
	 * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719
	 */
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);
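
/*
 * Typical usage (a sketch, not code from any particular driver): a
 * TTM-based driver forwards its file_operations::mmap callback here,
 * e.g.
 *
 *	static int foo_mmap(struct file *filp, struct vm_area_struct *vma)
 *	{
 *		struct foo_device *fdev = foo_filp_to_dev(filp);
 *
 *		return ttm_bo_mmap(filp, vma, &fdev->bdev);
 *	}
 *
 * foo_device, foo_filp_to_dev() and the bdev member are placeholders;
 * the driver's verify_access() hook then decides whether @filp may map
 * the looked-up buffer object.
 */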

int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = ttm_bo_reference(bo);
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_flags |= VM_IO | VM_DONTEXPAND;
	return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);