/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void msm_gem_vunmap_locked(struct drm_gem_object *obj);


static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			dev_err(dev->dev, "failed to allocate sgt\n");
			return ERR_CAST(msm_obj->sgt);
		}

		msm_obj->pages = p;

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		/* For non-cached buffers, the pages were DMA-mapped at
		 * get_pages() time (display controller, GPU, etc. are not
		 * coherent), so undo that mapping before releasing them:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
		sg_free_table(msm_obj->sgt);
		kfree(msm_obj->sgt);

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	mutex_unlock(&msm_obj->lock);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}
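
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * a typical caller pins the backing pages, uses the page array or the
 * cached sg-table for DMA setup, then releases them:
 *
 *	struct page **pages = msm_gem_get_pages(obj);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	... use pages[] / to_msm_bo(obj)->sgt ...
 *	msm_gem_put_pages(obj);
 *
 * Note that msm_gem_put_pages() is currently a no-op, so the "put" only
 * documents intent until pin-count tracking is added.
 */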

int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

int msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj.  So, we don't need to hold one here.
	 */
	ret = mutex_lock_interruptible(&msm_obj->lock);
	if (ret)
		goto out;

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
	mutex_unlock(&msm_obj->lock);
out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	offset = mmap_offset(obj);
	mutex_unlock(&msm_obj->lock);
	return offset;
}
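
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * userspace maps a GEM buffer by asking the driver for the fake offset
 * and passing it to mmap(2) on the DRM fd.  For dumb buffers the offset
 * comes from DRM_IOCTL_MODE_MAP_DUMB (fd/handle here are hypothetical):
 *
 *	struct drm_mode_map_dumb req = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &req);
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, req.offset);
 *
 * The offset is only a lookup key; msm_gem_fault() above populates the
 * actual pages on first access.
 */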

static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/* Called with msm_obj->lock locked */
static void
put_iova(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt);
		del_vma(vma);
	}
}

/* get iova, taking a reference.  Should have a matching put */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return -EBUSY;
	}

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		struct page **pages;

		vma = add_vma(obj, aspace);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto unlock;
		}

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		ret = msm_gem_map_vma(aspace, vma, msm_obj->sgt,
				obj->size >> PAGE_SHIFT);
		if (ret)
			goto fail;
	}

	*iova = vma->iova;

	mutex_unlock(&msm_obj->lock);
	return 0;

fail:
	del_vma(vma);
unlock:
	mutex_unlock(&msm_obj->lock);
	return ret;
}
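
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * the usual pattern for a caller that needs a GPU-visible address, with
 * aspace being whatever address space the caller submits against
 * (names hypothetical):
 *
 *	uint64_t iova;
 *	int ret = msm_gem_get_iova(obj, gpu->aspace, &iova);
 *	if (ret)
 *		return ret;
 *	... emit iova into the command stream ...
 *	msm_gem_put_iova(obj, gpu->aspace);
 *
 * The vma (and thus the iova) actually lives until put_iova() tears it
 * down at free/purge time, since msm_gem_put_iova() is not yet
 * refcounted (see the TODO below).
 */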

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_iova()'.
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);
	mutex_unlock(&msm_obj->lock);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

void msm_gem_put_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	// XXX TODO ..
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv > madv)) {
		dev_err(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj->lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	mutex_unlock(&msm_obj->lock);
	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

/*
 * Don't use this!  It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
}
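
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * CPU access to a buffer's contents via the kernel vmap, e.g. to fill a
 * ringbuffer or read back crash-dump state:
 *
 *	void *ptr = msm_gem_get_vaddr(obj);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	memcpy(ptr, data, len);
 *	msm_gem_put_vaddr(obj);
 *
 * The kernel-side mapping is always write-combined (see get_vaddr()
 * above), so ordering-sensitive readers may need explicit barriers.
 */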

/* Update madvise status, returns true if not purged, else false: */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	mutex_unlock(&msm_obj->lock);

	return (madv != __MSM_MADV_PURGED);
}
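
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * userspace marks cached-but-idle buffers purgeable so the shrinker may
 * reclaim them, then revives them before reuse (ioctl shape per the MSM
 * UAPI; fd/handle hypothetical):
 *
 *	struct drm_msm_gem_madvise req = {
 *		.handle = handle,
 *		.madv = MSM_MADV_DONTNEED,
 *	};
 *	ioctl(fd, DRM_IOCTL_MSM_GEM_MADVISE, &req);
 *	...
 *	req.madv = MSM_MADV_WILLNEED;
 *	ioctl(fd, DRM_IOCTL_MSM_GEM_MADVISE, &req);
 *	if (!req.retained)
 *		... contents were purged; repaint or reallocate ...
 */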

void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	mutex_lock_nested(&msm_obj->lock, subclass);

	put_iova(obj);

	msm_gem_vunmap_locked(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * possible back to the system, as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);

	mutex_unlock(&msm_obj->lock);
}

static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock_nested(&msm_obj->lock, subclass);
	msm_gem_vunmap_locked(obj);
	mutex_unlock(&msm_obj->lock);
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	fobj = reservation_object_get_list(msm_obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(msm_obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(msm_obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}
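
/*
 * A note on the sync rule above (added commentary, not in the original):
 * an exclusive (write) submission must wait for all outstanding fences,
 * while a read-only submission only needs to wait for the most recent
 * writer, i.e. the exclusive fence.  Fences from our own fence context
 * are skipped since submissions on the same ring retire in FIFO order.
 */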

void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(msm_obj->resv, fence);
	else
		reservation_object_add_shared_fence(msm_obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
						  true,  remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}
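
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * userspace brackets CPU access with the CPU_PREP/CPU_FINI ioctls before
 * and after touching a mapped buffer (the timeout value is arbitrary):
 *
 *	struct drm_msm_gem_cpu_prep prep = {
 *		.handle = handle,
 *		.op = MSM_PREP_WRITE,
 *		.timeout = { .tv_sec = 1, .tv_nsec = 0 },
 *	};
 *	ioctl(fd, DRM_IOCTL_MSM_GEM_CPU_PREP, &prep);
 *	... write through the mmap ...
 *	struct drm_msm_gem_cpu_fini fini = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_MSM_GEM_CPU_FINI, &fini);
 */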

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %u\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object *robj = msm_obj->resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	mutex_lock(&msm_obj->lock);

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	/* FIXME: we need to print the address space here too */
	list_for_each_entry(vma, &msm_obj->vmas, list)
		seq_printf(m, " %08llx", vma->iova);

	seq_printf(m, " %zu%s\n", obj->size, madv);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	mutex_unlock(&msm_obj->lock);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_printf(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	mutex_lock(&msm_obj->lock);

	put_iova(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap_locked(obj);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	mutex_unlock(&msm_obj->lock);
	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj,
		bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	mutex_init(&msm_obj->lock);

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv) {
		msm_obj->resv = resv;
	} else {
		msm_obj->resv = &msm_obj->_resv;
		reservation_object_init(msm_obj->resv);
	}

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	if (struct_mutex_locked) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	} else {
		mutex_lock(&dev->struct_mutex);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&dev->struct_mutex);
	}

	*obj = &msm_obj->base;

	return 0;
}

static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!iommu_present(&platform_bus_type))
		use_vram = true;
	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
	if (ret)
		goto fail;

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;
		struct msm_gem_object *msm_obj = to_msm_bo(obj);

		mutex_lock(&msm_obj->lock);

		vma = add_vma(obj, NULL);
		mutex_unlock(&msm_obj->lock);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	}

	return obj;

fail:
	drm_gem_object_unreference_unlocked(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!iommu_present(&platform_bus_type)) {
		dev_err(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	mutex_lock(&msm_obj->lock);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		mutex_unlock(&msm_obj->lock);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret) {
		mutex_unlock(&msm_obj->lock);
		goto fail;
	}

	mutex_unlock(&msm_obj->lock);
	return obj;

fail:
	drm_gem_object_unreference_unlocked(obj);
	return ERR_PTR(ret);
}

static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_iova(obj, aspace, iova);
		if (ret) {
			drm_gem_object_unreference(obj);
			return ERR_PTR(ret);
		}
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_put_iova(obj, aspace);
		drm_gem_object_unreference(obj);
		return ERR_CAST(vaddr);
	}

	if (bo)
		*bo = obj;

	return vaddr;
}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}
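
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * in-kernel users allocate a pinned, mapped scratch buffer in one call,
 * e.g. for a ringbuffer or crash-dump area (size/names hypothetical):
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *ptr = msm_gem_kernel_new(dev, SZ_32K, MSM_BO_WC,
 *			gpu->aspace, &bo, &iova);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *
 * The _locked variant is for callers already holding dev->struct_mutex.
 */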