/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

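/* Physical address of a VRAM-carveout-backed object: the object's
 * allocation offset within the carveout (in pages), converted to bytes
 * and added to the carveout base address:
 */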
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

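/* A bo is backed by shmem pages unless it was allocated from the VRAM
 * carveout (in which case it has a vram_node):
 */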
static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj,
		int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = drm_malloc_ab(npages, sizeof(struct page *));
	if (!p)
		return ERR_PTR(-ENOMEM);

	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node,
			npages, 0, DRM_MM_SEARCH_DEFAULT);
	if (ret) {
		drm_free_large(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

/* called with dev->struct_mutex held */
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			dev_err(dev->dev, "failed to allocate sgt\n");
			return ERR_CAST(msm_obj->sgt);
		}

		msm_obj->pages = p;

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		/* For non-cached buffers, tear down the DMA mapping that
		 * get_pages() created, since the display controller, GPU,
		 * etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
		sg_free_table(msm_obj->sgt);
		kfree(msm_obj->sgt);

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else {
			drm_mm_remove_node(msm_obj->vram_node);
			drm_free_large(msm_obj->pages);
		}

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct page **p;
	mutex_lock(&dev->struct_mutex);
	p = get_pages(obj);
	mutex_unlock(&dev->struct_mutex);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

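/* Set up vma flags and page protection to match the bo's cache mode.
 * WC and uncached mappings get their pages inserted by the fault
 * handler; cached bos are redirected to their shmem file (see below):
 */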
int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file  = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

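/* Fault handler: pins the backing pages (allocating them on first
 * touch) and inserts just the faulting page into the vma:
 */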
int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
			__pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

/* should be called under struct_mutex.. although it can be called
 * from atomic context without struct_mutex to acquire an extra
 * iova ref if you know one is already held.
 *
 * That means when I do eventually need to add support for unpinning
 * the refcnt counter needs to be atomic_t.
 */
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
		uint32_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	if (!msm_obj->domain[id].iova) {
		struct msm_drm_private *priv = obj->dev->dev_private;
		struct page **pages = get_pages(obj);

		if (IS_ERR(pages))
			return PTR_ERR(pages);

		if (iommu_present(&platform_bus_type)) {
			struct msm_mmu *mmu = priv->mmus[id];
			uint32_t offset;

			if (WARN_ON(!mmu))
				return -EINVAL;

			offset = (uint32_t)mmap_offset(obj);
			ret = mmu->funcs->map(mmu, offset, msm_obj->sgt,
					obj->size, IOMMU_READ | IOMMU_WRITE);
			msm_obj->domain[id].iova = offset;
		} else {
			msm_obj->domain[id].iova = physaddr(obj);
		}
	}

	if (!ret)
		*iova = msm_obj->domain[id].iova;

	return ret;
}

/* get iova, taking a reference.  Should have a matching put */
int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	/* this is safe right now because we don't unmap until the
	 * bo is deleted:
	 */
	if (msm_obj->domain[id].iova) {
		*iova = msm_obj->domain[id].iova;
		return 0;
	}

	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_iova_locked(obj, id, iova);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_iova()'.
 */
uint32_t msm_gem_iova(struct drm_gem_object *obj, int id)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!msm_obj->domain[id].iova);
	return msm_obj->domain[id].iova;
}

void msm_gem_put_iova(struct drm_gem_object *obj, int id)
{
	// XXX TODO ..
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}

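/* Dumb buffers are for unaccelerated scanout: compute an aligned pitch
 * and size, then take the normal bo allocation path:
 */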
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size  = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

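/* Lazily vmap() the backing pages into the kernel; the mapping stays
 * in place until the bo is freed:
 */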
void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages))
			return ERR_CAST(pages);
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	return msm_obj->vaddr;
}

void *msm_gem_vaddr(struct drm_gem_object *obj)
{
	void *ret;
	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* setup callback for when bo is no longer busy..
 * TODO probably want to differentiate read vs write..
 */
int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
		struct msm_fence_cb *cb)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	uint32_t fence = msm_gem_fence(msm_obj,
			MSM_PREP_READ | MSM_PREP_WRITE);
	return msm_queue_fence_cb(obj->dev, cb, fence);
}

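/* Move the bo to the GPU's active list, recording the fence that will
 * signal when the GPU is done reading or writing it:
 */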
void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool write, uint32_t fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	msm_obj->gpu = gpu;
	if (write)
		msm_obj->write_fence = fence;
	else
		msm_obj->read_fence = fence;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	msm_obj->read_fence = 0;
	msm_obj->write_fence = 0;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

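/* Prepare for CPU access: if the bo is active on the GPU, wait on the
 * fence covering 'op' (MSM_PREP_NOSYNC drops the timeout, so the fence
 * is only checked, not waited on):
 */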
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	if (is_active(msm_obj)) {
		uint32_t fence = msm_gem_fence(msm_obj, op);

		if (op & MSM_PREP_NOSYNC)
			timeout = NULL;

		ret = msm_wait_fence(dev, fence, timeout, true);
	}

	/* TODO cache maintenance */

	return ret;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	uint64_t off = drm_vma_node_start(&obj->vma_node);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p %zu\n",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			msm_obj->read_fence, msm_obj->write_fence,
			obj->name, obj->refcount.refcount.counter,
			off, msm_obj->vaddr, obj->size);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_printf(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

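/* Final teardown, called with struct_mutex held: unmap the bo from all
 * MMUs, release the pages (or, for imports, just the page array), and
 * free the object:
 */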
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int id;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
		struct msm_mmu *mmu = priv->mmus[id];
		if (mmu && msm_obj->domain[id].iova) {
			uint32_t offset = msm_obj->domain[id].iova;
			mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
		}
	}

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			drm_free_large(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		vunmap(msm_obj->vaddr);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	obj = msm_gem_new(dev, size, flags);

	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

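/* Common allocation path: validate the cache flags, pick between shmem
 * and VRAM-carveout backing, and initialize bookkeeping:
 */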
static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	unsigned sz;
	bool use_vram = false;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	if (!iommu_present(&platform_bus_type))
		use_vram = true;
	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return -EINVAL;

	sz = sizeof(*msm_obj);
	if (use_vram)
		sz += sizeof(struct drm_mm_node);

	msm_obj = kzalloc(sz, GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	if (use_vram)
		msm_obj->vram_node = (void *)&msm_obj[1];

	msm_obj->flags = flags;

	msm_obj->resv = &msm_obj->_resv;
	reservation_object_init(msm_obj->resv);

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

	*obj = &msm_obj->base;

	return 0;
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	struct drm_gem_object *obj = NULL;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	size = PAGE_ALIGN(size);

	ret = msm_gem_new_impl(dev, size, flags, &obj);
	if (ret)
		goto fail;

	if (use_pages(obj)) {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	} else {
		drm_gem_private_object_init(dev, obj, size);
	}

	return obj;

fail:
	if (obj)
		drm_gem_object_unreference(obj);

	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		uint32_t size, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!iommu_present(&platform_bus_type)) {
		dev_err(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (!msm_obj->pages) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret)
		goto fail;

	return obj;

fail:
	if (obj)
		drm_gem_object_unreference_unlocked(obj);

	return ERR_PTR(ret);
}