/*
 * drivers/gpu/drm/omapdrm/omap_gem.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/seq_file.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/pfn_t.h>

#include <drm/drm_vma_manager.h>

#include "omap_drv.h"
#include "omap_dmm_tiler.h"

/*
 * GEM buffer object implementation.
 */

/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_MEM_DMA_API	0x01000000	/* memory allocated with the dma_alloc_* API */
#define OMAP_BO_MEM_SHMEM	0x02000000	/* memory allocated through shmem backing */
#define OMAP_BO_MEM_DMABUF	0x08000000	/* memory imported from a dmabuf */

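/*
 * Illustrative note (not part of the original file): these internal flags
 * live in the upper 8 bits so they can be or'd into the same word as the
 * public OMAP_BO_* flags from the uapi header.  For example, a
 * write-combined shmem-backed object would carry:
 *
 *	omap_obj->flags == OMAP_BO_WC | OMAP_BO_MEM_SHMEM;
 */
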
struct omap_gem_object {
	struct drm_gem_object base;

	struct list_head mm_list;

	uint32_t flags;

	/** width/height for tiled formats (rounded up to slot boundaries) */
	uint16_t width, height;

	/** roll applied when mapping to DMM */
	uint32_t roll;

	/**
	 * paddr contains the buffer DMA address. It is valid for
	 *
	 * - buffers allocated through the DMA mapping API (with the
	 *   OMAP_BO_MEM_DMA_API flag set)
	 *
	 * - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set)
	 *   if they are physically contiguous (when sgt->orig_nents == 1)
	 *
	 * - buffers mapped through the TILER when paddr_cnt is not zero, in
	 *   which case the DMA address points to the TILER aperture
	 *
	 * Physically contiguous buffers have their DMA address equal to the
	 * physical address as we don't remap those buffers through the TILER.
	 *
	 * Buffers mapped to the TILER have their DMA address pointing to the
	 * TILER aperture. As TILER mappings are refcounted (through paddr_cnt)
	 * the DMA address must be accessed through omap_gem_get_paddr() to
	 * ensure that the mapping won't disappear unexpectedly. References must
	 * be released with omap_gem_put_paddr().
	 */
	dma_addr_t paddr;

	/**
	 * # of users of paddr
	 */
	uint32_t paddr_cnt;

	/**
	 * If the buffer has been imported from a dmabuf the OMAP_BO_MEM_DMABUF
	 * flag is set and the sgt field is valid.
	 */
	struct sg_table *sgt;

	/**
	 * tiler block used when buffer is remapped in DMM/TILER.
	 */
	struct tiler_block *block;

	/**
	 * Array of backing pages, if allocated. Note that pages are never
	 * allocated for buffers originally allocated from contiguous memory.
	 */
	struct page **pages;

	/** addresses corresponding to pages in above array */
	dma_addr_t *addrs;

	/**
	 * Virtual address, if mapped.
	 */
	void *vaddr;

	/**
	 * sync-object allocated on demand (if needed)
	 *
	 * Per-buffer sync-object for tracking pending and completed hw/dma
	 * read and write operations.
	 */
	struct {
		uint32_t write_pending;
		uint32_t write_complete;
		uint32_t read_pending;
		uint32_t read_complete;
	} *sync;
};

#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)

/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 * not necessarily pinned in TILER all the time, and (b) when they are
 * they are not necessarily page aligned, we reserve one or more small
 * regions in each of the 2d containers to use as a user-GART where we
 * can create a second page-aligned mapping of parts of the buffer
 * being accessed from userspace.
 *
 * Note that we could optimize slightly when we know that multiple
 * tiler containers are backed by the same PAT.. but I'll leave that
 * for later..
 */
#define NUM_USERGART_ENTRIES 2
struct omap_drm_usergart_entry {
	struct tiler_block *block;	/* the reserved tiler block */
	dma_addr_t paddr;
	struct drm_gem_object *obj;	/* the current pinned obj */
	pgoff_t obj_pgoff;		/* page offset of obj currently
					   mapped in */
};

struct omap_drm_usergart {
	struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
	int height;		/* height in rows */
	int height_shift;	/* ilog2(height in rows) */
	int slot_shift;		/* ilog2(width per slot) */
	int stride_pfn;		/* stride in pages */
	int last;		/* index of last used entry */
};
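
/*
 * Worked example (illustrative, values assumed): if a container format uses
 * usergart slots 64 rows tall, then height = 64 and height_shift = 6, and a
 * single usergart entry maps a 4kb-wide by 64-row window of the buffer.
 * slot_shift and stride_pfn likewise cache ilog2()/page-count values so the
 * fault path below can avoid divisions.
 */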

/* -----------------------------------------------------------------------------
 * Helpers
 */

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;
	size_t size;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	size = omap_gem_mmap_size(obj);
	ret = drm_gem_create_mmap_offset_size(obj, size);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

static bool is_contiguous(struct omap_gem_object *omap_obj)
{
	if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
		return true;

	if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && omap_obj->sgt->nents == 1)
		return true;

	return false;
}

/* -----------------------------------------------------------------------------
 * Eviction
 */

static void evict_entry(struct drm_gem_object *obj,
		enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	int n = priv->usergart[fmt].height;
	size_t size = PAGE_SIZE * n;
	loff_t off = mmap_offset(obj) +
			(entry->obj_pgoff << PAGE_SHIFT);
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	if (m > 1) {
		int i;
		/* if stride > PAGE_SIZE then sparse mapping: */
		for (i = n; i > 0; i--) {
			unmap_mapping_range(obj->dev->anon_inode->i_mapping,
					    off, PAGE_SIZE, 1);
			off += PAGE_SIZE * m;
		}
	} else {
		unmap_mapping_range(obj->dev->anon_inode->i_mapping,
				    off, size, 1);
	}

	entry->obj = NULL;
}

/* Evict a buffer from usergart, if it is mapped there */
static void evict(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;

	if (omap_obj->flags & OMAP_BO_TILED) {
		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
		int i;

		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
			struct omap_drm_usergart_entry *entry =
				&priv->usergart[fmt].entry[i];

			if (entry->obj == obj)
				evict_entry(obj, fmt, entry);
		}
	}
}

/* -----------------------------------------------------------------------------
 * Page Management
 */

/** ensure backing pages are allocated */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct page **pages;
	int npages = obj->size >> PAGE_SHIFT;
	int i, ret;
	dma_addr_t *addrs;

	WARN_ON(omap_obj->pages);

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}

		for (i = 0; i < npages; i++) {
			addrs[i] = dma_map_page(dev->dev, pages[i],
					0, PAGE_SIZE, DMA_BIDIRECTIONAL);

			if (dma_mapping_error(dev->dev, addrs[i])) {
				dev_warn(dev->dev,
					"%s: failed to map page\n", __func__);

				for (i = i - 1; i >= 0; --i) {
					dma_unmap_page(dev->dev, addrs[i],
						PAGE_SIZE, DMA_BIDIRECTIONAL);
				}

				ret = -ENOMEM;
				goto free_addrs;
			}
		}
	} else {
		addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}
	}

	omap_obj->addrs = addrs;
	omap_obj->pages = pages;

	return 0;

free_addrs:
	kfree(addrs);
free_pages:
	drm_gem_put_pages(obj, pages, true, false);

	return ret;
}

/* acquire pages when needed (for example, for DMA where a physically
 * contiguous buffer is not required)
 */
static int get_pages(struct drm_gem_object *obj, struct page ***pages)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	if ((omap_obj->flags & OMAP_BO_MEM_SHMEM) && !omap_obj->pages) {
		ret = omap_gem_attach_pages(obj);
		if (ret) {
			dev_err(obj->dev->dev, "could not attach pages\n");
			return ret;
		}
	}

	/* TODO: even phys-contig.. we should have a list of pages? */
	*pages = omap_obj->pages;

	return 0;
}

/** release backing pages */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	/* for non-cached buffers, drop the DMA mappings that were created
	 * when the pages were attached, as DSS, GPU, etc. are not cache
	 * coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			dma_unmap_page(obj->dev->dev, omap_obj->addrs[i],
					PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
	}

	kfree(omap_obj->addrs);
	omap_obj->addrs = NULL;

	drm_gem_put_pages(obj, omap_obj->pages, true, false);
	omap_obj->pages = NULL;
}

/* get buffer flags */
uint32_t omap_gem_flags(struct drm_gem_object *obj)
{
	return to_omap_bo(obj)->flags;
}

uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

/** get mmap size */
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	size_t size = obj->size;

	if (omap_obj->flags & OMAP_BO_TILED) {
		/* for tiled buffers, the virtual size has stride rounded up
		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
		 * 32kb later!).  But we don't back the entire buffer with
		 * pages, only the valid picture part.. so need to adjust for
		 * this in the size used to mmap and generate mmap offset
		 */
		size = tiler_vsize(gem2fmt(omap_obj->flags),
				omap_obj->width, omap_obj->height);
	}

	return size;
}
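
/*
 * Worked example (illustrative, numbers assumed): an 8-bit 2d tiled buffer
 * 1000 bytes wide has each row's virtual stride rounded up to PAGE_SIZE, so
 * the mmap size is 4096 * height bytes even though only the 1000-byte valid
 * picture part of each row is backed by pages.
 */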

/* -----------------------------------------------------------------------------
 * Fault Handling
 */

/* Normal handling for the case of faulting in non-tiled buffers */
static int fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned long pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	if (omap_obj->pages) {
		omap_gem_cpu_sync(obj, pgoff);
		pfn = page_to_pfn(omap_obj->pages[pgoff]);
	} else {
		BUG_ON(!is_contiguous(omap_obj));
		pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
	}

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
			__pfn_to_pfn_t(pfn, PFN_DEV));
}

/* Special handling for the case of faulting in 2d tiled buffers */
static int fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_drm_usergart_entry *entry;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct page *pages[64];  /* XXX is this too much to have on stack? */
	unsigned long pfn;
	pgoff_t pgoff, base_pgoff;
	void __user *vaddr;
	int i, ret, slots;

	/*
	 * Note the height of the slot is also equal to the number of pages
	 * that need to be mapped in to fill a 4kb wide CPU page.  If the
	 * slot height is 64, then 64 pages fill a 4kb wide by 64 row region.
	 */
	const int n = priv->usergart[fmt].height;
	const int n_shift = priv->usergart[fmt].height_shift;

	/*
	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
	 * rounded up to the next multiple of PAGE_SIZE.. this needs to be
	 * taken into account in some of the math, so figure out the virtual
	 * stride in pages
	 */
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	/*
	 * Actual address we start mapping at is rounded down to previous slot
	 * boundary in the y direction:
	 */
	base_pgoff = round_down(pgoff, m << n_shift);

	/* figure out buffer width in slots */
	slots = omap_obj->width >> priv->usergart[fmt].slot_shift;

	vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);

	entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];

	/* evict previous buffer using this usergart entry, if any: */
	if (entry->obj)
		evict_entry(entry->obj, fmt, entry);

	entry->obj = obj;
	entry->obj_pgoff = base_pgoff;

	/* now convert base_pgoff to phys offset from virt offset: */
	base_pgoff = (base_pgoff >> n_shift) * slots;

	/* for wider-than 4k.. figure out which part of the slot-row we want: */
	if (m > 1) {
		int off = pgoff % m;
		entry->obj_pgoff += off;
		base_pgoff /= m;
		slots = min(slots - (off << n_shift), n);
		base_pgoff += off << n_shift;
		vaddr += off << PAGE_SHIFT;
	}

	/*
	 * Map in pages. Beyond the valid pixel part of the buffer, we set
	 * pages[i] to NULL to get a dummy page mapped in.. if someone
	 * reads/writes it they will get random/undefined content, but at
	 * least it won't be corrupting whatever other random page used to
	 * be mapped in, or other undefined behavior.
	 */
	memcpy(pages, &omap_obj->pages[base_pgoff],
			sizeof(struct page *) * slots);
	memset(pages + slots, 0,
			sizeof(struct page *) * (n - slots));

	ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
	if (ret) {
		dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
		return ret;
	}

	pfn = entry->paddr >> PAGE_SHIFT;

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	for (i = n; i > 0; i--) {
		vm_insert_mixed(vma, (unsigned long)vaddr,
				__pfn_to_pfn_t(pfn, PFN_DEV));
		pfn += priv->usergart[fmt].stride_pfn;
		vaddr += PAGE_SIZE * m;
	}

	/* simple round-robin: */
	priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
				% NUM_USERGART_ENTRIES;

	return 0;
}
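
/*
 * Worked example (illustrative, values assumed): with a usergart slot height
 * n = 64 (n_shift = 6) and a virtual stride of m = 3 pages (e.g. a row of
 * 8192 bytes: m = 1 + 8192/4096), a fault at pgoff = 200 rounds down to
 * base_pgoff = round_down(200, 3 << 6) = 192, the start of that slot row,
 * and off = 200 % 3 = 2 selects the third 4kb-wide column of the row.
 */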

/**
 * omap_gem_fault - pagefault handler for GEM objects
 * @vma: the VMA of the GEM object
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * The VMA was set up by GEM. In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct drm_device *dev = obj->dev;
	struct page **pages;
	int ret;

	/* Make sure we don't update the object in parallel on a fault, and
	 * that nothing is moved or removed from beneath our feet
	 */
	mutex_lock(&dev->struct_mutex);

	/* if a shmem backed object, make sure we have pages attached now */
	ret = get_pages(obj, &pages);
	if (ret)
		goto fail;

	/* where should we do corresponding put_pages().. we are mapping
	 * the original page, rather than thru a GART, so we can't rely
	 * on eviction to trigger this.  But munmap() or all mappings should
	 * probably trigger put_pages()?
	 */

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = fault_2d(obj, vma, vmf);
	else
		ret = fault_1d(obj, vma, vmf);


fail:
	mutex_unlock(&dev->struct_mutex);
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** We override mainly to fix up some of the vm mapping flags.. */
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return omap_gem_mmap_obj(vma->vm_private_data, vma);
}

int omap_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (omap_obj->flags & OMAP_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * We do have some private objects, at least for scanout buffers
		 * on hardware without DMM/TILER.  But these are allocated write-
		 * combine
		 */
		if (WARN_ON(!obj->filp))
			return -EINVAL;

		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		vma->vm_pgoff = 0;
		vma->vm_file  = get_file(obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

/* -----------------------------------------------------------------------------
 * Dumb Buffers
 */

/**
 * omap_gem_dumb_create - create a dumb buffer
 * @file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space. Give userspace a handle by which
 * to reference it.
 */
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	union omap_gem_size gsize;

	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	args->size = PAGE_ALIGN(args->pitch * args->height);

	gsize = (union omap_gem_size){
		.bytes = args->size,
	};

	return omap_gem_new_handle(dev, file, gsize,
			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
}
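
/*
 * Worked example (illustrative): a 1280x720 request at 32 bpp yields
 * pitch = DIV_ROUND_UP(1280 * 32, 8) = 5120 bytes and
 * size = PAGE_ALIGN(5120 * 720) = 3686400 bytes (already page aligned).
 */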

/**
 * omap_gem_dumb_map_offset - buffer mapping for dumb interface
 * @file: our drm client file
 * @dev: drm device
 * @handle: GEM handle to the object (from dumb_create)
 * @offset: used to return the mmap offset of the object
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory. We don't have to do much here at the moment.
 */
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = omap_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}
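
/*
 * Illustrative userspace flow (a sketch, not part of this file): the offset
 * returned above is the fake mmap offset to pass to mmap() on the DRM fd:
 *
 *	struct drm_mode_map_dumb map = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, map.offset);
 */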

#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Set scrolling position.  This allows us to implement fast scrolling
 * for console.
 *
 * Call only from non-atomic contexts.
 */
int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint32_t npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	if (roll > npages) {
		dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
		return -EINVAL;
	}

	omap_obj->roll = roll;

	mutex_lock(&obj->dev->struct_mutex);

	/* if we aren't mapped yet, we don't need to do anything */
	if (omap_obj->block) {
		struct page **pages;
		ret = get_pages(obj, &pages);
		if (ret)
			goto fail;
		ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
		if (ret)
			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
#endif

/* -----------------------------------------------------------------------------
 * Memory Management & DMA Sync
 */

/**
 * shmem buffers that are mapped cached can simulate coherency by using
 * page faulting to keep track of dirty pages
 */
static inline bool is_cached_coherent(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	return (omap_obj->flags & OMAP_BO_MEM_SHMEM) &&
		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
}

/* Sync the buffer for CPU access.. note pages should already be
 * attached, ie. omap_gem_get_pages()
 */
void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj) && omap_obj->addrs[pgoff]) {
		dma_unmap_page(dev->dev, omap_obj->addrs[pgoff],
				PAGE_SIZE, DMA_BIDIRECTIONAL);
		omap_obj->addrs[pgoff] = 0;
	}
}

/* sync the buffer for DMA access */
void omap_gem_dma_sync(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		struct page **pages = omap_obj->pages;
		bool dirty = false;

		for (i = 0; i < npages; i++) {
			if (!omap_obj->addrs[i]) {
				dma_addr_t addr;

				addr = dma_map_page(dev->dev, pages[i], 0,
						PAGE_SIZE, DMA_BIDIRECTIONAL);

				if (dma_mapping_error(dev->dev, addr)) {
					dev_warn(dev->dev,
						"%s: failed to map page\n",
						__func__);
					break;
				}

				dirty = true;
				omap_obj->addrs[i] = addr;
			}
		}

		if (dirty) {
			unmap_mapping_range(obj->filp->f_mapping, 0,
					omap_gem_mmap_size(obj), 1);
		}
	}
}
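
/*
 * Usage sketch (illustrative): before kicking DMA on a cached shmem buffer,
 * a caller flushes with omap_gem_dma_sync(); a later CPU fault on a page
 * then drops that page's DMA mapping again via omap_gem_cpu_sync():
 *
 *	omap_gem_dma_sync(obj, DMA_TO_DEVICE);
 *	... start DMA ...
 */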

/* Get physical address for DMA.. if 'remap' is true, and the buffer is not
 * already contiguous, remap it to pin in physically contiguous memory.. (ie.
 * map in TILER)
 */
int omap_gem_get_paddr(struct drm_gem_object *obj,
		dma_addr_t *paddr, bool remap)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);

	if (!is_contiguous(omap_obj) && remap && priv->has_dmm) {
		if (omap_obj->paddr_cnt == 0) {
			struct page **pages;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
			struct tiler_block *block;

			BUG_ON(omap_obj->block);

			ret = get_pages(obj, &pages);
			if (ret)
				goto fail;

			if (omap_obj->flags & OMAP_BO_TILED) {
				block = tiler_reserve_2d(fmt,
						omap_obj->width,
						omap_obj->height, 0);
			} else {
				block = tiler_reserve_1d(obj->size);
			}

			if (IS_ERR(block)) {
				ret = PTR_ERR(block);
				dev_err(obj->dev->dev,
					"could not remap: %d (%d)\n", ret, fmt);
				goto fail;
			}

			/* TODO: enable async refill.. */
			ret = tiler_pin(block, pages, npages,
					omap_obj->roll, true);
			if (ret) {
				tiler_release(block);
				dev_err(obj->dev->dev,
						"could not pin: %d\n", ret);
				goto fail;
			}

			omap_obj->paddr = tiler_ssptr(block);
			omap_obj->block = block;

			DBG("got paddr: %pad", &omap_obj->paddr);
		}

		omap_obj->paddr_cnt++;

		*paddr = omap_obj->paddr;
	} else if (is_contiguous(omap_obj)) {
		*paddr = omap_obj->paddr;
	} else {
		ret = -EINVAL;
		goto fail;
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
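
/*
 * Usage sketch (illustrative): pin a buffer for scanout, program the
 * hardware, then drop the pin when the buffer is no longer displayed:
 *
 *	dma_addr_t paddr;
 *
 *	ret = omap_gem_get_paddr(obj, &paddr, true);
 *	if (!ret) {
 *		... program DSS with paddr ...
 *		omap_gem_put_paddr(obj);
 *	}
 */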
| 874 | |
| 875 | /* Release physical address, when DMA is no longer being performed.. this |
| 876 | * could potentially unpin and unmap buffers from TILER |
| 877 | */ |
Tomi Valkeinen | 393a949 | 2015-04-28 14:01:36 +0300 | [diff] [blame] | 878 | void omap_gem_put_paddr(struct drm_gem_object *obj) |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 879 | { |
Rob Clark | f7f9f45 | 2011-12-05 19:19:22 -0600 | [diff] [blame] | 880 | struct omap_gem_object *omap_obj = to_omap_bo(obj); |
Tomi Valkeinen | 393a949 | 2015-04-28 14:01:36 +0300 | [diff] [blame] | 881 | int ret; |
Rob Clark | f7f9f45 | 2011-12-05 19:19:22 -0600 | [diff] [blame] | 882 | |
| 883 | mutex_lock(&obj->dev->struct_mutex); |
| 884 | if (omap_obj->paddr_cnt > 0) { |
| 885 | omap_obj->paddr_cnt--; |
| 886 | if (omap_obj->paddr_cnt == 0) { |
| 887 | ret = tiler_unpin(omap_obj->block); |
| 888 | if (ret) { |
| 889 | dev_err(obj->dev->dev, |
| 890 | "could not unpin pages: %d\n", ret); |
Rob Clark | f7f9f45 | 2011-12-05 19:19:22 -0600 | [diff] [blame] | 891 | } |
| 892 | ret = tiler_release(omap_obj->block); |
| 893 | if (ret) { |
| 894 | dev_err(obj->dev->dev, |
| 895 | "could not release unmap: %d\n", ret); |
| 896 | } |
Tomi Valkeinen | 3f4d17c | 2014-09-03 19:25:53 +0000 | [diff] [blame] | 897 | omap_obj->paddr = 0; |
Rob Clark | f7f9f45 | 2011-12-05 19:19:22 -0600 | [diff] [blame] | 898 | omap_obj->block = NULL; |
| 899 | } |
| 900 | } |
Tomi Valkeinen | 393a949 | 2015-04-28 14:01:36 +0300 | [diff] [blame] | 901 | |
Rob Clark | f7f9f45 | 2011-12-05 19:19:22 -0600 | [diff] [blame] | 902 | mutex_unlock(&obj->dev->struct_mutex); |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 903 | } |
| 904 | |
Rob Clark | 3c810c6 | 2012-08-15 15:18:01 -0500 | [diff] [blame] | 905 | /* Get rotated scanout address (only valid if already pinned), at the |
| 906 | * specified orientation and x,y offset from top-left corner of buffer |
| 907 | * (only valid for tiled 2d buffers) |
| 908 | */ |
| 909 | int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient, |
| 910 | int x, int y, dma_addr_t *paddr) |
| 911 | { |
| 912 | struct omap_gem_object *omap_obj = to_omap_bo(obj); |
| 913 | int ret = -EINVAL; |
| 914 | |
| 915 | mutex_lock(&obj->dev->struct_mutex); |
| 916 | if ((omap_obj->paddr_cnt > 0) && omap_obj->block && |
| 917 | (omap_obj->flags & OMAP_BO_TILED)) { |
| 918 | *paddr = tiler_tsptr(omap_obj->block, orient, x, y); |
| 919 | ret = 0; |
| 920 | } |
| 921 | mutex_unlock(&obj->dev->struct_mutex); |
| 922 | return ret; |
| 923 | } |
| 924 | |
| 925 | /* Get tiler stride for the buffer (only valid for 2d tiled buffers) */ |
| 926 | int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient) |
| 927 | { |
| 928 | struct omap_gem_object *omap_obj = to_omap_bo(obj); |
| 929 | int ret = -EINVAL; |
| 930 | if (omap_obj->flags & OMAP_BO_TILED) |
| 931 | ret = tiler_stride(gem2fmt(omap_obj->flags), orient); |
| 932 | return ret; |
| 933 | } |
| 934 | |
Rob Clark | 6ad11bc | 2012-04-10 13:19:55 -0500 | [diff] [blame] | 935 | /* if !remap, and we don't have pages backing, then fail, rather than |
| 936 | * increasing the pin count (which we don't really do yet anyways, |
| 937 | * because we don't support swapping pages back out). And 'remap' |
| 938 | * might not be quite the right name, but I wanted to keep it working |
| 939 | * similarly to omap_gem_get_paddr(). Note though that mutex is not |
| 940 | * aquired if !remap (because this can be called in atomic ctxt), |
| 941 | * but probably omap_gem_get_paddr() should be changed to work in the |
| 942 | * same way. If !remap, a matching omap_gem_put_pages() call is not |
| 943 | * required (and should not be made). |
| 944 | */ |
| 945 | int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages, |
| 946 | bool remap) |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 947 | { |
| 948 | int ret; |
Rob Clark | 6ad11bc | 2012-04-10 13:19:55 -0500 | [diff] [blame] | 949 | if (!remap) { |
| 950 | struct omap_gem_object *omap_obj = to_omap_bo(obj); |
| 951 | if (!omap_obj->pages) |
| 952 | return -ENOMEM; |
| 953 | *pages = omap_obj->pages; |
| 954 | return 0; |
| 955 | } |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 956 | mutex_lock(&obj->dev->struct_mutex); |
| 957 | ret = get_pages(obj, pages); |
| 958 | mutex_unlock(&obj->dev->struct_mutex); |
| 959 | return ret; |
| 960 | } |
| 961 | |
| 962 | /* release pages when DMA no longer being performed */ |
| 963 | int omap_gem_put_pages(struct drm_gem_object *obj) |
| 964 | { |
| 965 | /* do something here if we dynamically attach/detach pages.. at |
| 966 | * least they would no longer need to be pinned if everyone has |
| 967 | * released the pages.. |
| 968 | */ |
| 969 | return 0; |
| 970 | } |
| 971 | |
Laurent Pinchart | e1c1174 | 2015-12-14 22:39:30 +0200 | [diff] [blame] | 972 | #ifdef CONFIG_DRM_FBDEV_EMULATION |
Rob Clark | f7f9f45 | 2011-12-05 19:19:22 -0600 | [diff] [blame] | 973 | /* Get kernel virtual address for CPU access.. this more or less only |
| 974 | * exists for omap_fbdev. This should be called with struct_mutex |
| 975 | * held. |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 976 | */ |
| 977 | void *omap_gem_vaddr(struct drm_gem_object *obj) |
| 978 | { |
| 979 | struct omap_gem_object *omap_obj = to_omap_bo(obj); |
YAMANE Toshiaki | 696e3ca | 2012-11-14 19:33:43 +0900 | [diff] [blame] | 980 | WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); |
Rob Clark | f7f9f45 | 2011-12-05 19:19:22 -0600 | [diff] [blame] | 981 | if (!omap_obj->vaddr) { |
| 982 | struct page **pages; |
| 983 | int ret = get_pages(obj, &pages); |
| 984 | if (ret) |
| 985 | return ERR_PTR(ret); |
| 986 | omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT, |
| 987 | VM_MAP, pgprot_writecombine(PAGE_KERNEL)); |
| 988 | } |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 989 | return omap_obj->vaddr; |
| 990 | } |
Laurent Pinchart | e1c1174 | 2015-12-14 22:39:30 +0200 | [diff] [blame] | 991 | #endif |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 992 | |
Laurent Pinchart | 7ef93b0 | 2015-12-14 22:39:33 +0200 | [diff] [blame] | 993 | /* ----------------------------------------------------------------------------- |
| 994 | * Power Management |
| 995 | */ |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 996 | |
Andy Gross | e78edba | 2012-12-19 14:53:37 -0600 | [diff] [blame] | 997 | #ifdef CONFIG_PM |
| 998 | /* re-pin objects in DMM in resume path: */ |
| 999 | int omap_gem_resume(struct device *dev) |
| 1000 | { |
| 1001 | struct drm_device *drm_dev = dev_get_drvdata(dev); |
| 1002 | struct omap_drm_private *priv = drm_dev->dev_private; |
| 1003 | struct omap_gem_object *omap_obj; |
| 1004 | int ret = 0; |
| 1005 | |
| 1006 | list_for_each_entry(omap_obj, &priv->obj_list, mm_list) { |
| 1007 | if (omap_obj->block) { |
| 1008 | struct drm_gem_object *obj = &omap_obj->base; |
| 1009 | uint32_t npages = obj->size >> PAGE_SHIFT; |
| 1010 | WARN_ON(!omap_obj->pages); /* this can't happen */ |
| 1011 | ret = tiler_pin(omap_obj->block, |
| 1012 | omap_obj->pages, npages, |
| 1013 | omap_obj->roll, true); |
| 1014 | if (ret) { |
| 1015 | dev_err(dev, "could not repin: %d\n", ret); |
| 1016 | return ret; |
| 1017 | } |
| 1018 | } |
| 1019 | } |
| 1020 | |
| 1021 | return 0; |
| 1022 | } |
| 1023 | #endif |
| 1024 | |
Laurent Pinchart | 7ef93b0 | 2015-12-14 22:39:33 +0200 | [diff] [blame] | 1025 | /* ----------------------------------------------------------------------------- |
| 1026 | * DebugFS |
| 1027 | */ |
| 1028 | |
Rob Clark | f6b6036 | 2012-03-05 10:48:36 -0600 | [diff] [blame] | 1029 | #ifdef CONFIG_DEBUG_FS |
| 1030 | void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m) |
| 1031 | { |
Rob Clark | f6b6036 | 2012-03-05 10:48:36 -0600 | [diff] [blame] | 1032 | struct omap_gem_object *omap_obj = to_omap_bo(obj); |
David Herrmann | 0de2397 | 2013-07-24 21:07:52 +0200 | [diff] [blame] | 1033 | uint64_t off; |
Rob Clark | f6b6036 | 2012-03-05 10:48:36 -0600 | [diff] [blame] | 1034 | |
David Herrmann | 0de2397 | 2013-07-24 21:07:52 +0200 | [diff] [blame] | 1035 | off = drm_vma_node_start(&obj->vma_node); |
Rob Clark | f6b6036 | 2012-03-05 10:48:36 -0600 | [diff] [blame] | 1036 | |
Russell King | 2d31ca3 | 2014-07-12 10:53:41 +0100 | [diff] [blame] | 1037 | seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d", |
Rob Clark | f6b6036 | 2012-03-05 10:48:36 -0600 | [diff] [blame] | 1038 | omap_obj->flags, obj->name, obj->refcount.refcount.counter, |
Russell King | 2d31ca3 | 2014-07-12 10:53:41 +0100 | [diff] [blame] | 1039 | off, &omap_obj->paddr, omap_obj->paddr_cnt, |
Rob Clark | f6b6036 | 2012-03-05 10:48:36 -0600 | [diff] [blame] | 1040 | omap_obj->vaddr, omap_obj->roll); |
| 1041 | |
| 1042 | if (omap_obj->flags & OMAP_BO_TILED) { |
| 1043 | seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height); |
| 1044 | if (omap_obj->block) { |
| 1045 | struct tcm_area *area = &omap_obj->block->area; |
| 1046 | seq_printf(m, " (%dx%d, %dx%d)", |
| 1047 | area->p0.x, area->p0.y, |
| 1048 | area->p1.x, area->p1.y); |
| 1049 | } |
| 1050 | } else { |
| 1051 | seq_printf(m, " %d", obj->size); |
| 1052 | } |
| 1053 | |
| 1054 | seq_printf(m, "\n"); |
| 1055 | } |
| 1056 | |
| 1057 | void omap_gem_describe_objects(struct list_head *list, struct seq_file *m) |
| 1058 | { |
| 1059 | struct omap_gem_object *omap_obj; |
| 1060 | int count = 0; |
| 1061 | size_t size = 0; |
| 1062 | |
| 1063 | list_for_each_entry(omap_obj, list, mm_list) { |
| 1064 | struct drm_gem_object *obj = &omap_obj->base; |
| 1065 | seq_printf(m, " "); |
| 1066 | omap_gem_describe(obj, m); |
| 1067 | count++; |
| 1068 | size += obj->size; |
| 1069 | } |
| 1070 | |
| 1071 | seq_printf(m, "Total %d objects, %zu bytes\n", count, size); |
| 1072 | } |
| 1073 | #endif |
| 1074 | |
Laurent Pinchart | 7ef93b0 | 2015-12-14 22:39:33 +0200 | [diff] [blame] | 1075 | /* ----------------------------------------------------------------------------- |
| 1076 | * Buffer Synchronization |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1077 | */ |
| 1078 | |
Laurent Pinchart | 7ef93b0 | 2015-12-14 22:39:33 +0200 | [diff] [blame] | 1079 | static DEFINE_SPINLOCK(sync_lock); |
| 1080 | |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1081 | struct omap_gem_sync_waiter { |
| 1082 | struct list_head list; |
| 1083 | struct omap_gem_object *omap_obj; |
| 1084 | enum omap_gem_op op; |
| 1085 | uint32_t read_target, write_target; |
| 1086 | /* notify called w/ sync_lock held */ |
| 1087 | void (*notify)(void *arg); |
| 1088 | void *arg; |
| 1089 | }; |
| 1090 | |
| 1091 | /* list of omap_gem_sync_waiter.. the notify fxn gets called back when |
| 1092 | * the read and/or write target count is achieved which can call a user |
| 1093 | * callback (ex. to kick 3d and/or 2d), wakeup blocked task (prep for |
| 1094 | * cpu access), etc. |
| 1095 | */ |
| 1096 | static LIST_HEAD(waiters); |
| 1097 | |
| 1098 | static inline bool is_waiting(struct omap_gem_sync_waiter *waiter) |
| 1099 | { |
| 1100 | struct omap_gem_object *omap_obj = waiter->omap_obj; |
| 1101 | if ((waiter->op & OMAP_GEM_READ) && |
Archit Taneja | f2cff0f | 2014-04-11 12:53:31 +0530 | [diff] [blame] | 1102 | (omap_obj->sync->write_complete < waiter->write_target)) |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1103 | return true; |
| 1104 | if ((waiter->op & OMAP_GEM_WRITE) && |
Archit Taneja | f2cff0f | 2014-04-11 12:53:31 +0530 | [diff] [blame] | 1105 | (omap_obj->sync->read_complete < waiter->read_target)) |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1106 | return true; |
| 1107 | return false; |
| 1108 | } |
| 1109 | |
| 1110 | /* macro for sync debug.. */ |
| 1111 | #define SYNCDBG 0 |
| 1112 | #define SYNC(fmt, ...) do { if (SYNCDBG) \ |
| 1113 | printk(KERN_ERR "%s:%d: "fmt"\n", \ |
| 1114 | __func__, __LINE__, ##__VA_ARGS__); \ |
| 1115 | } while (0) |
| 1116 | |
| 1117 | |
| 1118 | static void sync_op_update(void) |
| 1119 | { |
| 1120 | struct omap_gem_sync_waiter *waiter, *n; |
| 1121 | list_for_each_entry_safe(waiter, n, &waiters, list) { |
| 1122 | if (!is_waiting(waiter)) { |
| 1123 | list_del(&waiter->list); |
| 1124 | SYNC("notify: %p", waiter); |
| 1125 | waiter->notify(waiter->arg); |
| 1126 | kfree(waiter); |
| 1127 | } |
| 1128 | } |
| 1129 | } |
| 1130 | |
| 1131 | static inline int sync_op(struct drm_gem_object *obj, |
| 1132 | enum omap_gem_op op, bool start) |
| 1133 | { |
| 1134 | struct omap_gem_object *omap_obj = to_omap_bo(obj); |
| 1135 | int ret = 0; |
| 1136 | |
| 1137 | spin_lock(&sync_lock); |
| 1138 | |
| 1139 | if (!omap_obj->sync) { |
| 1140 | omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC); |
| 1141 | if (!omap_obj->sync) { |
| 1142 | ret = -ENOMEM; |
| 1143 | goto unlock; |
| 1144 | } |
| 1145 | } |
| 1146 | |
| 1147 | if (start) { |
| 1148 | if (op & OMAP_GEM_READ) |
| 1149 | omap_obj->sync->read_pending++; |
| 1150 | if (op & OMAP_GEM_WRITE) |
| 1151 | omap_obj->sync->write_pending++; |
| 1152 | } else { |
| 1153 | if (op & OMAP_GEM_READ) |
| 1154 | omap_obj->sync->read_complete++; |
| 1155 | if (op & OMAP_GEM_WRITE) |
| 1156 | omap_obj->sync->write_complete++; |
| 1157 | sync_op_update(); |
| 1158 | } |
| 1159 | |
| 1160 | unlock: |
| 1161 | spin_unlock(&sync_lock); |
| 1162 | |
| 1163 | return ret; |
| 1164 | } |
| 1165 | |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1166 | /* mark the start of read and/or write operation */ |
| 1167 | int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op) |
| 1168 | { |
| 1169 | return sync_op(obj, op, true); |
| 1170 | } |
| 1171 | |
| 1172 | int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op) |
| 1173 | { |
| 1174 | return sync_op(obj, op, false); |
| 1175 | } |
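| | /* |
| |  * Illustrative only: how a caller might bracket an asynchronous write |
| |  * to a buffer. submit_job() and job_done() are hypothetical helpers, |
| |  * not part of this driver: |
| |  * |
| |  *	static void job_done(void *arg) |
| |  *	{ |
| |  *		omap_gem_op_finish(arg, OMAP_GEM_WRITE); |
| |  *	} |
| |  * |
| |  *	omap_gem_op_start(obj, OMAP_GEM_WRITE); |
| |  *	submit_job(obj, job_done, obj); |
| |  */ |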
| 1176 | |
| 1177 | static DECLARE_WAIT_QUEUE_HEAD(sync_event); |
| 1178 | |
| 1179 | static void sync_notify(void *arg) |
| 1180 | { |
| 1181 | struct task_struct **waiter_task = arg; |
| 1182 | *waiter_task = NULL; |
| 1183 | wake_up_all(&sync_event); |
| 1184 | } |
| 1185 | |
| 1186 | int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op) |
| 1187 | { |
| 1188 | struct omap_gem_object *omap_obj = to_omap_bo(obj); |
| 1189 | int ret = 0; |
| 1190 | if (omap_obj->sync) { |
| 1191 | struct task_struct *waiter_task = current; |
| 1192 | struct omap_gem_sync_waiter *waiter = |
| 1193 | kzalloc(sizeof(*waiter), GFP_KERNEL); |
| 1194 | |
YAMANE Toshiaki | ae05303 | 2012-11-14 19:33:17 +0900 | [diff] [blame] | 1195 | if (!waiter) |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1196 | return -ENOMEM; |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1197 | |
| 1198 | waiter->omap_obj = omap_obj; |
| 1199 | waiter->op = op; |
| 1200 | waiter->read_target = omap_obj->sync->read_pending; |
| 1201 | waiter->write_target = omap_obj->sync->write_pending; |
| 1202 | waiter->notify = sync_notify; |
| 1203 | waiter->arg = &waiter_task; |
| 1204 | |
| 1205 | spin_lock(&sync_lock); |
| 1206 | if (is_waiting(waiter)) { |
| 1207 | SYNC("waited: %p", waiter); |
| 1208 | list_add_tail(&waiter->list, &waiters); |
| 1209 | spin_unlock(&sync_lock); |
| 1210 | ret = wait_event_interruptible(sync_event, |
| 1211 | (waiter_task == NULL)); |
| 1212 | spin_lock(&sync_lock); |
| 1213 | if (waiter_task) { |
| 1214 | SYNC("interrupted: %p", waiter); |
| 1215 | /* we were interrupted */ |
| 1216 | list_del(&waiter->list); |
| 1217 | waiter_task = NULL; |
| 1218 | } else { |
| 1219 | /* freed in sync_op_update() */ |
| 1220 | waiter = NULL; |
| 1221 | } |
| 1222 | } |
| 1223 | spin_unlock(&sync_lock); |
Fabian Frederick | d2c87e2 | 2014-07-04 21:17:15 +0200 | [diff] [blame] | 1224 | kfree(waiter); |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1225 | } |
| 1226 | return ret; |
| 1227 | } |
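| | /* |
| |  * Illustrative only: a caller that wants CPU access to a buffer the |
| |  * hardware may still be writing can block until all writes started so |
| |  * far have completed. A non-zero return means the wait was interrupted |
| |  * by a signal: |
| |  * |
| |  *	ret = omap_gem_op_sync(obj, OMAP_GEM_READ); |
| |  *	if (ret) |
| |  *		return ret; |
| |  */ |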
| 1228 | |
| 1229 | /* Call fxn(arg) synchronously, or asynchronously if the op is |
| 1230 |  * currently blocked. fxn() may be invoked from any context. |
| 1231 |  * |
| 1232 |  * (TODO: for now fxn is called back from whichever context calls |
Tomi Valkeinen | 3f50eff | 2016-01-27 10:58:43 +0200 | [diff] [blame] | 1233 |  * omap_gem_op_finish(), but this could be better defined later |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1234 |  * if needed.) |
| 1235 |  * |
| 1236 |  * TODO: more code in common with _sync() |
| 1237 |  */ |
| 1238 | int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op, |
| 1239 | void (*fxn)(void *arg), void *arg) |
| 1240 | { |
| 1241 | struct omap_gem_object *omap_obj = to_omap_bo(obj); |
| 1242 | if (omap_obj->sync) { |
| 1243 | struct omap_gem_sync_waiter *waiter = |
| 1244 | kzalloc(sizeof(*waiter), GFP_ATOMIC); |
| 1245 | |
YAMANE Toshiaki | ae05303 | 2012-11-14 19:33:17 +0900 | [diff] [blame] | 1246 | if (!waiter) |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1247 | return -ENOMEM; |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1248 | |
| 1249 | waiter->omap_obj = omap_obj; |
| 1250 | waiter->op = op; |
| 1251 | waiter->read_target = omap_obj->sync->read_pending; |
| 1252 | waiter->write_target = omap_obj->sync->write_pending; |
| 1253 | waiter->notify = fxn; |
| 1254 | waiter->arg = arg; |
| 1255 | |
| 1256 | spin_lock(&sync_lock); |
| 1257 | if (is_waiting(waiter)) { |
| 1258 | SYNC("waited: %p", waiter); |
| 1259 | list_add_tail(&waiter->list, &waiters); |
| 1260 | spin_unlock(&sync_lock); |
| 1261 | return 0; |
| 1262 | } |
| 1263 | |
| 1264 | spin_unlock(&sync_lock); |
Subhajit Paul | 15ec2ca | 2014-04-11 12:53:30 +0530 | [diff] [blame] | 1265 | |
| 1266 | kfree(waiter); |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1267 | } |
| 1268 | |
| 1269 | /* no waiting.. */ |
| 1270 | fxn(arg); |
| 1271 | |
| 1272 | return 0; |
| 1273 | } |
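| | /* |
| |  * Illustrative only: run a callback once the buffer becomes writable |
| |  * (i.e. all outstanding reads have finished) without blocking the |
| |  * caller. kick_scanout() is a hypothetical helper: |
| |  * |
| |  *	omap_gem_op_async(obj, OMAP_GEM_WRITE, kick_scanout, obj); |
| |  * |
| |  * If nothing is outstanding, kick_scanout() runs immediately in the |
| |  * calling context; otherwise it runs from whichever context calls the |
| |  * matching omap_gem_op_finish() (see the TODO above). |
| |  */ |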
| 1274 | |
Laurent Pinchart | 7ef93b0 | 2015-12-14 22:39:33 +0200 | [diff] [blame] | 1275 | /* ----------------------------------------------------------------------------- |
| 1276 | * Constructor & Destructor |
| 1277 | */ |
| 1278 | |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1279 | void omap_gem_free_object(struct drm_gem_object *obj) |
| 1280 | { |
| 1281 | struct drm_device *dev = obj->dev; |
Tomi Valkeinen | 76c4055 | 2014-12-17 14:34:22 +0200 | [diff] [blame] | 1282 | struct omap_drm_private *priv = dev->dev_private; |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1283 | struct omap_gem_object *omap_obj = to_omap_bo(obj); |
| 1284 | |
Rob Clark | f7f9f45 | 2011-12-05 19:19:22 -0600 | [diff] [blame] | 1285 | evict(obj); |
| 1286 | |
Rob Clark | f6b6036 | 2012-03-05 10:48:36 -0600 | [diff] [blame] | 1287 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); |
| 1288 | |
Tomi Valkeinen | 76c4055 | 2014-12-17 14:34:22 +0200 | [diff] [blame] | 1289 | spin_lock(&priv->list_lock); |
Rob Clark | f6b6036 | 2012-03-05 10:48:36 -0600 | [diff] [blame] | 1290 | list_del(&omap_obj->mm_list); |
Tomi Valkeinen | 76c4055 | 2014-12-17 14:34:22 +0200 | [diff] [blame] | 1291 | spin_unlock(&priv->list_lock); |
Rob Clark | f6b6036 | 2012-03-05 10:48:36 -0600 | [diff] [blame] | 1292 | |
Rob Clark | 9a0774e | 2012-01-16 12:51:17 -0600 | [diff] [blame] | 1293 | /* The object should no longer be pinned at this point; warn so a |
| 1294 |  * leaked pin is caught instead of freeing a buffer still in use. |
| 1295 |  */ |
| 1296 | WARN_ON(omap_obj->paddr_cnt > 0); |
| 1297 | |
Tomi Valkeinen | 3f50eff | 2016-01-27 10:58:43 +0200 | [diff] [blame] | 1298 | if (omap_obj->pages) { |
| 1299 | if (omap_obj->flags & OMAP_BO_MEM_DMABUF) |
| 1300 | kfree(omap_obj->pages); |
| 1301 | else |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1302 | omap_gem_detach_pages(obj); |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1303 | } |
| 1304 | |
Tomi Valkeinen | 3f50eff | 2016-01-27 10:58:43 +0200 | [diff] [blame] | 1305 | if (omap_obj->flags & OMAP_BO_MEM_DMA_API) { |
Linus Torvalds | 266c73b | 2016-03-21 13:48:00 -0700 | [diff] [blame] | 1306 | dma_free_wc(dev->dev, obj->size, omap_obj->vaddr, |
| 1307 | omap_obj->paddr); |
Tomi Valkeinen | 3f50eff | 2016-01-27 10:58:43 +0200 | [diff] [blame] | 1308 | } else if (omap_obj->vaddr) { |
| 1309 | vunmap(omap_obj->vaddr); |
| 1310 | } else if (obj->import_attach) { |
| 1311 | drm_prime_gem_destroy(obj, omap_obj->sgt); |
| 1312 | } |
| 1313 | |
| 1314 | kfree(omap_obj->sync); |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1315 | |
| 1316 | drm_gem_object_release(obj); |
| 1317 | |
Laurent Pinchart | 00e9c7c | 2015-12-14 22:39:38 +0200 | [diff] [blame] | 1318 | kfree(omap_obj); |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1319 | } |
| 1320 | |
| 1321 | /* GEM buffer object constructor */ |
| 1322 | struct drm_gem_object *omap_gem_new(struct drm_device *dev, |
| 1323 | union omap_gem_size gsize, uint32_t flags) |
| 1324 | { |
Rob Clark | a6a9182 | 2011-12-09 23:26:08 -0600 | [diff] [blame] | 1325 | struct omap_drm_private *priv = dev->dev_private; |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1326 | struct omap_gem_object *omap_obj; |
Laurent Pinchart | 92b4b44 | 2015-12-14 22:39:41 +0200 | [diff] [blame] | 1327 | struct drm_gem_object *obj; |
David Herrmann | ab5a60c | 2014-05-25 12:45:39 +0200 | [diff] [blame] | 1328 | struct address_space *mapping; |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1329 | size_t size; |
| 1330 | int ret; |
| 1331 | |
Laurent Pinchart | 9cba3b9 | 2015-12-14 22:39:43 +0200 | [diff] [blame] | 1332 | /* Validate the flags and compute the memory and cache flags. */ |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1333 | if (flags & OMAP_BO_TILED) { |
Laurent Pinchart | f430274 | 2015-12-14 22:39:34 +0200 | [diff] [blame] | 1334 | if (!priv->usergart) { |
Rob Clark | f7f9f45 | 2011-12-05 19:19:22 -0600 | [diff] [blame] | 1335 | dev_err(dev->dev, "Tiled buffers require DMM\n"); |
Laurent Pinchart | 92b4b44 | 2015-12-14 22:39:41 +0200 | [diff] [blame] | 1336 | return NULL; |
Rob Clark | f7f9f45 | 2011-12-05 19:19:22 -0600 | [diff] [blame] | 1337 | } |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1338 | |
Laurent Pinchart | 9cba3b9 | 2015-12-14 22:39:43 +0200 | [diff] [blame] | 1339 | /* |
| 1340 | * Tiled buffers are always shmem paged backed. When they are |
| 1341 | * scanned out, they are remapped into DMM/TILER. |
Rob Clark | f7f9f45 | 2011-12-05 19:19:22 -0600 | [diff] [blame] | 1342 | */ |
| 1343 | flags &= ~OMAP_BO_SCANOUT; |
Laurent Pinchart | 9cba3b9 | 2015-12-14 22:39:43 +0200 | [diff] [blame] | 1344 | flags |= OMAP_BO_MEM_SHMEM; |
Rob Clark | f7f9f45 | 2011-12-05 19:19:22 -0600 | [diff] [blame] | 1345 | |
Laurent Pinchart | 9cba3b9 | 2015-12-14 22:39:43 +0200 | [diff] [blame] | 1346 | /* |
| 1347 | * Cached buffers are currently not allowed; the cache maintenance |
| 1348 | * paths need more work before they can be supported. |
Rob Clark | f7f9f45 | 2011-12-05 19:19:22 -0600 | [diff] [blame] | 1349 | */ |
Tomi Valkeinen | 7cb0d6c | 2014-09-25 19:24:29 +0000 | [diff] [blame] | 1350 | flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED); |
| 1351 | flags |= tiler_get_cpu_cache_flags(); |
Laurent Pinchart | 9cba3b9 | 2015-12-14 22:39:43 +0200 | [diff] [blame] | 1352 | } else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) { |
| 1353 | /* |
Laurent Pinchart | b22e669 | 2015-12-14 22:39:44 +0200 | [diff] [blame] | 1354 | * OMAP_BO_SCANOUT hints that the buffer doesn't need to be |
| 1355 | * tiled. However, to lower the pressure on memory allocation, |
| 1356 | * use contiguous memory only if no TILER is available. |
Laurent Pinchart | 9cba3b9 | 2015-12-14 22:39:43 +0200 | [diff] [blame] | 1357 | */ |
| 1358 | flags |= OMAP_BO_MEM_DMA_API; |
Tomi Valkeinen | 3f50eff | 2016-01-27 10:58:43 +0200 | [diff] [blame] | 1359 | } else if (!(flags & OMAP_BO_MEM_DMABUF)) { |
Laurent Pinchart | 9cba3b9 | 2015-12-14 22:39:43 +0200 | [diff] [blame] | 1360 | /* |
Tomi Valkeinen | 3f50eff | 2016-01-27 10:58:43 +0200 | [diff] [blame] | 1361 | * All other buffers not backed by dma_buf are shmem-backed. |
Laurent Pinchart | 9cba3b9 | 2015-12-14 22:39:43 +0200 | [diff] [blame] | 1362 | */ |
| 1363 | flags |= OMAP_BO_MEM_SHMEM; |
Rob Clark | f7f9f45 | 2011-12-05 19:19:22 -0600 | [diff] [blame] | 1364 | } |
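| | /* |
| |  * Backing-store selection, summarized from the checks above: |
| |  * |
| |  *	OMAP_BO_TILED           -> shmem pages, remapped through DMM/TILER |
| |  *	OMAP_BO_SCANOUT, no DMM -> contiguous memory via the dma_alloc_* API |
| |  *	OMAP_BO_MEM_DMABUF      -> pages provided by the imported sg table |
| |  *	everything else         -> shmem pages |
| |  */ |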
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1365 | |
Laurent Pinchart | 9cba3b9 | 2015-12-14 22:39:43 +0200 | [diff] [blame] | 1366 | /* Allocate and initialize the OMAP GEM object. */ |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1367 | omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL); |
Joe Perches | 78110bb | 2013-02-11 09:41:29 -0800 | [diff] [blame] | 1368 | if (!omap_obj) |
Tomi Valkeinen | a903e3b | 2015-03-17 15:31:11 +0200 | [diff] [blame] | 1369 | return NULL; |
Rob Clark | f6b6036 | 2012-03-05 10:48:36 -0600 | [diff] [blame] | 1370 | |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1371 | obj = &omap_obj->base; |
Laurent Pinchart | 9cba3b9 | 2015-12-14 22:39:43 +0200 | [diff] [blame] | 1372 | omap_obj->flags = flags; |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1373 | |
Laurent Pinchart | 9cba3b9 | 2015-12-14 22:39:43 +0200 | [diff] [blame] | 1374 | if (flags & OMAP_BO_TILED) { |
| 1375 | /* |
| 1376 | * For tiled buffers align dimensions to slot boundaries and |
| 1377 | * calculate size based on aligned dimensions. |
Rob Clark | a6a9182 | 2011-12-09 23:26:08 -0600 | [diff] [blame] | 1378 | */ |
Laurent Pinchart | 9cba3b9 | 2015-12-14 22:39:43 +0200 | [diff] [blame] | 1379 | tiler_align(gem2fmt(flags), &gsize.tiled.width, |
| 1380 | &gsize.tiled.height); |
YAMANE Toshiaki | ae05303 | 2012-11-14 19:33:17 +0900 | [diff] [blame] | 1381 | |
Laurent Pinchart | 9cba3b9 | 2015-12-14 22:39:43 +0200 | [diff] [blame] | 1382 | size = tiler_size(gem2fmt(flags), gsize.tiled.width, |
| 1383 | gsize.tiled.height); |
Tomi Valkeinen | a903e3b | 2015-03-17 15:31:11 +0200 | [diff] [blame] | 1384 | |
Laurent Pinchart | 9cba3b9 | 2015-12-14 22:39:43 +0200 | [diff] [blame] | 1385 | omap_obj->width = gsize.tiled.width; |
| 1386 | omap_obj->height = gsize.tiled.height; |
| 1387 | } else { |
| 1388 | size = PAGE_ALIGN(gsize.bytes); |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1389 | } |
| 1390 | |
Laurent Pinchart | c2eb77f | 2016-03-02 12:51:19 +0200 | [diff] [blame] | 1391 | /* Initialize the GEM object. */ |
| 1392 | if (!(flags & OMAP_BO_MEM_SHMEM)) { |
| 1393 | drm_gem_private_object_init(dev, obj, size); |
| 1394 | } else { |
| 1395 | ret = drm_gem_object_init(dev, obj, size); |
| 1396 | if (ret) |
| 1397 | goto err_free; |
| 1398 | |
Al Viro | 93c76a3 | 2015-12-04 23:45:44 -0500 | [diff] [blame] | 1399 | mapping = obj->filp->f_mapping; |
Laurent Pinchart | c2eb77f | 2016-03-02 12:51:19 +0200 | [diff] [blame] | 1400 | mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32); |
| 1401 | } |
Tomi Valkeinen | a903e3b | 2015-03-17 15:31:11 +0200 | [diff] [blame] | 1402 | |
Laurent Pinchart | 9cba3b9 | 2015-12-14 22:39:43 +0200 | [diff] [blame] | 1403 | /* Allocate memory if needed. */ |
| 1404 | if (flags & OMAP_BO_MEM_DMA_API) { |
Linus Torvalds | 266c73b | 2016-03-21 13:48:00 -0700 | [diff] [blame] | 1405 | omap_obj->vaddr = dma_alloc_wc(dev->dev, size, |
| 1406 | &omap_obj->paddr, |
| 1407 | GFP_KERNEL); |
Laurent Pinchart | 9cba3b9 | 2015-12-14 22:39:43 +0200 | [diff] [blame] | 1408 | if (!omap_obj->vaddr) |
Laurent Pinchart | c2eb77f | 2016-03-02 12:51:19 +0200 | [diff] [blame] | 1409 | goto err_release; |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1410 | } |
| 1411 | |
| 1412 | spin_lock(&priv->list_lock); |
| 1413 | list_add(&omap_obj->mm_list, &priv->obj_list); |
Rob Clark | f7f9f45 | 2011-12-05 19:19:22 -0600 | [diff] [blame] | 1414 | spin_unlock(&priv->list_lock); |
| 1415 | |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1416 | return obj; |
| 1417 | |
Laurent Pinchart | c2eb77f | 2016-03-02 12:51:19 +0200 | [diff] [blame] | 1418 | err_release: |
| 1419 | drm_gem_object_release(obj); |
| 1420 | err_free: |
| 1421 | kfree(omap_obj); |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1422 | return NULL; |
| 1423 | } |
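| | /* |
| |  * Illustrative only: allocating a write-combined scanout buffer, with |
| |  * arbitrary sizes. For tiled buffers the size union carries dimensions |
| |  * instead of a byte count: |
| |  * |
| |  *	union omap_gem_size gsize = { .bytes = SZ_1M }; |
| |  *	struct drm_gem_object *obj; |
| |  * |
| |  *	obj = omap_gem_new(dev, gsize, OMAP_BO_SCANOUT | OMAP_BO_WC); |
| |  * |
| |  *	gsize.tiled.width = 1920; |
| |  *	gsize.tiled.height = 1080; |
| |  *	obj = omap_gem_new(dev, gsize, OMAP_BO_TILED_16); |
| |  */ |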
Rob Clark | f7f9f45 | 2011-12-05 19:19:22 -0600 | [diff] [blame] | 1424 | |
Laurent Pinchart | b22e669 | 2015-12-14 22:39:44 +0200 | [diff] [blame] | 1425 | struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size, |
| 1426 | struct sg_table *sgt) |
| 1427 | { |
| 1428 | struct omap_drm_private *priv = dev->dev_private; |
| 1429 | struct omap_gem_object *omap_obj; |
| 1430 | struct drm_gem_object *obj; |
| 1431 | union omap_gem_size gsize; |
| 1432 | |
| 1433 | /* Without a DMM only physically contiguous buffers can be supported. */ |
| 1434 | if (sgt->orig_nents != 1 && !priv->has_dmm) |
| 1435 | return ERR_PTR(-EINVAL); |
| 1436 | |
| 1437 | mutex_lock(&dev->struct_mutex); |
| 1438 | |
| 1439 | gsize.bytes = PAGE_ALIGN(size); |
| 1440 | obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC); |
| 1441 | if (!obj) { |
| 1442 | obj = ERR_PTR(-ENOMEM); |
| 1443 | goto done; |
| 1444 | } |
| 1445 | |
| 1446 | omap_obj = to_omap_bo(obj); |
| 1447 | omap_obj->sgt = sgt; |
| 1448 | |
| 1449 | if (sgt->orig_nents == 1) { |
| 1450 | omap_obj->paddr = sg_dma_address(sgt->sgl); |
| 1451 | } else { |
| 1452 | /* Create pages list from sgt */ |
| 1453 | struct sg_page_iter iter; |
| 1454 | struct page **pages; |
| 1455 | unsigned int npages; |
| 1456 | unsigned int i = 0; |
| 1457 | |
| 1458 | npages = DIV_ROUND_UP(size, PAGE_SIZE); |
| 1459 | pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL); |
| 1460 | if (!pages) { |
| 1461 | omap_gem_free_object(obj); |
| 1462 | obj = ERR_PTR(-ENOMEM); |
| 1463 | goto done; |
| 1464 | } |
| 1465 | |
| 1466 | omap_obj->pages = pages; |
| 1467 | |
| 1468 | for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) { |
| | /* guard against sg lists with more pages than we allocated for */ |
| 1469 | if (WARN_ON(i >= npages)) |
| 1470 | break; |
| 1471 | pages[i++] = sg_page_iter_page(&iter); |
| 1472 | } |
| 1473 | |
| 1474 | if (WARN_ON(i != npages)) { |
| 1475 | omap_gem_free_object(obj); |
| 1476 | obj = ERR_PTR(-ENOMEM); |
| 1477 | goto done; |
| 1478 | } |
| 1479 | } |
| 1480 | |
| 1481 | done: |
| 1482 | mutex_unlock(&dev->struct_mutex); |
| 1483 | return obj; |
| 1484 | } |
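| | /* |
| |  * Illustrative only: the dma-buf/PRIME import path is the expected |
| |  * caller, roughly along these lines (attach and sgt come from the |
| |  * dma-buf core): |
| |  * |
| |  *	obj = omap_gem_new_dmabuf(dev, attach->dmabuf->size, sgt); |
| |  *	if (IS_ERR(obj)) |
| |  *		return ERR_CAST(obj); |
| |  */ |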
| 1485 | |
Laurent Pinchart | 7ef93b0 | 2015-12-14 22:39:33 +0200 | [diff] [blame] | 1486 | /* convenience method to construct a GEM buffer object, and userspace handle */ |
| 1487 | int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file, |
| 1488 | union omap_gem_size gsize, uint32_t flags, uint32_t *handle) |
| 1489 | { |
| 1490 | struct drm_gem_object *obj; |
| 1491 | int ret; |
| 1492 | |
| 1493 | obj = omap_gem_new(dev, gsize, flags); |
| 1494 | if (!obj) |
| 1495 | return -ENOMEM; |
| 1496 | |
| 1497 | ret = drm_gem_handle_create(file, obj, handle); |
| 1498 | if (ret) { |
Laurent Pinchart | 74128a2 | 2015-12-14 22:39:39 +0200 | [diff] [blame] | 1499 | omap_gem_free_object(obj); |
Laurent Pinchart | 7ef93b0 | 2015-12-14 22:39:33 +0200 | [diff] [blame] | 1500 | return ret; |
| 1501 | } |
| 1502 | |
| 1503 | /* drop reference from allocate - handle holds it now */ |
| 1504 | drm_gem_object_unreference_unlocked(obj); |
| 1505 | |
| 1506 | return 0; |
| 1507 | } |
| 1508 | |
| 1509 | /* ----------------------------------------------------------------------------- |
| 1510 | * Init & Cleanup |
| 1511 | */ |
| 1512 | |
| 1513 | /* If DMM is used, set up the usergart regions needed to map tiled buffers to userspace. */ |
Rob Clark | f7f9f45 | 2011-12-05 19:19:22 -0600 | [diff] [blame] | 1514 | void omap_gem_init(struct drm_device *dev) |
| 1515 | { |
Rob Clark | a6a9182 | 2011-12-09 23:26:08 -0600 | [diff] [blame] | 1516 | struct omap_drm_private *priv = dev->dev_private; |
Laurent Pinchart | f430274 | 2015-12-14 22:39:34 +0200 | [diff] [blame] | 1517 | struct omap_drm_usergart *usergart; |
Rob Clark | f7f9f45 | 2011-12-05 19:19:22 -0600 | [diff] [blame] | 1518 | const enum tiler_fmt fmts[] = { |
| 1519 | TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT |
| 1520 | }; |
Andy Gross | 5c13779 | 2012-03-05 10:48:39 -0600 | [diff] [blame] | 1521 | int i, j; |
Rob Clark | f7f9f45 | 2011-12-05 19:19:22 -0600 | [diff] [blame] | 1522 | |
Andy Gross | e5e4e9b | 2012-10-17 00:30:03 -0500 | [diff] [blame] | 1523 | if (!dmm_is_available()) { |
Rob Clark | f7f9f45 | 2011-12-05 19:19:22 -0600 | [diff] [blame] | 1524 | /* DMM only supported on OMAP4 and later, so this isn't fatal */ |
Andy Gross | 5c13779 | 2012-03-05 10:48:39 -0600 | [diff] [blame] | 1525 | dev_warn(dev->dev, "DMM not available, disabling DMM support\n"); |
Rob Clark | f7f9f45 | 2011-12-05 19:19:22 -0600 | [diff] [blame] | 1526 | return; |
| 1527 | } |
| 1528 | |
Joe Perches | 78110bb | 2013-02-11 09:41:29 -0800 | [diff] [blame] | 1529 | usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL); |
| 1530 | if (!usergart) |
Rob Clark | b369839 | 2011-12-09 23:26:06 -0600 | [diff] [blame] | 1531 | return; |
Rob Clark | f7f9f45 | 2011-12-05 19:19:22 -0600 | [diff] [blame] | 1532 | |
| 1533 | /* reserve 4k aligned/wide regions for userspace mappings: */ |
| 1534 | for (i = 0; i < ARRAY_SIZE(fmts); i++) { |
| 1535 | uint16_t h = 1, w = PAGE_SIZE >> i; |
| 1536 | tiler_align(fmts[i], &w, &h); |
| 1537 | /* note: since each region is one 4 KiB page wide and uses the |
| 1538 | * minimum number of rows, the height ends up being the same as |
| 1539 | * the number of pages in the region |
| 1540 | */ |
| 1541 | usergart[i].height = h; |
| 1542 | usergart[i].height_shift = ilog2(h); |
Rob Clark | 3c810c6 | 2012-08-15 15:18:01 -0500 | [diff] [blame] | 1543 | usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT; |
Rob Clark | f7f9f45 | 2011-12-05 19:19:22 -0600 | [diff] [blame] | 1544 | usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i); |
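| | /* |
| |  * Worked example with hypothetical values, assuming 4 KiB pages: if |
| |  * tiler_align() returned h = 64 for the 8-bit format (i == 0), then |
| |  * height_shift = 6 and slot_shift = ilog2((4096 / 64) >> 0) = 6. |
| |  */ |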
| 1545 | for (j = 0; j < NUM_USERGART_ENTRIES; j++) { |
Laurent Pinchart | f430274 | 2015-12-14 22:39:34 +0200 | [diff] [blame] | 1546 | struct omap_drm_usergart_entry *entry; |
| 1547 | struct tiler_block *block; |
| 1548 | |
| 1549 | entry = &usergart[i].entry[j]; |
| 1550 | block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE); |
Rob Clark | f7f9f45 | 2011-12-05 19:19:22 -0600 | [diff] [blame] | 1551 | if (IS_ERR(block)) { |
| 1552 | dev_err(dev->dev, |
| 1553 | "reserve failed: %d, %d, %ld\n", |
| 1554 | i, j, PTR_ERR(block)); |
| 1555 | return; |
| 1556 | } |
| 1557 | entry->paddr = tiler_ssptr(block); |
| 1558 | entry->block = block; |
| 1559 | |
Russell King | 2d31ca3 | 2014-07-12 10:53:41 +0100 | [diff] [blame] | 1560 | DBG("%d:%d: %dx%d: paddr=%pad stride=%d", i, j, w, h, |
| 1561 | &entry->paddr, |
Rob Clark | f7f9f45 | 2011-12-05 19:19:22 -0600 | [diff] [blame] | 1562 | usergart[i].stride_pfn << PAGE_SHIFT); |
| 1563 | } |
| 1564 | } |
Rob Clark | a6a9182 | 2011-12-09 23:26:08 -0600 | [diff] [blame] | 1565 | |
Laurent Pinchart | f430274 | 2015-12-14 22:39:34 +0200 | [diff] [blame] | 1566 | priv->usergart = usergart; |
Rob Clark | a6a9182 | 2011-12-09 23:26:08 -0600 | [diff] [blame] | 1567 | priv->has_dmm = true; |
Rob Clark | f7f9f45 | 2011-12-05 19:19:22 -0600 | [diff] [blame] | 1568 | } |
| 1569 | |
| 1570 | void omap_gem_deinit(struct drm_device *dev) |
| 1571 | { |
Laurent Pinchart | f430274 | 2015-12-14 22:39:34 +0200 | [diff] [blame] | 1572 | struct omap_drm_private *priv = dev->dev_private; |
| 1573 | |
Rob Clark | f7f9f45 | 2011-12-05 19:19:22 -0600 | [diff] [blame] | 1574 | /* I believe we can rely on there being no more outstanding GEM |
| 1575 | * objects which could depend on usergart/dmm at this point. |
| 1576 | */ |
Laurent Pinchart | f430274 | 2015-12-14 22:39:34 +0200 | [diff] [blame] | 1577 | kfree(priv->usergart); |
Rob Clark | f7f9f45 | 2011-12-05 19:19:22 -0600 | [diff] [blame] | 1578 | } |