Takashi Saito | 1517b03 | 2015-09-07 01:40:25 -0300 | [diff] [blame] | 1 | /* |
 * vsp1_dl.c -- R-Car VSP1 Display List
| 3 | * |
| 4 | * Copyright (C) 2015 Renesas Corporation |
| 5 | * |
| 6 | * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) |
| 7 | * |
| 8 | * This program is free software; you can redistribute it and/or modify |
| 9 | * it under the terms of the GNU General Public License as published by |
| 10 | * the Free Software Foundation; either version 2 of the License, or |
| 11 | * (at your option) any later version. |
| 12 | */ |
| 13 | |
| 14 | #include <linux/device.h> |
| 15 | #include <linux/dma-mapping.h> |
| 16 | #include <linux/gfp.h> |
| 17 | #include <linux/slab.h> |
Laurent Pinchart | 9489a8f | 2016-05-13 19:17:02 -0300 | [diff] [blame] | 18 | #include <linux/workqueue.h> |
Takashi Saito | 1517b03 | 2015-09-07 01:40:25 -0300 | [diff] [blame] | 19 | |
| 20 | #include "vsp1.h" |
| 21 | #include "vsp1_dl.h" |
Takashi Saito | 1517b03 | 2015-09-07 01:40:25 -0300 | [diff] [blame] | 22 | |
Laurent Pinchart | f81e83c | 2016-03-03 13:36:34 -0300 | [diff] [blame] | 23 | #define VSP1_DL_NUM_ENTRIES 256 |
Takashi Saito | 1517b03 | 2015-09-07 01:40:25 -0300 | [diff] [blame] | 24 | |
Laurent Pinchart | 1216198 | 2015-11-14 22:48:27 -0200 | [diff] [blame] | 25 | #define VSP1_DLH_INT_ENABLE (1 << 1) |
| 26 | #define VSP1_DLH_AUTO_START (1 << 0) |
| 27 | |
/*
 * One entry of the body table embedded in a display list header: the size in
 * bytes and DMA address of a display list body. The layout is consumed by the
 * hardware through DMA, hence the packed attribute.
 */
struct vsp1_dl_header_list {
	u32 num_bytes;
	u32 addr;
} __attribute__((__packed__));
| 32 | |
/*
 * Display list header, read by the hardware from DMA memory when operating in
 * header mode. It references up to 8 display list bodies and optionally
 * points to the next header for automatic chaining (see
 * vsp1_dl_list_fill_header() for how @next_header and @flags are filled).
 */
struct vsp1_dl_header {
	u32 num_lists;
	struct vsp1_dl_header_list lists[8];
	u32 next_header;
	u32 flags;
} __attribute__((__packed__));
| 39 | |
/*
 * A single display list entry: one register address/value pair, in the
 * DMA-visible format processed by the hardware.
 */
struct vsp1_dl_entry {
	u32 addr;
	u32 data;
} __attribute__((__packed__));
| 44 | |
/**
 * struct vsp1_dl_body - Display list body
 * @list: entry in the display list list of bodies
 * @vsp1: the VSP1 device
 * @entries: CPU mapping of the array of register/value entries
 * @dma: DMA address of the entries
 * @size: size of the DMA memory in bytes (entries plus optional extra space)
 * @num_entries: number of stored entries
 */
struct vsp1_dl_body {
	struct list_head list;
	struct vsp1_device *vsp1;

	struct vsp1_dl_entry *entries;
	dma_addr_t dma;
	size_t size;

	unsigned int num_entries;
};
| 64 | |
/**
 * struct vsp1_dl_list - Display list
 * @list: entry in the display list manager lists
 * @dlm: the display list manager
 * @header: display list header, NULL for headerless lists
 * @dma: DMA address for the header
 * @body0: first display list body
 * @fragments: list of extra display list bodies
 * @has_chain: if true, indicates that there's a partition chain
 * @chain: entry in the display list partition chain
 */
struct vsp1_dl_list {
	struct list_head list;
	struct vsp1_dl_manager *dlm;

	struct vsp1_dl_header *header;
	dma_addr_t dma;

	struct vsp1_dl_body body0;
	struct list_head fragments;

	bool has_chain;
	struct list_head chain;
};
| 88 | |
/*
 * Display list operation modes. In header mode the hardware reads a header
 * structure referencing the bodies; in headerless mode the body address and
 * size are programmed directly in registers (see vsp1_dl_list_hw_enqueue()).
 */
enum vsp1_dl_mode {
	VSP1_DL_MODE_HEADER,
	VSP1_DL_MODE_HEADERLESS,
};
| 93 | |
/**
 * struct vsp1_dl_manager - Display List manager
 * @index: index of the related WPF
 * @mode: display list operation mode (header or headerless)
 * @singleshot: execute the display list in single-shot mode
 * @vsp1: the VSP1 device
 * @lock: protects the free, active, queued, pending and gc_fragments lists
 * @free: list of all free display lists
 * @active: list currently being processed (loaded) by hardware
 * @queued: list queued to the hardware (written to the DL registers)
 * @pending: list waiting to be queued to the hardware
 * @gc_work: fragments garbage collector work struct
 * @gc_fragments: list of display list fragments waiting to be freed
 */
struct vsp1_dl_manager {
	unsigned int index;
	enum vsp1_dl_mode mode;
	bool singleshot;
	struct vsp1_device *vsp1;

	spinlock_t lock;
	struct list_head free;
	struct vsp1_dl_list *active;
	struct vsp1_dl_list *queued;
	struct vsp1_dl_list *pending;

	struct work_struct gc_work;
	struct list_head gc_fragments;
};
| 123 | |
Takashi Saito | 1517b03 | 2015-09-07 01:40:25 -0300 | [diff] [blame] | 124 | /* ----------------------------------------------------------------------------- |
Laurent Pinchart | f81e83c | 2016-03-03 13:36:34 -0300 | [diff] [blame] | 125 | * Display List Body Management |
| 126 | */ |
| 127 | |
| 128 | /* |
| 129 | * Initialize a display list body object and allocate DMA memory for the body |
| 130 | * data. The display list body object is expected to have been initialized to |
| 131 | * 0 when allocated. |
| 132 | */ |
| 133 | static int vsp1_dl_body_init(struct vsp1_device *vsp1, |
| 134 | struct vsp1_dl_body *dlb, unsigned int num_entries, |
| 135 | size_t extra_size) |
| 136 | { |
| 137 | size_t size = num_entries * sizeof(*dlb->entries) + extra_size; |
| 138 | |
| 139 | dlb->vsp1 = vsp1; |
| 140 | dlb->size = size; |
| 141 | |
Magnus Damm | 2cc2137 | 2017-05-17 02:20:05 +0300 | [diff] [blame] | 142 | dlb->entries = dma_alloc_wc(vsp1->bus_master, dlb->size, &dlb->dma, |
Laurent Pinchart | f81e83c | 2016-03-03 13:36:34 -0300 | [diff] [blame] | 143 | GFP_KERNEL); |
| 144 | if (!dlb->entries) |
| 145 | return -ENOMEM; |
| 146 | |
| 147 | return 0; |
| 148 | } |
| 149 | |
/*
 * Cleanup a display list body and free the associated DMA memory.
 */
static void vsp1_dl_body_cleanup(struct vsp1_dl_body *dlb)
{
	dma_free_wc(dlb->vsp1->bus_master, dlb->size, dlb->entries, dlb->dma);
}
| 157 | |
| 158 | /** |
| 159 | * vsp1_dl_fragment_alloc - Allocate a display list fragment |
| 160 | * @vsp1: The VSP1 device |
| 161 | * @num_entries: The maximum number of entries that the fragment can contain |
| 162 | * |
| 163 | * Allocate a display list fragment with enough memory to contain the requested |
| 164 | * number of entries. |
| 165 | * |
| 166 | * Return a pointer to a fragment on success or NULL if memory can't be |
| 167 | * allocated. |
| 168 | */ |
| 169 | struct vsp1_dl_body *vsp1_dl_fragment_alloc(struct vsp1_device *vsp1, |
| 170 | unsigned int num_entries) |
| 171 | { |
| 172 | struct vsp1_dl_body *dlb; |
| 173 | int ret; |
| 174 | |
| 175 | dlb = kzalloc(sizeof(*dlb), GFP_KERNEL); |
| 176 | if (!dlb) |
| 177 | return NULL; |
| 178 | |
| 179 | ret = vsp1_dl_body_init(vsp1, dlb, num_entries, 0); |
| 180 | if (ret < 0) { |
| 181 | kfree(dlb); |
| 182 | return NULL; |
| 183 | } |
| 184 | |
| 185 | return dlb; |
| 186 | } |
| 187 | |
/**
 * vsp1_dl_fragment_free - Free a display list fragment
 * @dlb: The fragment
 *
 * Release the given fragment and its DMA memory.
 *
 * Only fragments that have not been added to a display list may be freed
 * this way: a display list takes ownership of fragments added to it and
 * frees them itself. Manual free typically happens at cleanup time for
 * fragments that have been allocated but not used.
 *
 * Passing a NULL pointer to this function is safe, in that case no operation
 * will be performed.
 */
void vsp1_dl_fragment_free(struct vsp1_dl_body *dlb)
{
	if (dlb) {
		vsp1_dl_body_cleanup(dlb);
		kfree(dlb);
	}
}
| 210 | |
| 211 | /** |
| 212 | * vsp1_dl_fragment_write - Write a register to a display list fragment |
| 213 | * @dlb: The fragment |
| 214 | * @reg: The register address |
| 215 | * @data: The register value |
| 216 | * |
| 217 | * Write the given register and value to the display list fragment. The maximum |
| 218 | * number of entries that can be written in a fragment is specified when the |
| 219 | * fragment is allocated by vsp1_dl_fragment_alloc(). |
| 220 | */ |
| 221 | void vsp1_dl_fragment_write(struct vsp1_dl_body *dlb, u32 reg, u32 data) |
| 222 | { |
| 223 | dlb->entries[dlb->num_entries].addr = reg; |
| 224 | dlb->entries[dlb->num_entries].data = data; |
| 225 | dlb->num_entries++; |
| 226 | } |
| 227 | |
| 228 | /* ----------------------------------------------------------------------------- |
Takashi Saito | 1517b03 | 2015-09-07 01:40:25 -0300 | [diff] [blame] | 229 | * Display List Transaction Management |
| 230 | */ |
| 231 | |
Laurent Pinchart | c2dd2513 | 2015-11-08 20:06:57 -0200 | [diff] [blame] | 232 | static struct vsp1_dl_list *vsp1_dl_list_alloc(struct vsp1_dl_manager *dlm) |
Takashi Saito | 1517b03 | 2015-09-07 01:40:25 -0300 | [diff] [blame] | 233 | { |
Laurent Pinchart | c2dd2513 | 2015-11-08 20:06:57 -0200 | [diff] [blame] | 234 | struct vsp1_dl_list *dl; |
Laurent Pinchart | 1216198 | 2015-11-14 22:48:27 -0200 | [diff] [blame] | 235 | size_t header_size; |
Laurent Pinchart | f81e83c | 2016-03-03 13:36:34 -0300 | [diff] [blame] | 236 | int ret; |
Laurent Pinchart | c2dd2513 | 2015-11-08 20:06:57 -0200 | [diff] [blame] | 237 | |
| 238 | dl = kzalloc(sizeof(*dl), GFP_KERNEL); |
| 239 | if (!dl) |
| 240 | return NULL; |
| 241 | |
Laurent Pinchart | f81e83c | 2016-03-03 13:36:34 -0300 | [diff] [blame] | 242 | INIT_LIST_HEAD(&dl->fragments); |
Laurent Pinchart | c2dd2513 | 2015-11-08 20:06:57 -0200 | [diff] [blame] | 243 | dl->dlm = dlm; |
Laurent Pinchart | c2dd2513 | 2015-11-08 20:06:57 -0200 | [diff] [blame] | 244 | |
Laurent Pinchart | 9dbed95 | 2017-02-26 10:29:50 -0300 | [diff] [blame] | 245 | /* |
| 246 | * Initialize the display list body and allocate DMA memory for the body |
Laurent Pinchart | f81e83c | 2016-03-03 13:36:34 -0300 | [diff] [blame] | 247 | * and the optional header. Both are allocated together to avoid memory |
| 248 | * fragmentation, with the header located right after the body in |
| 249 | * memory. |
| 250 | */ |
| 251 | header_size = dlm->mode == VSP1_DL_MODE_HEADER |
| 252 | ? ALIGN(sizeof(struct vsp1_dl_header), 8) |
| 253 | : 0; |
| 254 | |
| 255 | ret = vsp1_dl_body_init(dlm->vsp1, &dl->body0, VSP1_DL_NUM_ENTRIES, |
| 256 | header_size); |
| 257 | if (ret < 0) { |
Laurent Pinchart | c2dd2513 | 2015-11-08 20:06:57 -0200 | [diff] [blame] | 258 | kfree(dl); |
| 259 | return NULL; |
| 260 | } |
| 261 | |
Laurent Pinchart | 1216198 | 2015-11-14 22:48:27 -0200 | [diff] [blame] | 262 | if (dlm->mode == VSP1_DL_MODE_HEADER) { |
Laurent Pinchart | f81e83c | 2016-03-03 13:36:34 -0300 | [diff] [blame] | 263 | size_t header_offset = VSP1_DL_NUM_ENTRIES |
| 264 | * sizeof(*dl->body0.entries); |
| 265 | |
| 266 | dl->header = ((void *)dl->body0.entries) + header_offset; |
| 267 | dl->dma = dl->body0.dma + header_offset; |
| 268 | |
Laurent Pinchart | 1216198 | 2015-11-14 22:48:27 -0200 | [diff] [blame] | 269 | memset(dl->header, 0, sizeof(*dl->header)); |
Laurent Pinchart | f81e83c | 2016-03-03 13:36:34 -0300 | [diff] [blame] | 270 | dl->header->lists[0].addr = dl->body0.dma; |
Laurent Pinchart | 1216198 | 2015-11-14 22:48:27 -0200 | [diff] [blame] | 271 | } |
| 272 | |
Laurent Pinchart | c2dd2513 | 2015-11-08 20:06:57 -0200 | [diff] [blame] | 273 | return dl; |
| 274 | } |
| 275 | |
static void vsp1_dl_list_free(struct vsp1_dl_list *dl)
{
	vsp1_dl_body_cleanup(&dl->body0);
	/*
	 * Hand any remaining fragments over to the manager's garbage
	 * collection list. NOTE(review): unlike __vsp1_dl_list_put() this
	 * doesn't schedule gc_work — presumably the caller is the manager
	 * teardown path which drains gc_fragments itself; confirm against
	 * the manager destroy code (not visible here).
	 */
	list_splice_init(&dl->fragments, &dl->dlm->gc_fragments);
	kfree(dl);
}
| 282 | |
| 283 | /** |
| 284 | * vsp1_dl_list_get - Get a free display list |
| 285 | * @dlm: The display list manager |
| 286 | * |
| 287 | * Get a display list from the pool of free lists and return it. |
| 288 | * |
| 289 | * This function must be called without the display list manager lock held. |
| 290 | */ |
| 291 | struct vsp1_dl_list *vsp1_dl_list_get(struct vsp1_dl_manager *dlm) |
| 292 | { |
| 293 | struct vsp1_dl_list *dl = NULL; |
| 294 | unsigned long flags; |
| 295 | |
| 296 | spin_lock_irqsave(&dlm->lock, flags); |
| 297 | |
| 298 | if (!list_empty(&dlm->free)) { |
| 299 | dl = list_first_entry(&dlm->free, struct vsp1_dl_list, list); |
| 300 | list_del(&dl->list); |
Kieran Bingham | 76e4889 | 2016-07-12 13:49:46 -0300 | [diff] [blame] | 301 | |
Mauro Carvalho Chehab | b618739 | 2016-09-19 15:18:01 -0300 | [diff] [blame] | 302 | /* |
| 303 | * The display list chain must be initialised to ensure every |
Kieran Bingham | 76e4889 | 2016-07-12 13:49:46 -0300 | [diff] [blame] | 304 | * display list can assert list_empty() if it is not in a chain. |
| 305 | */ |
| 306 | INIT_LIST_HEAD(&dl->chain); |
Laurent Pinchart | c2dd2513 | 2015-11-08 20:06:57 -0200 | [diff] [blame] | 307 | } |
| 308 | |
| 309 | spin_unlock_irqrestore(&dlm->lock, flags); |
| 310 | |
| 311 | return dl; |
| 312 | } |
| 313 | |
/* This function must be called with the display list manager lock held. */
static void __vsp1_dl_list_put(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_list *dl_child;

	if (!dl)
		return;

	/*
	 * Release any linked display-lists which were chained for a single
	 * hardware operation. This recurses at most one level deep, as
	 * chained lists have has_chain false and empty chains themselves.
	 */
	if (dl->has_chain) {
		list_for_each_entry(dl_child, &dl->chain, chain)
			__vsp1_dl_list_put(dl_child);
	}

	dl->has_chain = false;

	/*
	 * We can't free fragments here as DMA memory can only be freed in
	 * interruptible context. Move all fragments to the display list
	 * manager's list of fragments to be freed, they will be
	 * garbage-collected by the work queue.
	 */
	if (!list_empty(&dl->fragments)) {
		list_splice_init(&dl->fragments, &dl->dlm->gc_fragments);
		schedule_work(&dl->dlm->gc_work);
	}

	/* Reset the main body so the list can be reused from a clean state. */
	dl->body0.num_entries = 0;

	list_add_tail(&dl->list, &dl->dlm->free);
}
| 348 | |
Laurent Pinchart | d2c1b02 | 2016-03-03 09:26:47 -0300 | [diff] [blame] | 349 | /** |
| 350 | * vsp1_dl_list_put - Release a display list |
| 351 | * @dl: The display list |
| 352 | * |
| 353 | * Release the display list and return it to the pool of free lists. |
| 354 | * |
| 355 | * Passing a NULL pointer to this function is safe, in that case no operation |
| 356 | * will be performed. |
| 357 | */ |
| 358 | void vsp1_dl_list_put(struct vsp1_dl_list *dl) |
| 359 | { |
| 360 | unsigned long flags; |
| 361 | |
| 362 | if (!dl) |
| 363 | return; |
| 364 | |
| 365 | spin_lock_irqsave(&dl->dlm->lock, flags); |
| 366 | __vsp1_dl_list_put(dl); |
| 367 | spin_unlock_irqrestore(&dl->dlm->lock, flags); |
| 368 | } |
| 369 | |
/**
 * vsp1_dl_list_write - Write a register to the display list
 * @dl: The display list
 * @reg: The register address
 * @data: The register value
 *
 * Write the given register and value to the display list. Up to 256
 * (VSP1_DL_NUM_ENTRIES) registers can be written per display list, as that
 * is the capacity of the main body allocated in vsp1_dl_list_alloc().
 */
void vsp1_dl_list_write(struct vsp1_dl_list *dl, u32 reg, u32 data)
{
	/* Registers always go to the pre-allocated main body (body0). */
	vsp1_dl_fragment_write(&dl->body0, reg, data);
}
| 383 | |
| 384 | /** |
| 385 | * vsp1_dl_list_add_fragment - Add a fragment to the display list |
| 386 | * @dl: The display list |
| 387 | * @dlb: The fragment |
| 388 | * |
| 389 | * Add a display list body as a fragment to a display list. Registers contained |
| 390 | * in fragments are processed after registers contained in the main display |
| 391 | * list, in the order in which fragments are added. |
| 392 | * |
| 393 | * Adding a fragment to a display list passes ownership of the fragment to the |
| 394 | * list. The caller must not touch the fragment after this call, and must not |
| 395 | * free it explicitly with vsp1_dl_fragment_free(). |
| 396 | * |
| 397 | * Fragments are only usable for display lists in header mode. Attempt to |
| 398 | * add a fragment to a header-less display list will return an error. |
| 399 | */ |
| 400 | int vsp1_dl_list_add_fragment(struct vsp1_dl_list *dl, |
| 401 | struct vsp1_dl_body *dlb) |
| 402 | { |
| 403 | /* Multi-body lists are only available in header mode. */ |
| 404 | if (dl->dlm->mode != VSP1_DL_MODE_HEADER) |
| 405 | return -EINVAL; |
| 406 | |
| 407 | list_add_tail(&dlb->list, &dl->fragments); |
| 408 | return 0; |
Takashi Saito | 1517b03 | 2015-09-07 01:40:25 -0300 | [diff] [blame] | 409 | } |
| 410 | |
Kieran Bingham | 76e4889 | 2016-07-12 13:49:46 -0300 | [diff] [blame] | 411 | /** |
| 412 | * vsp1_dl_list_add_chain - Add a display list to a chain |
| 413 | * @head: The head display list |
| 414 | * @dl: The new display list |
| 415 | * |
| 416 | * Add a display list to an existing display list chain. The chained lists |
| 417 | * will be automatically processed by the hardware without intervention from |
| 418 | * the CPU. A display list end interrupt will only complete after the last |
| 419 | * display list in the chain has completed processing. |
| 420 | * |
| 421 | * Adding a display list to a chain passes ownership of the display list to |
| 422 | * the head display list item. The chain is released when the head dl item is |
| 423 | * put back with __vsp1_dl_list_put(). |
| 424 | * |
| 425 | * Chained display lists are only usable in header mode. Attempts to add a |
| 426 | * display list to a chain in header-less mode will return an error. |
| 427 | */ |
| 428 | int vsp1_dl_list_add_chain(struct vsp1_dl_list *head, |
| 429 | struct vsp1_dl_list *dl) |
| 430 | { |
| 431 | /* Chained lists are only available in header mode. */ |
| 432 | if (head->dlm->mode != VSP1_DL_MODE_HEADER) |
| 433 | return -EINVAL; |
| 434 | |
| 435 | head->has_chain = true; |
| 436 | list_add_tail(&dl->chain, &head->chain); |
| 437 | return 0; |
| 438 | } |
| 439 | |
/*
 * Fill the display list header with the body table, the chain link and the
 * control flags. @is_last indicates whether this list is the last one of its
 * chain (or a standalone list).
 */
static void vsp1_dl_list_fill_header(struct vsp1_dl_list *dl, bool is_last)
{
	struct vsp1_dl_manager *dlm = dl->dlm;
	struct vsp1_dl_header_list *hdr = dl->header->lists;
	struct vsp1_dl_body *dlb;
	unsigned int num_lists = 0;

	/*
	 * Fill the header with the display list bodies addresses and sizes. The
	 * address of the first body has already been filled when the display
	 * list was allocated.
	 *
	 * NOTE(review): sizes are computed using sizeof(*dl->header->lists);
	 * this happens to equal sizeof(*dlb->entries) as both packed structs
	 * hold two u32 fields, but sizeof(*dl->body0.entries) would express
	 * the intent (size of the entries) more clearly.
	 */

	hdr->num_bytes = dl->body0.num_entries
		       * sizeof(*dl->header->lists);

	list_for_each_entry(dlb, &dl->fragments, list) {
		num_lists++;
		hdr++;

		hdr->addr = dlb->dma;
		hdr->num_bytes = dlb->num_entries
			       * sizeof(*dl->header->lists);
	}

	dl->header->num_lists = num_lists;

	if (!list_empty(&dl->chain) && !is_last) {
		/*
		 * If this display list's chain is not empty, we are on a list,
		 * and the next item is the display list that we must queue for
		 * automatic processing by the hardware.
		 */
		struct vsp1_dl_list *next = list_next_entry(dl, chain);

		dl->header->next_header = next->dma;
		dl->header->flags = VSP1_DLH_AUTO_START;
	} else if (!dlm->singleshot) {
		/*
		 * If the display list manager works in continuous mode, the VSP
		 * should loop over the display list continuously until
		 * instructed to do otherwise.
		 */
		dl->header->next_header = dl->dma;
		dl->header->flags = VSP1_DLH_INT_ENABLE | VSP1_DLH_AUTO_START;
	} else {
		/*
		 * Otherwise, in mem-to-mem mode, we work in single-shot mode
		 * and the next display list must not be started automatically.
		 */
		dl->header->flags = VSP1_DLH_INT_ENABLE;
	}
}
| 493 | |
Laurent Pinchart | eaf4bfa | 2017-05-29 13:41:31 +0300 | [diff] [blame] | 494 | static bool vsp1_dl_list_hw_update_pending(struct vsp1_dl_manager *dlm) |
| 495 | { |
| 496 | struct vsp1_device *vsp1 = dlm->vsp1; |
| 497 | |
| 498 | if (!dlm->queued) |
| 499 | return false; |
| 500 | |
| 501 | /* |
| 502 | * Check whether the VSP1 has taken the update. In headerless mode the |
| 503 | * hardware indicates this by clearing the UPD bit in the DL_BODY_SIZE |
| 504 | * register, and in header mode by clearing the UPDHDR bit in the CMD |
| 505 | * register. |
| 506 | */ |
| 507 | if (dlm->mode == VSP1_DL_MODE_HEADERLESS) |
| 508 | return !!(vsp1_read(vsp1, VI6_DL_BODY_SIZE) |
| 509 | & VI6_DL_BODY_SIZE_UPD); |
| 510 | else |
| 511 | return !!(vsp1_read(vsp1, VI6_CMD(dlm->index) & VI6_CMD_UPDHDR)); |
| 512 | } |
| 513 | |
| 514 | static void vsp1_dl_list_hw_enqueue(struct vsp1_dl_list *dl) |
| 515 | { |
| 516 | struct vsp1_dl_manager *dlm = dl->dlm; |
| 517 | struct vsp1_device *vsp1 = dlm->vsp1; |
| 518 | |
| 519 | if (dlm->mode == VSP1_DL_MODE_HEADERLESS) { |
| 520 | /* |
| 521 | * In headerless mode, program the hardware directly with the |
| 522 | * display list body address and size and set the UPD bit. The |
| 523 | * bit will be cleared by the hardware when the display list |
| 524 | * processing starts. |
| 525 | */ |
| 526 | vsp1_write(vsp1, VI6_DL_HDR_ADDR(0), dl->body0.dma); |
| 527 | vsp1_write(vsp1, VI6_DL_BODY_SIZE, VI6_DL_BODY_SIZE_UPD | |
| 528 | (dl->body0.num_entries * sizeof(*dl->header->lists))); |
| 529 | } else { |
| 530 | /* |
| 531 | * In header mode, program the display list header address. If |
| 532 | * the hardware is idle (single-shot mode or first frame in |
| 533 | * continuous mode) it will then be started independently. If |
| 534 | * the hardware is operating, the VI6_DL_HDR_REF_ADDR register |
| 535 | * will be updated with the display list address. |
| 536 | */ |
| 537 | vsp1_write(vsp1, VI6_DL_HDR_ADDR(dlm->index), dl->dma); |
| 538 | } |
| 539 | } |
| 540 | |
/*
 * Commit a display list in continuous mode. Called with the display list
 * manager lock held.
 */
static void vsp1_dl_list_commit_continuous(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;

	/*
	 * If a previous display list has been queued to the hardware but not
	 * processed yet, the VSP can start processing it at any time. In that
	 * case we can't replace the queued list by the new one, as we could
	 * race with the hardware. We thus mark the update as pending, it will
	 * be queued up to the hardware by the frame end interrupt handler.
	 */
	if (vsp1_dl_list_hw_update_pending(dlm)) {
		__vsp1_dl_list_put(dlm->pending);
		dlm->pending = dl;
		return;
	}

	/*
	 * Pass the new display list to the hardware and mark it as queued. It
	 * will become active when the hardware starts processing it.
	 */
	vsp1_dl_list_hw_enqueue(dl);

	__vsp1_dl_list_put(dlm->queued);
	dlm->queued = dl;
}
| 567 | |
/*
 * Commit a display list in single-shot mode. Called with the display list
 * manager lock held.
 */
static void vsp1_dl_list_commit_singleshot(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;

	/*
	 * When working in single-shot mode, the caller guarantees that the
	 * hardware is idle at this point. Just commit the head display list
	 * to hardware. Chained lists will be started automatically.
	 */
	vsp1_dl_list_hw_enqueue(dl);

	dlm->active = dl;
}
| 581 | |
/**
 * vsp1_dl_list_commit - Commit a display list to the hardware
 * @dl: The display list
 *
 * In header mode, fill the headers for the display list and all the lists
 * chained to it, then hand the list over to the hardware using the manager's
 * single-shot or continuous commit strategy. Headers are filled outside the
 * lock; the actual hand-over is protected by the manager lock.
 */
void vsp1_dl_list_commit(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;
	struct vsp1_dl_list *dl_child;
	unsigned long flags;

	if (dlm->mode == VSP1_DL_MODE_HEADER) {
		/* Fill the header for the head and chained display lists. */
		vsp1_dl_list_fill_header(dl, list_empty(&dl->chain));

		list_for_each_entry(dl_child, &dl->chain, chain) {
			bool last = list_is_last(&dl_child->chain, &dl->chain);

			vsp1_dl_list_fill_header(dl_child, last);
		}
	}

	spin_lock_irqsave(&dlm->lock, flags);

	if (dlm->singleshot)
		vsp1_dl_list_commit_singleshot(dl);
	else
		vsp1_dl_list_commit_continuous(dl);

	spin_unlock_irqrestore(&dlm->lock, flags);
}
| 608 | |
| 609 | /* ----------------------------------------------------------------------------- |
Laurent Pinchart | c2dd2513 | 2015-11-08 20:06:57 -0200 | [diff] [blame] | 610 | * Display List Manager |
Takashi Saito | 1517b03 | 2015-09-07 01:40:25 -0300 | [diff] [blame] | 611 | */ |
| 612 | |
Kieran Bingham | 348a003 | 2017-03-04 02:01:18 +0000 | [diff] [blame] | 613 | /** |
| 614 | * vsp1_dlm_irq_frame_end - Display list handler for the frame end interrupt |
| 615 | * @dlm: the display list manager |
| 616 | * |
| 617 | * Return true if the previous display list has completed at frame end, or false |
| 618 | * if it has been delayed by one frame because the display list commit raced |
| 619 | * with the frame end interrupt. The function always returns true in header mode |
| 620 | * as display list processing is then not continuous and races never occur. |
| 621 | */ |
| 622 | bool vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm) |
Takashi Saito | 1517b03 | 2015-09-07 01:40:25 -0300 | [diff] [blame] | 623 | { |
Kieran Bingham | 348a003 | 2017-03-04 02:01:18 +0000 | [diff] [blame] | 624 | bool completed = false; |
Takashi Saito | 1517b03 | 2015-09-07 01:40:25 -0300 | [diff] [blame] | 625 | |
Laurent Pinchart | c2dd2513 | 2015-11-08 20:06:57 -0200 | [diff] [blame] | 626 | spin_lock(&dlm->lock); |
| 627 | |
Laurent Pinchart | 9dbed95 | 2017-02-26 10:29:50 -0300 | [diff] [blame] | 628 | /* |
Laurent Pinchart | eaf4bfa | 2017-05-29 13:41:31 +0300 | [diff] [blame] | 629 | * The mem-to-mem pipelines work in single-shot mode. No new display |
| 630 | * list can be queued, we don't have to do anything. |
Laurent Pinchart | 1216198 | 2015-11-14 22:48:27 -0200 | [diff] [blame] | 631 | */ |
Laurent Pinchart | eaf4bfa | 2017-05-29 13:41:31 +0300 | [diff] [blame] | 632 | if (dlm->singleshot) { |
| 633 | __vsp1_dl_list_put(dlm->active); |
| 634 | dlm->active = NULL; |
Kieran Bingham | 348a003 | 2017-03-04 02:01:18 +0000 | [diff] [blame] | 635 | completed = true; |
Laurent Pinchart | 1216198 | 2015-11-14 22:48:27 -0200 | [diff] [blame] | 636 | goto done; |
Kieran Bingham | 348a003 | 2017-03-04 02:01:18 +0000 | [diff] [blame] | 637 | } |
Laurent Pinchart | 1216198 | 2015-11-14 22:48:27 -0200 | [diff] [blame] | 638 | |
Laurent Pinchart | 9dbed95 | 2017-02-26 10:29:50 -0300 | [diff] [blame] | 639 | /* |
Laurent Pinchart | eaf4bfa | 2017-05-29 13:41:31 +0300 | [diff] [blame] | 640 | * If the commit operation raced with the interrupt and occurred after |
| 641 | * the frame end event but before interrupt processing, the hardware |
| 642 | * hasn't taken the update into account yet. We have to skip one frame |
| 643 | * and retry. |
Takashi Saito | 1517b03 | 2015-09-07 01:40:25 -0300 | [diff] [blame] | 644 | */ |
Laurent Pinchart | eaf4bfa | 2017-05-29 13:41:31 +0300 | [diff] [blame] | 645 | if (vsp1_dl_list_hw_update_pending(dlm)) |
Takashi Saito | 1517b03 | 2015-09-07 01:40:25 -0300 | [diff] [blame] | 646 | goto done; |
| 647 | |
Laurent Pinchart | 9dbed95 | 2017-02-26 10:29:50 -0300 | [diff] [blame] | 648 | /* |
| 649 | * The device starts processing the queued display list right after the |
Takashi Saito | 1517b03 | 2015-09-07 01:40:25 -0300 | [diff] [blame] | 650 | * frame end interrupt. The display list thus becomes active. |
| 651 | */ |
Laurent Pinchart | c2dd2513 | 2015-11-08 20:06:57 -0200 | [diff] [blame] | 652 | if (dlm->queued) { |
Laurent Pinchart | eaf4bfa | 2017-05-29 13:41:31 +0300 | [diff] [blame] | 653 | __vsp1_dl_list_put(dlm->active); |
Laurent Pinchart | c2dd2513 | 2015-11-08 20:06:57 -0200 | [diff] [blame] | 654 | dlm->active = dlm->queued; |
| 655 | dlm->queued = NULL; |
Kieran Bingham | 348a003 | 2017-03-04 02:01:18 +0000 | [diff] [blame] | 656 | completed = true; |
Takashi Saito | 1517b03 | 2015-09-07 01:40:25 -0300 | [diff] [blame] | 657 | } |
| 658 | |
Laurent Pinchart | 9dbed95 | 2017-02-26 10:29:50 -0300 | [diff] [blame] | 659 | /* |
Laurent Pinchart | eaf4bfa | 2017-05-29 13:41:31 +0300 | [diff] [blame] | 660 | * Now that the VSP has started processing the queued display list, we |
| 661 | * can queue the pending display list to the hardware if one has been |
| 662 | * prepared. |
Takashi Saito | 1517b03 | 2015-09-07 01:40:25 -0300 | [diff] [blame] | 663 | */ |
Laurent Pinchart | c2dd2513 | 2015-11-08 20:06:57 -0200 | [diff] [blame] | 664 | if (dlm->pending) { |
Laurent Pinchart | eaf4bfa | 2017-05-29 13:41:31 +0300 | [diff] [blame] | 665 | vsp1_dl_list_hw_enqueue(dlm->pending); |
| 666 | dlm->queued = dlm->pending; |
Laurent Pinchart | c2dd2513 | 2015-11-08 20:06:57 -0200 | [diff] [blame] | 667 | dlm->pending = NULL; |
Takashi Saito | 1517b03 | 2015-09-07 01:40:25 -0300 | [diff] [blame] | 668 | } |
| 669 | |
| 670 | done: |
Laurent Pinchart | c2dd2513 | 2015-11-08 20:06:57 -0200 | [diff] [blame] | 671 | spin_unlock(&dlm->lock); |
Kieran Bingham | 348a003 | 2017-03-04 02:01:18 +0000 | [diff] [blame] | 672 | |
| 673 | return completed; |
Takashi Saito | 1517b03 | 2015-09-07 01:40:25 -0300 | [diff] [blame] | 674 | } |
| 675 | |
Laurent Pinchart | c2dd2513 | 2015-11-08 20:06:57 -0200 | [diff] [blame] | 676 | /* Hardware Setup */ |
| 677 | void vsp1_dlm_setup(struct vsp1_device *vsp1) |
Takashi Saito | 1517b03 | 2015-09-07 01:40:25 -0300 | [diff] [blame] | 678 | { |
Laurent Pinchart | 351bbf9 | 2015-11-01 15:18:56 -0200 | [diff] [blame] | 679 | u32 ctrl = (256 << VI6_DL_CTRL_AR_WAIT_SHIFT) |
| 680 | | VI6_DL_CTRL_DC2 | VI6_DL_CTRL_DC1 | VI6_DL_CTRL_DC0 |
| 681 | | VI6_DL_CTRL_DLE; |
Takashi Saito | 1517b03 | 2015-09-07 01:40:25 -0300 | [diff] [blame] | 682 | |
Laurent Pinchart | 9dbed95 | 2017-02-26 10:29:50 -0300 | [diff] [blame] | 683 | /* |
| 684 | * The DRM pipeline operates with display lists in Continuous Frame |
Laurent Pinchart | 351bbf9 | 2015-11-01 15:18:56 -0200 | [diff] [blame] | 685 | * Mode, all other pipelines use manual start. |
Takashi Saito | 1517b03 | 2015-09-07 01:40:25 -0300 | [diff] [blame] | 686 | */ |
| 687 | if (vsp1->drm) |
Laurent Pinchart | 351bbf9 | 2015-11-01 15:18:56 -0200 | [diff] [blame] | 688 | ctrl |= VI6_DL_CTRL_CFM0 | VI6_DL_CTRL_NH0; |
Takashi Saito | 1517b03 | 2015-09-07 01:40:25 -0300 | [diff] [blame] | 689 | |
| 690 | vsp1_write(vsp1, VI6_DL_CTRL, ctrl); |
| 691 | vsp1_write(vsp1, VI6_DL_SWAP, VI6_DL_SWAP_LWS); |
| 692 | } |
| 693 | |
Laurent Pinchart | c2dd2513 | 2015-11-08 20:06:57 -0200 | [diff] [blame] | 694 | void vsp1_dlm_reset(struct vsp1_dl_manager *dlm) |
Takashi Saito | 1517b03 | 2015-09-07 01:40:25 -0300 | [diff] [blame] | 695 | { |
Laurent Pinchart | d2c1b02 | 2016-03-03 09:26:47 -0300 | [diff] [blame] | 696 | unsigned long flags; |
| 697 | |
| 698 | spin_lock_irqsave(&dlm->lock, flags); |
| 699 | |
| 700 | __vsp1_dl_list_put(dlm->active); |
| 701 | __vsp1_dl_list_put(dlm->queued); |
| 702 | __vsp1_dl_list_put(dlm->pending); |
| 703 | |
| 704 | spin_unlock_irqrestore(&dlm->lock, flags); |
Takashi Saito | 1517b03 | 2015-09-07 01:40:25 -0300 | [diff] [blame] | 705 | |
Laurent Pinchart | c2dd2513 | 2015-11-08 20:06:57 -0200 | [diff] [blame] | 706 | dlm->active = NULL; |
| 707 | dlm->queued = NULL; |
| 708 | dlm->pending = NULL; |
Takashi Saito | 1517b03 | 2015-09-07 01:40:25 -0300 | [diff] [blame] | 709 | } |
| 710 | |
Laurent Pinchart | 9489a8f | 2016-05-13 19:17:02 -0300 | [diff] [blame] | 711 | /* |
| 712 | * Free all fragments awaiting to be garbage-collected. |
| 713 | * |
| 714 | * This function must be called without the display list manager lock held. |
| 715 | */ |
| 716 | static void vsp1_dlm_fragments_free(struct vsp1_dl_manager *dlm) |
| 717 | { |
| 718 | unsigned long flags; |
| 719 | |
| 720 | spin_lock_irqsave(&dlm->lock, flags); |
| 721 | |
| 722 | while (!list_empty(&dlm->gc_fragments)) { |
| 723 | struct vsp1_dl_body *dlb; |
| 724 | |
| 725 | dlb = list_first_entry(&dlm->gc_fragments, struct vsp1_dl_body, |
| 726 | list); |
| 727 | list_del(&dlb->list); |
| 728 | |
| 729 | spin_unlock_irqrestore(&dlm->lock, flags); |
| 730 | vsp1_dl_fragment_free(dlb); |
| 731 | spin_lock_irqsave(&dlm->lock, flags); |
| 732 | } |
| 733 | |
| 734 | spin_unlock_irqrestore(&dlm->lock, flags); |
| 735 | } |
| 736 | |
| 737 | static void vsp1_dlm_garbage_collect(struct work_struct *work) |
| 738 | { |
| 739 | struct vsp1_dl_manager *dlm = |
| 740 | container_of(work, struct vsp1_dl_manager, gc_work); |
| 741 | |
| 742 | vsp1_dlm_fragments_free(dlm); |
| 743 | } |
| 744 | |
Laurent Pinchart | ef9621b | 2015-11-14 22:27:52 -0200 | [diff] [blame] | 745 | struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1, |
Laurent Pinchart | 1216198 | 2015-11-14 22:48:27 -0200 | [diff] [blame] | 746 | unsigned int index, |
Laurent Pinchart | ef9621b | 2015-11-14 22:27:52 -0200 | [diff] [blame] | 747 | unsigned int prealloc) |
Takashi Saito | 1517b03 | 2015-09-07 01:40:25 -0300 | [diff] [blame] | 748 | { |
Laurent Pinchart | ef9621b | 2015-11-14 22:27:52 -0200 | [diff] [blame] | 749 | struct vsp1_dl_manager *dlm; |
Laurent Pinchart | c2dd2513 | 2015-11-08 20:06:57 -0200 | [diff] [blame] | 750 | unsigned int i; |
| 751 | |
Laurent Pinchart | ef9621b | 2015-11-14 22:27:52 -0200 | [diff] [blame] | 752 | dlm = devm_kzalloc(vsp1->dev, sizeof(*dlm), GFP_KERNEL); |
| 753 | if (!dlm) |
| 754 | return NULL; |
| 755 | |
Laurent Pinchart | 1216198 | 2015-11-14 22:48:27 -0200 | [diff] [blame] | 756 | dlm->index = index; |
| 757 | dlm->mode = index == 0 && !vsp1->info->uapi |
| 758 | ? VSP1_DL_MODE_HEADERLESS : VSP1_DL_MODE_HEADER; |
Laurent Pinchart | eaf4bfa | 2017-05-29 13:41:31 +0300 | [diff] [blame] | 759 | dlm->singleshot = vsp1->info->uapi; |
Laurent Pinchart | c2dd2513 | 2015-11-08 20:06:57 -0200 | [diff] [blame] | 760 | dlm->vsp1 = vsp1; |
| 761 | |
| 762 | spin_lock_init(&dlm->lock); |
| 763 | INIT_LIST_HEAD(&dlm->free); |
Laurent Pinchart | 9489a8f | 2016-05-13 19:17:02 -0300 | [diff] [blame] | 764 | INIT_LIST_HEAD(&dlm->gc_fragments); |
| 765 | INIT_WORK(&dlm->gc_work, vsp1_dlm_garbage_collect); |
Laurent Pinchart | c2dd2513 | 2015-11-08 20:06:57 -0200 | [diff] [blame] | 766 | |
| 767 | for (i = 0; i < prealloc; ++i) { |
| 768 | struct vsp1_dl_list *dl; |
| 769 | |
| 770 | dl = vsp1_dl_list_alloc(dlm); |
| 771 | if (!dl) |
Laurent Pinchart | ef9621b | 2015-11-14 22:27:52 -0200 | [diff] [blame] | 772 | return NULL; |
Laurent Pinchart | c2dd2513 | 2015-11-08 20:06:57 -0200 | [diff] [blame] | 773 | |
| 774 | list_add_tail(&dl->list, &dlm->free); |
| 775 | } |
| 776 | |
Laurent Pinchart | ef9621b | 2015-11-14 22:27:52 -0200 | [diff] [blame] | 777 | return dlm; |
Laurent Pinchart | c2dd2513 | 2015-11-08 20:06:57 -0200 | [diff] [blame] | 778 | } |
| 779 | |
Laurent Pinchart | ef9621b | 2015-11-14 22:27:52 -0200 | [diff] [blame] | 780 | void vsp1_dlm_destroy(struct vsp1_dl_manager *dlm) |
Laurent Pinchart | c2dd2513 | 2015-11-08 20:06:57 -0200 | [diff] [blame] | 781 | { |
| 782 | struct vsp1_dl_list *dl, *next; |
| 783 | |
Laurent Pinchart | ef9621b | 2015-11-14 22:27:52 -0200 | [diff] [blame] | 784 | if (!dlm) |
| 785 | return; |
| 786 | |
Laurent Pinchart | 9489a8f | 2016-05-13 19:17:02 -0300 | [diff] [blame] | 787 | cancel_work_sync(&dlm->gc_work); |
| 788 | |
Laurent Pinchart | c2dd2513 | 2015-11-08 20:06:57 -0200 | [diff] [blame] | 789 | list_for_each_entry_safe(dl, next, &dlm->free, list) { |
| 790 | list_del(&dl->list); |
| 791 | vsp1_dl_list_free(dl); |
| 792 | } |
Laurent Pinchart | 9489a8f | 2016-05-13 19:17:02 -0300 | [diff] [blame] | 793 | |
| 794 | vsp1_dlm_fragments_free(dlm); |
Takashi Saito | 1517b03 | 2015-09-07 01:40:25 -0300 | [diff] [blame] | 795 | } |