/*
 * vsp1_dl.c -- R-Car VSP1 Display List
 *
 * Copyright (C) 2015 Renesas Corporation
 *
 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "vsp1.h"
#include "vsp1_dl.h"

#define VSP1_DL_NUM_ENTRIES		256

#define VSP1_DLH_INT_ENABLE		(1 << 1)
#define VSP1_DLH_AUTO_START		(1 << 0)

struct vsp1_dl_header_list {
	u32 num_bytes;
	u32 addr;
} __attribute__((__packed__));

struct vsp1_dl_header {
	u32 num_lists;
	struct vsp1_dl_header_list lists[8];
	u32 next_header;
	u32 flags;
} __attribute__((__packed__));

struct vsp1_dl_entry {
	u32 addr;
	u32 data;
} __attribute__((__packed__));
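
/*
 * Each entry encodes a single register write as an (address, data) pair.
 * As a sizing sketch derived from the structures above (not from the
 * datasheet): one packed entry occupies 8 bytes, so a body sized for
 * VSP1_DL_NUM_ENTRIES entries uses 256 * 8 = 2048 bytes of DMA memory,
 * plus ALIGN(sizeof(struct vsp1_dl_header), 8) = 80 bytes when the
 * optional header is appended after the body.
 */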

/**
 * struct vsp1_dl_body - Display list body
 * @list: entry in the display list list of bodies
 * @vsp1: the VSP1 device
 * @entries: array of entries
 * @dma: DMA address of the entries
 * @size: size of the DMA memory in bytes
 * @num_entries: number of stored entries
 */
struct vsp1_dl_body {
	struct list_head list;
	struct vsp1_device *vsp1;

	struct vsp1_dl_entry *entries;
	dma_addr_t dma;
	size_t size;

	unsigned int num_entries;
};

/**
 * struct vsp1_dl_list - Display list
 * @list: entry in the display list manager lists
 * @dlm: the display list manager
 * @header: display list header, NULL for headerless lists
 * @dma: DMA address for the header
 * @body0: first display list body
 * @fragments: list of extra display list bodies
 * @has_chain: if true, indicates that there's a partition chain
 * @chain: entry in the display list partition chain
 */
struct vsp1_dl_list {
	struct list_head list;
	struct vsp1_dl_manager *dlm;

	struct vsp1_dl_header *header;
	dma_addr_t dma;

	struct vsp1_dl_body body0;
	struct list_head fragments;

	bool has_chain;
	struct list_head chain;
};

enum vsp1_dl_mode {
	VSP1_DL_MODE_HEADER,
	VSP1_DL_MODE_HEADERLESS,
};

/**
 * struct vsp1_dl_manager - Display List manager
 * @index: index of the related WPF
 * @mode: display list operation mode (header or headerless)
 * @vsp1: the VSP1 device
 * @lock: protects the free, active, queued, pending and gc_fragments lists
 * @free: array of all free display lists
 * @active: list currently being processed (loaded) by hardware
 * @queued: list queued to the hardware (written to the DL registers)
 * @pending: list waiting to be queued to the hardware
 * @gc_work: fragments garbage collector work struct
 * @gc_fragments: array of display list fragments waiting to be freed
 */
struct vsp1_dl_manager {
	unsigned int index;
	enum vsp1_dl_mode mode;
	struct vsp1_device *vsp1;

	spinlock_t lock;
	struct list_head free;
	struct vsp1_dl_list *active;
	struct vsp1_dl_list *queued;
	struct vsp1_dl_list *pending;

	struct work_struct gc_work;
	struct list_head gc_fragments;
};

/* -----------------------------------------------------------------------------
 * Display List Body Management
 */

/*
 * Initialize a display list body object and allocate DMA memory for the body
 * data. The display list body object is expected to have been initialized to
 * 0 when allocated.
 */
static int vsp1_dl_body_init(struct vsp1_device *vsp1,
			     struct vsp1_dl_body *dlb, unsigned int num_entries,
			     size_t extra_size)
{
	size_t size = num_entries * sizeof(*dlb->entries) + extra_size;

	dlb->vsp1 = vsp1;
	dlb->size = size;

	dlb->entries = dma_alloc_wc(vsp1->dev, dlb->size, &dlb->dma,
				    GFP_KERNEL);
	if (!dlb->entries)
		return -ENOMEM;

	return 0;
}

/*
 * Clean up a display list body and free the allocated DMA memory.
 */
static void vsp1_dl_body_cleanup(struct vsp1_dl_body *dlb)
{
	dma_free_wc(dlb->vsp1->dev, dlb->size, dlb->entries, dlb->dma);
}

/**
 * vsp1_dl_fragment_alloc - Allocate a display list fragment
 * @vsp1: The VSP1 device
 * @num_entries: The maximum number of entries that the fragment can contain
 *
 * Allocate a display list fragment with enough memory to contain the requested
 * number of entries.
 *
 * Return a pointer to a fragment on success or NULL if memory can't be
 * allocated.
 */
struct vsp1_dl_body *vsp1_dl_fragment_alloc(struct vsp1_device *vsp1,
					    unsigned int num_entries)
{
	struct vsp1_dl_body *dlb;
	int ret;

	dlb = kzalloc(sizeof(*dlb), GFP_KERNEL);
	if (!dlb)
		return NULL;

	ret = vsp1_dl_body_init(vsp1, dlb, num_entries, 0);
	if (ret < 0) {
		kfree(dlb);
		return NULL;
	}

	return dlb;
}

/**
 * vsp1_dl_fragment_free - Free a display list fragment
 * @dlb: The fragment
 *
 * Free the given display list fragment and the associated DMA memory.
 *
 * Fragments must only be freed explicitly if they are not added to a display
 * list, as the display list will take ownership of them and free them
 * otherwise. Manual free typically happens at cleanup time for fragments that
 * have been allocated but not used.
 *
 * Passing a NULL pointer to this function is safe, in that case no operation
 * will be performed.
 */
void vsp1_dl_fragment_free(struct vsp1_dl_body *dlb)
{
	if (!dlb)
		return;

	vsp1_dl_body_cleanup(dlb);
	kfree(dlb);
}

/**
 * vsp1_dl_fragment_write - Write a register to a display list fragment
 * @dlb: The fragment
 * @reg: The register address
 * @data: The register value
 *
 * Write the given register and value to the display list fragment. The maximum
 * number of entries that can be written in a fragment is specified when the
 * fragment is allocated by vsp1_dl_fragment_alloc().
 */
void vsp1_dl_fragment_write(struct vsp1_dl_body *dlb, u32 reg, u32 data)
{
	dlb->entries[dlb->num_entries].addr = reg;
	dlb->entries[dlb->num_entries].data = data;
	dlb->num_entries++;
}
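
/*
 * Usage sketch (illustrative only, not taken from a real caller): an entity
 * pre-computing a table fragment at setup time and handing it over to a
 * display list. The VI6_LUT_TABLE register block, the lut[] array and the
 * table size are assumptions for the example.
 *
 *	struct vsp1_dl_body *dlb;
 *	unsigned int i;
 *
 *	dlb = vsp1_dl_fragment_alloc(vsp1, 256);
 *	if (!dlb)
 *		return -ENOMEM;
 *
 *	for (i = 0; i < 256; ++i)
 *		vsp1_dl_fragment_write(dlb, VI6_LUT_TABLE + 4 * i, lut[i]);
 *
 *	if (vsp1_dl_list_add_fragment(dl, dlb))
 *		vsp1_dl_fragment_free(dlb);
 *
 * Note that the fragment is only freed manually on the error path, since a
 * successful add passes ownership to the display list.
 */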

/* -----------------------------------------------------------------------------
 * Display List Transaction Management
 */

static struct vsp1_dl_list *vsp1_dl_list_alloc(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *dl;
	size_t header_size;
	int ret;

	dl = kzalloc(sizeof(*dl), GFP_KERNEL);
	if (!dl)
		return NULL;

	INIT_LIST_HEAD(&dl->fragments);
	dl->dlm = dlm;

	/*
	 * Initialize the display list body and allocate DMA memory for the
	 * body and the optional header. Both are allocated together to avoid
	 * memory fragmentation, with the header located right after the body
	 * in memory.
	 */
	header_size = dlm->mode == VSP1_DL_MODE_HEADER
		    ? ALIGN(sizeof(struct vsp1_dl_header), 8)
		    : 0;

	ret = vsp1_dl_body_init(dlm->vsp1, &dl->body0, VSP1_DL_NUM_ENTRIES,
				header_size);
	if (ret < 0) {
		kfree(dl);
		return NULL;
	}

	if (dlm->mode == VSP1_DL_MODE_HEADER) {
		size_t header_offset = VSP1_DL_NUM_ENTRIES
				     * sizeof(*dl->body0.entries);

		dl->header = ((void *)dl->body0.entries) + header_offset;
		dl->dma = dl->body0.dma + header_offset;

		memset(dl->header, 0, sizeof(*dl->header));
		dl->header->lists[0].addr = dl->body0.dma;
	}

	return dl;
}

static void vsp1_dl_list_free(struct vsp1_dl_list *dl)
{
	vsp1_dl_body_cleanup(&dl->body0);
	list_splice_init(&dl->fragments, &dl->dlm->gc_fragments);
	kfree(dl);
}

/**
 * vsp1_dl_list_get - Get a free display list
 * @dlm: The display list manager
 *
 * Get a display list from the pool of free lists and return it.
 *
 * This function must be called without the display list manager lock held.
 */
struct vsp1_dl_list *vsp1_dl_list_get(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *dl = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dlm->lock, flags);

	if (!list_empty(&dlm->free)) {
		dl = list_first_entry(&dlm->free, struct vsp1_dl_list, list);
		list_del(&dl->list);

		/*
		 * The display list chain must be initialised to ensure every
		 * display list can assert list_empty() if it is not in a
		 * chain.
		 */
		INIT_LIST_HEAD(&dl->chain);
	}

	spin_unlock_irqrestore(&dlm->lock, flags);

	return dl;
}
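
/*
 * Taken together with vsp1_dl_list_write() and vsp1_dl_list_commit() below,
 * a rough per-frame sequence looks as follows (error handling elided, and
 * the register write is a placeholder):
 *
 *	struct vsp1_dl_list *dl;
 *
 *	dl = vsp1_dl_list_get(dlm);
 *	if (!dl)
 *		return -EBUSY;
 *
 *	vsp1_dl_list_write(dl, reg, value);
 *	vsp1_dl_list_commit(dl);
 *
 * A committed list is handed back to the manager by the interrupt handlers
 * once the hardware has finished with it; callers only put a list
 * explicitly when they abort before committing.
 */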

/* This function must be called with the display list manager lock held. */
static void __vsp1_dl_list_put(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_list *dl_child;

	if (!dl)
		return;

	/*
	 * Release any linked display lists which were chained for a single
	 * hardware operation.
	 */
	if (dl->has_chain) {
		list_for_each_entry(dl_child, &dl->chain, chain)
			__vsp1_dl_list_put(dl_child);
	}

	dl->has_chain = false;

	/*
	 * We can't free fragments here as DMA memory can only be freed in
	 * interruptible context. Move all fragments to the display list
	 * manager's list of fragments to be freed, they will be
	 * garbage-collected by the work queue.
	 */
	if (!list_empty(&dl->fragments)) {
		list_splice_init(&dl->fragments, &dl->dlm->gc_fragments);
		schedule_work(&dl->dlm->gc_work);
	}

	dl->body0.num_entries = 0;

	list_add_tail(&dl->list, &dl->dlm->free);
}

/**
 * vsp1_dl_list_put - Release a display list
 * @dl: The display list
 *
 * Release the display list and return it to the pool of free lists.
 *
 * Passing a NULL pointer to this function is safe, in that case no operation
 * will be performed.
 */
void vsp1_dl_list_put(struct vsp1_dl_list *dl)
{
	unsigned long flags;

	if (!dl)
		return;

	spin_lock_irqsave(&dl->dlm->lock, flags);
	__vsp1_dl_list_put(dl);
	spin_unlock_irqrestore(&dl->dlm->lock, flags);
}

/**
 * vsp1_dl_list_write - Write a register to the display list
 * @dl: The display list
 * @reg: The register address
 * @data: The register value
 *
 * Write the given register and value to the display list. Up to 256 registers
 * can be written per display list.
 */
void vsp1_dl_list_write(struct vsp1_dl_list *dl, u32 reg, u32 data)
{
	vsp1_dl_fragment_write(&dl->body0, reg, data);
}

/**
 * vsp1_dl_list_add_fragment - Add a fragment to the display list
 * @dl: The display list
 * @dlb: The fragment
 *
 * Add a display list body as a fragment to a display list. Registers contained
 * in fragments are processed after registers contained in the main display
 * list, in the order in which fragments are added.
 *
 * Adding a fragment to a display list passes ownership of the fragment to the
 * list. The caller must not touch the fragment after this call, and must not
 * free it explicitly with vsp1_dl_fragment_free().
 *
 * Fragments are only usable for display lists in header mode. Attempts to add
 * a fragment to a headerless display list will return an error.
 */
int vsp1_dl_list_add_fragment(struct vsp1_dl_list *dl,
			      struct vsp1_dl_body *dlb)
{
	/* Multi-body lists are only available in header mode. */
	if (dl->dlm->mode != VSP1_DL_MODE_HEADER)
		return -EINVAL;

	list_add_tail(&dlb->list, &dl->fragments);
	return 0;
}

/**
 * vsp1_dl_list_add_chain - Add a display list to a chain
 * @head: The head display list
 * @dl: The new display list
 *
 * Add a display list to an existing display list chain. The chained lists
 * will be automatically processed by the hardware without intervention from
 * the CPU. A display list end interrupt will only complete after the last
 * display list in the chain has completed processing.
 *
 * Adding a display list to a chain passes ownership of the display list to
 * the head display list item. The chain is released when the head dl item is
 * put back with __vsp1_dl_list_put().
 *
 * Chained display lists are only usable in header mode. Attempts to add a
 * display list to a chain in headerless mode will return an error.
 */
int vsp1_dl_list_add_chain(struct vsp1_dl_list *head,
			   struct vsp1_dl_list *dl)
{
	/* Chained lists are only available in header mode. */
	if (head->dlm->mode != VSP1_DL_MODE_HEADER)
		return -EINVAL;

	head->has_chain = true;
	list_add_tail(&dl->chain, &head->chain);
	return 0;
}
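
/*
 * Sketch of how a chain could be built for an operation split into several
 * partitions (the partition count, the num_partitions variable and the
 * omitted error handling are assumptions, not taken from a real caller):
 *
 *	struct vsp1_dl_list *head, *dl;
 *	unsigned int i;
 *
 *	head = vsp1_dl_list_get(dlm);
 *	for (i = 1; i < num_partitions; ++i) {
 *		dl = vsp1_dl_list_get(dlm);
 *		vsp1_dl_list_add_chain(head, dl);
 *	}
 *
 *	... fill each list with its partition's register writes ...
 *
 *	vsp1_dl_list_commit(head);
 *
 * Putting the head list back later also releases every chained list.
 */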

static void vsp1_dl_list_fill_header(struct vsp1_dl_list *dl, bool is_last)
{
	struct vsp1_dl_header_list *hdr = dl->header->lists;
	struct vsp1_dl_body *dlb;
	unsigned int num_lists = 0;

	/*
	 * Fill the header with the addresses and sizes of the display list
	 * bodies. The address of the first body has already been filled when
	 * the display list was allocated.
	 */

	hdr->num_bytes = dl->body0.num_entries
		       * sizeof(*dl->header->lists);

	list_for_each_entry(dlb, &dl->fragments, list) {
		num_lists++;
		hdr++;

		hdr->addr = dlb->dma;
		hdr->num_bytes = dlb->num_entries
			       * sizeof(*dl->header->lists);
	}

	dl->header->num_lists = num_lists;

	/*
	 * If this display list is part of a chain and is not its last entry,
	 * the next list in the chain must be started automatically by the
	 * hardware when this one completes.
	 */
	if (!list_empty(&dl->chain) && !is_last) {
		struct vsp1_dl_list *next = list_next_entry(dl, chain);

		dl->header->next_header = next->dma;
		dl->header->flags = VSP1_DLH_AUTO_START;
	} else {
		dl->header->flags = VSP1_DLH_INT_ENABLE;
	}
}
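
/*
 * For a head list with two chained lists the filled headers thus link up as
 * follows (addresses are illustrative):
 *
 *	head:	next_header = &dl1, flags = VSP1_DLH_AUTO_START
 *	dl1:	next_header = &dl2, flags = VSP1_DLH_AUTO_START
 *	dl2:	flags = VSP1_DLH_INT_ENABLE
 *
 * Only the last header enables the interrupt, so a single display list end
 * event signals completion of the whole chain.
 */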

void vsp1_dl_list_commit(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;
	struct vsp1_device *vsp1 = dlm->vsp1;
	unsigned long flags;
	bool update;

	spin_lock_irqsave(&dlm->lock, flags);

	if (dl->dlm->mode == VSP1_DL_MODE_HEADER) {
		struct vsp1_dl_list *dl_child;

		/*
		 * In header mode the caller guarantees that the hardware is
		 * idle at this point.
		 */

		/* Fill the header for the head and chained display lists. */
		vsp1_dl_list_fill_header(dl, list_empty(&dl->chain));

		list_for_each_entry(dl_child, &dl->chain, chain) {
			bool last = list_is_last(&dl_child->chain, &dl->chain);

			vsp1_dl_list_fill_header(dl_child, last);
		}

		/*
		 * Commit the head display list to hardware. Chained headers
		 * will auto-start.
		 */
		vsp1_write(vsp1, VI6_DL_HDR_ADDR(dlm->index), dl->dma);

		dlm->active = dl;
		goto done;
	}

	/*
	 * Once the UPD bit has been set the hardware can start processing the
	 * display list at any time and we can't touch the address and size
	 * registers. In that case mark the update as pending, it will be
	 * queued up to the hardware by the frame end interrupt handler.
	 */
	update = !!(vsp1_read(vsp1, VI6_DL_BODY_SIZE) & VI6_DL_BODY_SIZE_UPD);
	if (update) {
		__vsp1_dl_list_put(dlm->pending);
		dlm->pending = dl;
		goto done;
	}

	/*
	 * Program the hardware with the display list body address and size.
	 * The UPD bit will be cleared by the device when the display list is
	 * processed.
	 */
	vsp1_write(vsp1, VI6_DL_HDR_ADDR(0), dl->body0.dma);
	vsp1_write(vsp1, VI6_DL_BODY_SIZE, VI6_DL_BODY_SIZE_UPD |
		   (dl->body0.num_entries * sizeof(*dl->header->lists)));

	__vsp1_dl_list_put(dlm->queued);
	dlm->queued = dl;

done:
	spin_unlock_irqrestore(&dlm->lock, flags);
}

/* -----------------------------------------------------------------------------
 * Display List Manager
 */

/* Interrupt Handling */
void vsp1_dlm_irq_display_start(struct vsp1_dl_manager *dlm)
{
	spin_lock(&dlm->lock);

	/*
	 * The display start interrupt signals the end of the display list
	 * processing by the device. The active display list, if any, won't be
	 * accessed anymore and can be reused.
	 */
	__vsp1_dl_list_put(dlm->active);
	dlm->active = NULL;

	spin_unlock(&dlm->lock);
}

void vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm)
{
	struct vsp1_device *vsp1 = dlm->vsp1;

	spin_lock(&dlm->lock);

	__vsp1_dl_list_put(dlm->active);
	dlm->active = NULL;

	/*
	 * Header mode is used for mem-to-mem pipelines only. We don't need to
	 * perform any operation as there can't be any new display list queued
	 * in that case.
	 */
	if (dlm->mode == VSP1_DL_MODE_HEADER)
		goto done;

	/*
	 * The UPD bit set indicates that the commit operation raced with the
	 * interrupt and occurred after the frame end event and UPD clear but
	 * before interrupt processing. The hardware hasn't taken the update
	 * into account yet, we'll thus skip one frame and retry.
	 */
	if (vsp1_read(vsp1, VI6_DL_BODY_SIZE) & VI6_DL_BODY_SIZE_UPD)
		goto done;

	/*
	 * The device starts processing the queued display list right after the
	 * frame end interrupt. The display list thus becomes active.
	 */
	if (dlm->queued) {
		dlm->active = dlm->queued;
		dlm->queued = NULL;
	}

	/*
	 * Now that the UPD bit has been cleared we can queue the next display
	 * list to the hardware if one has been prepared.
	 */
	if (dlm->pending) {
		struct vsp1_dl_list *dl = dlm->pending;

		vsp1_write(vsp1, VI6_DL_HDR_ADDR(0), dl->body0.dma);
		vsp1_write(vsp1, VI6_DL_BODY_SIZE, VI6_DL_BODY_SIZE_UPD |
			   (dl->body0.num_entries *
			    sizeof(*dl->header->lists)));

		dlm->queued = dl;
		dlm->pending = NULL;
	}

done:
	spin_unlock(&dlm->lock);
}

/* Hardware Setup */
void vsp1_dlm_setup(struct vsp1_device *vsp1)
{
	u32 ctrl = (256 << VI6_DL_CTRL_AR_WAIT_SHIFT)
		 | VI6_DL_CTRL_DC2 | VI6_DL_CTRL_DC1 | VI6_DL_CTRL_DC0
		 | VI6_DL_CTRL_DLE;

	/*
	 * The DRM pipeline operates with display lists in Continuous Frame
	 * Mode, all other pipelines use manual start.
	 */
	if (vsp1->drm)
		ctrl |= VI6_DL_CTRL_CFM0 | VI6_DL_CTRL_NH0;

	vsp1_write(vsp1, VI6_DL_CTRL, ctrl);
	vsp1_write(vsp1, VI6_DL_SWAP, VI6_DL_SWAP_LWS);
}

void vsp1_dlm_reset(struct vsp1_dl_manager *dlm)
{
	unsigned long flags;

	spin_lock_irqsave(&dlm->lock, flags);

	__vsp1_dl_list_put(dlm->active);
	__vsp1_dl_list_put(dlm->queued);
	__vsp1_dl_list_put(dlm->pending);

	spin_unlock_irqrestore(&dlm->lock, flags);

	dlm->active = NULL;
	dlm->queued = NULL;
	dlm->pending = NULL;
}

/*
 * Free all fragments waiting to be garbage-collected.
 *
 * This function must be called without the display list manager lock held.
 */
static void vsp1_dlm_fragments_free(struct vsp1_dl_manager *dlm)
{
	unsigned long flags;

	spin_lock_irqsave(&dlm->lock, flags);

	while (!list_empty(&dlm->gc_fragments)) {
		struct vsp1_dl_body *dlb;

		dlb = list_first_entry(&dlm->gc_fragments, struct vsp1_dl_body,
				       list);
		list_del(&dlb->list);

		spin_unlock_irqrestore(&dlm->lock, flags);
		vsp1_dl_fragment_free(dlb);
		spin_lock_irqsave(&dlm->lock, flags);
	}

	spin_unlock_irqrestore(&dlm->lock, flags);
}

static void vsp1_dlm_garbage_collect(struct work_struct *work)
{
	struct vsp1_dl_manager *dlm =
		container_of(work, struct vsp1_dl_manager, gc_work);

	vsp1_dlm_fragments_free(dlm);
}

struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1,
					unsigned int index,
					unsigned int prealloc)
{
	struct vsp1_dl_manager *dlm;
	unsigned int i;

	dlm = devm_kzalloc(vsp1->dev, sizeof(*dlm), GFP_KERNEL);
	if (!dlm)
		return NULL;

	dlm->index = index;
	dlm->mode = index == 0 && !vsp1->info->uapi
		  ? VSP1_DL_MODE_HEADERLESS : VSP1_DL_MODE_HEADER;
	dlm->vsp1 = vsp1;

	spin_lock_init(&dlm->lock);
	INIT_LIST_HEAD(&dlm->free);
	INIT_LIST_HEAD(&dlm->gc_fragments);
	INIT_WORK(&dlm->gc_work, vsp1_dlm_garbage_collect);

	for (i = 0; i < prealloc; ++i) {
		struct vsp1_dl_list *dl;

		dl = vsp1_dl_list_alloc(dlm);
		if (!dl)
			return NULL;

		list_add_tail(&dl->list, &dlm->free);
	}

	return dlm;
}
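
/*
 * A manager is expected to be created per WPF instance, along the lines of
 * the following sketch (the wpf->dlm field, the use of the entity index and
 * the prealloc count of 4 are assumptions for the example):
 *
 *	wpf->dlm = vsp1_dlm_create(vsp1, wpf->entity.index, 4);
 *	if (!wpf->dlm)
 *		return -ENOMEM;
 */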

void vsp1_dlm_destroy(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *dl, *next;

	if (!dlm)
		return;

	cancel_work_sync(&dlm->gc_work);

	list_for_each_entry_safe(dl, next, &dlm->free, list) {
		list_del(&dl->list);
		vsp1_dl_list_free(dl);
	}

	vsp1_dlm_fragments_free(dlm);
}