/*
 * vsp1_dl.c -- R-Car VSP1 Display List
 *
 * Copyright (C) 2015 Renesas Corporation
 *
 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "vsp1.h"
#include "vsp1_dl.h"

#define VSP1_DL_NUM_ENTRIES		256

#define VSP1_DLH_INT_ENABLE		(1 << 1)
#define VSP1_DLH_AUTO_START		(1 << 0)

struct vsp1_dl_header_list {
	u32 num_bytes;
	u32 addr;
} __attribute__((__packed__));

struct vsp1_dl_header {
	u32 num_lists;
	struct vsp1_dl_header_list lists[8];
	u32 next_header;
	u32 flags;
} __attribute__((__packed__));

struct vsp1_dl_entry {
	u32 addr;
	u32 data;
} __attribute__((__packed__));

/**
 * struct vsp1_dl_body - Display list body
 * @list: entry in the display list's list of bodies
 * @vsp1: the VSP1 device
 * @entries: array of entries
 * @dma: DMA address of the entries
 * @size: size of the DMA memory in bytes
 * @num_entries: number of stored entries
 */
struct vsp1_dl_body {
	struct list_head list;
	struct vsp1_device *vsp1;

	struct vsp1_dl_entry *entries;
	dma_addr_t dma;
	size_t size;

	unsigned int num_entries;
};

/**
 * struct vsp1_dl_list - Display list
 * @list: entry in the display list manager lists
 * @dlm: the display list manager
 * @header: display list header, NULL for headerless lists
 * @dma: DMA address for the header
 * @body0: first display list body
 * @fragments: list of extra display list bodies
 * @has_chain: true if the display list heads a chain of display lists
 * @chain: entry in the display list partition chain
 */
struct vsp1_dl_list {
	struct list_head list;
	struct vsp1_dl_manager *dlm;

	struct vsp1_dl_header *header;
	dma_addr_t dma;

	struct vsp1_dl_body body0;
	struct list_head fragments;

	bool has_chain;
	struct list_head chain;
};

enum vsp1_dl_mode {
	VSP1_DL_MODE_HEADER,
	VSP1_DL_MODE_HEADERLESS,
};

/**
 * struct vsp1_dl_manager - Display List manager
 * @index: index of the related WPF
 * @mode: display list operation mode (header or headerless)
 * @singleshot: execute the display list in single-shot mode
 * @vsp1: the VSP1 device
 * @lock: protects the free, active, queued, pending and gc_fragments lists
 * @free: list of all free display lists
 * @active: list currently being processed (loaded) by hardware
 * @queued: list queued to the hardware (written to the DL registers)
 * @pending: list waiting to be queued to the hardware
 * @gc_work: fragments garbage collector work struct
 * @gc_fragments: list of display list fragments waiting to be freed
 */
struct vsp1_dl_manager {
	unsigned int index;
	enum vsp1_dl_mode mode;
	bool singleshot;
	struct vsp1_device *vsp1;

	spinlock_t lock;
	struct list_head free;
	struct vsp1_dl_list *active;
	struct vsp1_dl_list *queued;
	struct vsp1_dl_list *pending;

	struct work_struct gc_work;
	struct list_head gc_fragments;
};

/* -----------------------------------------------------------------------------
 * Display List Body Management
 */

/*
 * Initialize a display list body object and allocate DMA memory for the body
 * data. The display list body object is expected to have been initialized to
 * 0 when allocated.
 */
static int vsp1_dl_body_init(struct vsp1_device *vsp1,
			     struct vsp1_dl_body *dlb, unsigned int num_entries,
			     size_t extra_size)
{
	size_t size = num_entries * sizeof(*dlb->entries) + extra_size;

	dlb->vsp1 = vsp1;
	dlb->size = size;

	dlb->entries = dma_alloc_wc(vsp1->bus_master, dlb->size, &dlb->dma,
				    GFP_KERNEL);
	if (!dlb->entries)
		return -ENOMEM;

	return 0;
}

/*
 * Cleanup a display list body and free the allocated DMA memory.
 */
static void vsp1_dl_body_cleanup(struct vsp1_dl_body *dlb)
{
	dma_free_wc(dlb->vsp1->bus_master, dlb->size, dlb->entries, dlb->dma);
}

/**
 * vsp1_dl_fragment_alloc - Allocate a display list fragment
 * @vsp1: The VSP1 device
 * @num_entries: The maximum number of entries that the fragment can contain
 *
 * Allocate a display list fragment with enough memory to contain the requested
 * number of entries.
 *
 * Return a pointer to a fragment on success or NULL if memory can't be
 * allocated.
 */
struct vsp1_dl_body *vsp1_dl_fragment_alloc(struct vsp1_device *vsp1,
					    unsigned int num_entries)
{
	struct vsp1_dl_body *dlb;
	int ret;

	dlb = kzalloc(sizeof(*dlb), GFP_KERNEL);
	if (!dlb)
		return NULL;

	ret = vsp1_dl_body_init(vsp1, dlb, num_entries, 0);
	if (ret < 0) {
		kfree(dlb);
		return NULL;
	}

	return dlb;
}

/**
 * vsp1_dl_fragment_free - Free a display list fragment
 * @dlb: The fragment
 *
 * Free the given display list fragment and the associated DMA memory.
 *
 * Fragments must only be freed explicitly if they are not added to a display
 * list, as the display list will otherwise take ownership of them and free
 * them. Manual free typically happens at cleanup time for fragments that have
 * been allocated but not used.
 *
 * Passing a NULL pointer to this function is safe; in that case no operation
 * will be performed.
 */
void vsp1_dl_fragment_free(struct vsp1_dl_body *dlb)
{
	if (!dlb)
		return;

	vsp1_dl_body_cleanup(dlb);
	kfree(dlb);
}

/**
 * vsp1_dl_fragment_write - Write a register to a display list fragment
 * @dlb: The fragment
 * @reg: The register address
 * @data: The register value
 *
 * Write the given register and value to the display list fragment. The maximum
 * number of entries that can be written in a fragment is specified when the
 * fragment is allocated by vsp1_dl_fragment_alloc().
 */
void vsp1_dl_fragment_write(struct vsp1_dl_body *dlb, u32 reg, u32 data)
{
	dlb->entries[dlb->num_entries].addr = reg;
	dlb->entries[dlb->num_entries].data = data;
	dlb->num_entries++;
}

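/*
 * A minimal usage sketch for the fragment API: allocate a fragment, fill it
 * with register writes, then attach it to a display list with
 * vsp1_dl_list_add_fragment() (defined below), which takes ownership of it.
 * The reg and value variables are illustrative placeholders, not registers
 * defined by this driver.
 *
 *	struct vsp1_dl_body *dlb;
 *	int ret;
 *
 *	dlb = vsp1_dl_fragment_alloc(vsp1, 16);
 *	if (!dlb)
 *		return -ENOMEM;
 *
 *	vsp1_dl_fragment_write(dlb, reg, value);
 *
 *	ret = vsp1_dl_list_add_fragment(dl, dlb);
 *	if (ret < 0)
 *		vsp1_dl_fragment_free(dlb);
 */
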
/* -----------------------------------------------------------------------------
 * Display List Transaction Management
 */

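/*
 * Allocate a display list and the DMA memory for its first body, reserving
 * extra room for the header when the manager operates in header mode.
 */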
static struct vsp1_dl_list *vsp1_dl_list_alloc(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *dl;
	size_t header_size;
	int ret;

	dl = kzalloc(sizeof(*dl), GFP_KERNEL);
	if (!dl)
		return NULL;

	INIT_LIST_HEAD(&dl->fragments);
	dl->dlm = dlm;

	/*
	 * Initialize the display list body and allocate DMA memory for the
	 * body and the optional header. Both are allocated together to avoid
	 * memory fragmentation, with the header located right after the body
	 * in memory.
	 */
	header_size = dlm->mode == VSP1_DL_MODE_HEADER
		    ? ALIGN(sizeof(struct vsp1_dl_header), 8)
		    : 0;

	ret = vsp1_dl_body_init(dlm->vsp1, &dl->body0, VSP1_DL_NUM_ENTRIES,
				header_size);
	if (ret < 0) {
		kfree(dl);
		return NULL;
	}

	if (dlm->mode == VSP1_DL_MODE_HEADER) {
		size_t header_offset = VSP1_DL_NUM_ENTRIES
				     * sizeof(*dl->body0.entries);

		dl->header = ((void *)dl->body0.entries) + header_offset;
		dl->dma = dl->body0.dma + header_offset;

		memset(dl->header, 0, sizeof(*dl->header));
		dl->header->lists[0].addr = dl->body0.dma;
	}

	return dl;
}

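/*
 * Free a display list. The DMA memory of the first body is released
 * immediately, while fragments still attached to the list are handed over to
 * the manager's garbage collection list.
 */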
static void vsp1_dl_list_free(struct vsp1_dl_list *dl)
{
	vsp1_dl_body_cleanup(&dl->body0);
	list_splice_init(&dl->fragments, &dl->dlm->gc_fragments);
	kfree(dl);
}

/**
 * vsp1_dl_list_get - Get a free display list
 * @dlm: The display list manager
 *
 * Get a display list from the pool of free lists and return it.
 *
 * This function must be called without the display list manager lock held.
 */
struct vsp1_dl_list *vsp1_dl_list_get(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *dl = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dlm->lock, flags);

	if (!list_empty(&dlm->free)) {
		dl = list_first_entry(&dlm->free, struct vsp1_dl_list, list);
		list_del(&dl->list);

		/*
		 * The display list chain must be initialised to ensure every
		 * display list can assert list_empty() if it is not in a
		 * chain.
		 */
		INIT_LIST_HEAD(&dl->chain);
	}

	spin_unlock_irqrestore(&dlm->lock, flags);

	return dl;
}

/* This function must be called with the display list manager lock held. */
static void __vsp1_dl_list_put(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_list *dl_child;

	if (!dl)
		return;

	/*
	 * Release any linked display lists which were chained for a single
	 * hardware operation.
	 */
	if (dl->has_chain) {
		list_for_each_entry(dl_child, &dl->chain, chain)
			__vsp1_dl_list_put(dl_child);
	}

	dl->has_chain = false;

	/*
	 * We can't free fragments here as DMA memory can only be freed in
	 * interruptible context. Move all fragments to the display list
	 * manager's list of fragments to be freed; they will be
	 * garbage-collected by the work queue.
	 */
	if (!list_empty(&dl->fragments)) {
		list_splice_init(&dl->fragments, &dl->dlm->gc_fragments);
		schedule_work(&dl->dlm->gc_work);
	}

	dl->body0.num_entries = 0;

	list_add_tail(&dl->list, &dl->dlm->free);
}

/**
 * vsp1_dl_list_put - Release a display list
 * @dl: The display list
 *
 * Release the display list and return it to the pool of free lists.
 *
 * Passing a NULL pointer to this function is safe; in that case no operation
 * will be performed.
 */
void vsp1_dl_list_put(struct vsp1_dl_list *dl)
{
	unsigned long flags;

	if (!dl)
		return;

	spin_lock_irqsave(&dl->dlm->lock, flags);
	__vsp1_dl_list_put(dl);
	spin_unlock_irqrestore(&dl->dlm->lock, flags);
}

/**
 * vsp1_dl_list_write - Write a register to the display list
 * @dl: The display list
 * @reg: The register address
 * @data: The register value
 *
 * Write the given register and value to the display list. Up to 256 registers
 * can be written per display list.
 */
void vsp1_dl_list_write(struct vsp1_dl_list *dl, u32 reg, u32 data)
{
	vsp1_dl_fragment_write(&dl->body0, reg, data);
}

/**
 * vsp1_dl_list_add_fragment - Add a fragment to the display list
 * @dl: The display list
 * @dlb: The fragment
 *
 * Add a display list body as a fragment to a display list. Registers contained
 * in fragments are processed after registers contained in the main display
 * list, in the order in which fragments are added.
 *
 * Adding a fragment to a display list passes ownership of the fragment to the
 * list. The caller must not touch the fragment after this call, and must not
 * free it explicitly with vsp1_dl_fragment_free().
 *
 * Fragments are only usable for display lists in header mode. Attempts to add
 * a fragment to a headerless display list will return an error.
 */
int vsp1_dl_list_add_fragment(struct vsp1_dl_list *dl,
			      struct vsp1_dl_body *dlb)
{
	/* Multi-body lists are only available in header mode. */
	if (dl->dlm->mode != VSP1_DL_MODE_HEADER)
		return -EINVAL;

	list_add_tail(&dlb->list, &dl->fragments);
	return 0;
}

/**
 * vsp1_dl_list_add_chain - Add a display list to a chain
 * @head: The head display list
 * @dl: The new display list
 *
 * Add a display list to an existing display list chain. The chained lists
 * will be automatically processed by the hardware without intervention from
 * the CPU. A display list end interrupt will only complete after the last
 * display list in the chain has completed processing.
 *
 * Adding a display list to a chain passes ownership of the display list to
 * the head display list item. The chain is released when the head display
 * list is put back with __vsp1_dl_list_put().
 *
 * Chained display lists are only usable in header mode. Attempts to add a
 * display list to a chain in headerless mode will return an error.
 */
int vsp1_dl_list_add_chain(struct vsp1_dl_list *head,
			   struct vsp1_dl_list *dl)
{
	/* Chained lists are only available in header mode. */
	if (head->dlm->mode != VSP1_DL_MODE_HEADER)
		return -EINVAL;

	head->has_chain = true;
	list_add_tail(&dl->chain, &head->chain);
	return 0;
}

static void vsp1_dl_list_fill_header(struct vsp1_dl_list *dl, bool is_last)
{
	struct vsp1_dl_manager *dlm = dl->dlm;
	struct vsp1_dl_header_list *hdr = dl->header->lists;
	struct vsp1_dl_body *dlb;
	unsigned int num_lists = 0;

	/*
	 * Fill the header with the display list bodies addresses and sizes.
	 * The address of the first body has already been filled when the
	 * display list was allocated.
	 */

	hdr->num_bytes = dl->body0.num_entries
		       * sizeof(*dl->header->lists);

	list_for_each_entry(dlb, &dl->fragments, list) {
		num_lists++;
		hdr++;

		hdr->addr = dlb->dma;
		hdr->num_bytes = dlb->num_entries
			       * sizeof(*dl->header->lists);
	}

	dl->header->num_lists = num_lists;

	if (!list_empty(&dl->chain) && !is_last) {
		/*
		 * If this display list's chain is not empty, we are on a list,
		 * and the next item is the display list that we must queue for
		 * automatic processing by the hardware.
		 */
		struct vsp1_dl_list *next = list_next_entry(dl, chain);

		dl->header->next_header = next->dma;
		dl->header->flags = VSP1_DLH_AUTO_START;
	} else if (!dlm->singleshot) {
		/*
		 * If the display list manager works in continuous mode, the
		 * VSP should loop over the display list continuously until
		 * instructed to do otherwise.
		 */
		dl->header->next_header = dl->dma;
		dl->header->flags = VSP1_DLH_INT_ENABLE | VSP1_DLH_AUTO_START;
	} else {
		/*
		 * Otherwise, in mem-to-mem mode, we work in single-shot mode
		 * and the next display list must not be started automatically.
		 */
		dl->header->flags = VSP1_DLH_INT_ENABLE;
	}
}

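/*
 * Check whether the hardware has still to take the last queued display list
 * into account. Called with the display list manager lock held.
 */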
static bool vsp1_dl_list_hw_update_pending(struct vsp1_dl_manager *dlm)
{
	struct vsp1_device *vsp1 = dlm->vsp1;

	if (!dlm->queued)
		return false;

	/*
	 * Check whether the VSP1 has taken the update. In headerless mode the
	 * hardware indicates this by clearing the UPD bit in the DL_BODY_SIZE
	 * register, and in header mode by clearing the UPDHDR bit in the CMD
	 * register.
	 */
	if (dlm->mode == VSP1_DL_MODE_HEADERLESS)
		return !!(vsp1_read(vsp1, VI6_DL_BODY_SIZE)
			  & VI6_DL_BODY_SIZE_UPD);
	else
		return !!(vsp1_read(vsp1, VI6_CMD(dlm->index))
			  & VI6_CMD_UPDHDR);
}

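/*
 * Program a display list to the hardware, using the method that matches the
 * manager's operating mode (headerless or header).
 */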
static void vsp1_dl_list_hw_enqueue(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;
	struct vsp1_device *vsp1 = dlm->vsp1;

	if (dlm->mode == VSP1_DL_MODE_HEADERLESS) {
		/*
		 * In headerless mode, program the hardware directly with the
		 * display list body address and size and set the UPD bit. The
		 * bit will be cleared by the hardware when the display list
		 * processing starts.
		 */
		vsp1_write(vsp1, VI6_DL_HDR_ADDR(0), dl->body0.dma);
		vsp1_write(vsp1, VI6_DL_BODY_SIZE, VI6_DL_BODY_SIZE_UPD |
			   (dl->body0.num_entries * sizeof(*dl->header->lists)));
	} else {
		/*
		 * In header mode, program the display list header address. If
		 * the hardware is idle (single-shot mode or first frame in
		 * continuous mode) it will then be started independently. If
		 * the hardware is operating, the VI6_DL_HDR_REF_ADDR register
		 * will be updated with the display list address.
		 */
		vsp1_write(vsp1, VI6_DL_HDR_ADDR(dlm->index), dl->dma);
	}
}

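/*
 * Commit a display list in continuous mode. Called with the display list
 * manager lock held.
 */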
static void vsp1_dl_list_commit_continuous(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;

	/*
	 * If a previous display list has been queued to the hardware but not
	 * processed yet, the VSP can start processing it at any time. In that
	 * case we can't replace the queued list by the new one, as we could
	 * race with the hardware. We thus mark the update as pending; it will
	 * be queued to the hardware by the frame end interrupt handler.
	 */
	if (vsp1_dl_list_hw_update_pending(dlm)) {
		__vsp1_dl_list_put(dlm->pending);
		dlm->pending = dl;
		return;
	}

	/*
	 * Pass the new display list to the hardware and mark it as queued. It
	 * will become active when the hardware starts processing it.
	 */
	vsp1_dl_list_hw_enqueue(dl);

	__vsp1_dl_list_put(dlm->queued);
	dlm->queued = dl;
}

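/*
 * Commit a display list in single-shot mode. Called with the display list
 * manager lock held.
 */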
static void vsp1_dl_list_commit_singleshot(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;

	/*
	 * When working in single-shot mode, the caller guarantees that the
	 * hardware is idle at this point. Just commit the head display list
	 * to hardware. Chained lists will be started automatically.
	 */
	vsp1_dl_list_hw_enqueue(dl);

	dlm->active = dl;
}

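/**
 * vsp1_dl_list_commit - Commit a display list to the hardware
 * @dl: The display list
 *
 * Fill the headers of the display list and of all chained lists when operating
 * in header mode, then pass the list to the hardware, either immediately in
 * single-shot mode or through the queued/pending mechanism in continuous mode.
 */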
void vsp1_dl_list_commit(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;
	struct vsp1_dl_list *dl_child;
	unsigned long flags;

	if (dlm->mode == VSP1_DL_MODE_HEADER) {
		/* Fill the header for the head and chained display lists. */
		vsp1_dl_list_fill_header(dl, list_empty(&dl->chain));

		list_for_each_entry(dl_child, &dl->chain, chain) {
			bool last = list_is_last(&dl_child->chain, &dl->chain);

			vsp1_dl_list_fill_header(dl_child, last);
		}
	}

	spin_lock_irqsave(&dlm->lock, flags);

	if (dlm->singleshot)
		vsp1_dl_list_commit_singleshot(dl);
	else
		vsp1_dl_list_commit_continuous(dl);

	spin_unlock_irqrestore(&dlm->lock, flags);
}

/* -----------------------------------------------------------------------------
 * Display List Manager
 */

/**
 * vsp1_dlm_irq_frame_end - Display list handler for the frame end interrupt
 * @dlm: the display list manager
 *
 * Return true if the previous display list has completed at frame end, or
 * false if it has been delayed by one frame because the display list commit
 * raced with the frame end interrupt. The function always returns true in
 * header mode as display list processing is then not continuous and races
 * never occur.
 */
bool vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm)
{
	bool completed = false;

	spin_lock(&dlm->lock);

	/*
	 * The mem-to-mem pipelines work in single-shot mode. No new display
	 * list can be queued, so we don't have to do anything.
	 */
	if (dlm->singleshot) {
		__vsp1_dl_list_put(dlm->active);
		dlm->active = NULL;
		completed = true;
		goto done;
	}

	/*
	 * If the commit operation raced with the interrupt and occurred after
	 * the frame end event but before interrupt processing, the hardware
	 * hasn't taken the update into account yet. We have to skip one frame
	 * and retry.
	 */
	if (vsp1_dl_list_hw_update_pending(dlm))
		goto done;

	/*
	 * The device starts processing the queued display list right after the
	 * frame end interrupt. The display list thus becomes active.
	 */
	if (dlm->queued) {
		__vsp1_dl_list_put(dlm->active);
		dlm->active = dlm->queued;
		dlm->queued = NULL;
		completed = true;
	}

	/*
	 * Now that the VSP has started processing the queued display list, we
	 * can queue the pending display list to the hardware if one has been
	 * prepared.
	 */
	if (dlm->pending) {
		vsp1_dl_list_hw_enqueue(dlm->pending);
		dlm->queued = dlm->pending;
		dlm->pending = NULL;
	}

done:
	spin_unlock(&dlm->lock);

	return completed;
}

/* Hardware Setup */
void vsp1_dlm_setup(struct vsp1_device *vsp1)
{
	u32 ctrl = (256 << VI6_DL_CTRL_AR_WAIT_SHIFT)
		 | VI6_DL_CTRL_DC2 | VI6_DL_CTRL_DC1 | VI6_DL_CTRL_DC0
		 | VI6_DL_CTRL_DLE;

	/*
	 * The DRM pipeline operates with display lists in Continuous Frame
	 * Mode, while all other pipelines use manual start.
	 */
	if (vsp1->drm)
		ctrl |= VI6_DL_CTRL_CFM0 | VI6_DL_CTRL_NH0;

	vsp1_write(vsp1, VI6_DL_CTRL, ctrl);
	vsp1_write(vsp1, VI6_DL_SWAP, VI6_DL_SWAP_LWS);
}

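/**
 * vsp1_dlm_reset - Release the active, queued and pending display lists
 * @dlm: The display list manager
 *
 * Return the active, queued and pending display lists to the pool of free
 * lists. This function doesn't synchronize with the hardware; callers are
 * expected to ensure the device has stopped processing display lists first.
 */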
void vsp1_dlm_reset(struct vsp1_dl_manager *dlm)
{
	unsigned long flags;

	spin_lock_irqsave(&dlm->lock, flags);

	__vsp1_dl_list_put(dlm->active);
	__vsp1_dl_list_put(dlm->queued);
	__vsp1_dl_list_put(dlm->pending);

	spin_unlock_irqrestore(&dlm->lock, flags);

	dlm->active = NULL;
	dlm->queued = NULL;
	dlm->pending = NULL;
}

/*
 * Free all fragments awaiting garbage collection.
 *
 * This function must be called without the display list manager lock held.
 */
static void vsp1_dlm_fragments_free(struct vsp1_dl_manager *dlm)
{
	unsigned long flags;

	spin_lock_irqsave(&dlm->lock, flags);

	while (!list_empty(&dlm->gc_fragments)) {
		struct vsp1_dl_body *dlb;

		dlb = list_first_entry(&dlm->gc_fragments, struct vsp1_dl_body,
				       list);
		list_del(&dlb->list);

		spin_unlock_irqrestore(&dlm->lock, flags);
		vsp1_dl_fragment_free(dlb);
		spin_lock_irqsave(&dlm->lock, flags);
	}

	spin_unlock_irqrestore(&dlm->lock, flags);
}

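/* Work handler that frees fragments queued for garbage collection. */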
static void vsp1_dlm_garbage_collect(struct work_struct *work)
{
	struct vsp1_dl_manager *dlm =
		container_of(work, struct vsp1_dl_manager, gc_work);

	vsp1_dlm_fragments_free(dlm);
}

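/**
 * vsp1_dlm_create - Create a display list manager
 * @vsp1: The VSP1 device
 * @index: Index of the related WPF
 * @prealloc: Number of display lists to preallocate
 *
 * Allocate and initialize a display list manager and preallocate its display
 * lists. The manager operates in headerless mode for the first WPF when the
 * userspace API is disabled, and in header mode otherwise.
 *
 * Return a pointer to the manager on success or NULL if allocation fails.
 */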
struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1,
					unsigned int index,
					unsigned int prealloc)
{
	struct vsp1_dl_manager *dlm;
	unsigned int i;

	dlm = devm_kzalloc(vsp1->dev, sizeof(*dlm), GFP_KERNEL);
	if (!dlm)
		return NULL;

	dlm->index = index;
	dlm->mode = index == 0 && !vsp1->info->uapi
		  ? VSP1_DL_MODE_HEADERLESS : VSP1_DL_MODE_HEADER;
	dlm->singleshot = vsp1->info->uapi;
	dlm->vsp1 = vsp1;

	spin_lock_init(&dlm->lock);
	INIT_LIST_HEAD(&dlm->free);
	INIT_LIST_HEAD(&dlm->gc_fragments);
	INIT_WORK(&dlm->gc_work, vsp1_dlm_garbage_collect);

	for (i = 0; i < prealloc; ++i) {
		struct vsp1_dl_list *dl;

		dl = vsp1_dl_list_alloc(dlm);
		if (!dl)
			return NULL;

		list_add_tail(&dl->list, &dlm->free);
	}

	return dlm;
}

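/**
 * vsp1_dlm_destroy - Destroy a display list manager
 * @dlm: The display list manager
 *
 * Cancel the garbage collection work, free all preallocated display lists and
 * all fragments still awaiting garbage collection. Passing a NULL pointer is
 * safe; in that case no operation will be performed.
 */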
void vsp1_dlm_destroy(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *dl, *next;

	if (!dlm)
		return;

	cancel_work_sync(&dlm->gc_work);

	list_for_each_entry_safe(dl, next, &dlm->free, list) {
		list_del(&dl->list);
		vsp1_dl_list_free(dl);
	}

	vsp1_dlm_fragments_free(dlm);
}