blob: 85235e53cf94a43d280df3a9084b512de4cb30a9 [file] [log] [blame]
Benjamin Chan99eb63b2016-12-21 15:45:26 -05001/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
Alan Kwong9487de22016-01-16 22:06:36 -05002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#define pr_fmt(fmt) "%s: " fmt, __func__
15
16#include <linux/platform_device.h>
17#include <linux/module.h>
18#include <linux/fs.h>
19#include <linux/file.h>
20#include <linux/sync.h>
21#include <linux/delay.h>
22#include <linux/debugfs.h>
23#include <linux/interrupt.h>
24#include <linux/dma-mapping.h>
25#include <linux/dma-buf.h>
26#include <linux/msm_ion.h>
Alan Kwong315cd772016-08-03 22:29:42 -040027#include <linux/clk/msm-clk.h>
Alan Kwong9487de22016-01-16 22:06:36 -050028
29#include "sde_rotator_core.h"
30#include "sde_rotator_util.h"
31#include "sde_rotator_smmu.h"
32#include "sde_rotator_r3.h"
33#include "sde_rotator_r3_internal.h"
34#include "sde_rotator_r3_hwio.h"
35#include "sde_rotator_r3_debug.h"
36#include "sde_rotator_trace.h"
Benjamin Chan53e3bce2016-08-31 14:43:29 -040037#include "sde_rotator_debug.h"
Alan Kwong9487de22016-01-16 22:06:36 -050038
/* UHD (4k) pixel-count threshold used to decide traffic shaping */
#define RES_UHD (3840*2160)

/* traffic shaping clock ticks = finish_time x 19.2MHz */
#define TRAFFIC_SHAPE_CLKTICK_14MS 268800
#define TRAFFIC_SHAPE_CLKTICK_12MS 230400

/* XIN mapping */
#define XIN_SSPP 0
#define XIN_WRITEBACK 1

/* wait for at most 2 vsync for lowest refresh rate (24hz) */
#define KOFF_TIMEOUT msecs_to_jiffies(42 * 32)
51
/* Macros for constructing the REGDMA command stream. Each macro emits
 * one command into the segment and advances the write pointer 'p' past
 * the words it wrote.
 */

/* Single register write: opcode|offset word followed by one data word */
#define SDE_REGDMA_WRITE(p, off, data) \
	do { \
		*p++ = REGDMA_OP_REGWRITE | \
			((off) & REGDMA_ADDR_OFFSET_MASK); \
		*p++ = (data); \
	} while (0)

/* Read-modify-write: opcode|offset, then AND-mask, then OR-data */
#define SDE_REGDMA_MODIFY(p, off, mask, data) \
	do { \
		*p++ = REGDMA_OP_REGMODIFY | \
			((off) & REGDMA_ADDR_OFFSET_MASK); \
		*p++ = (mask); \
		*p++ = (data); \
	} while (0)

/* Block-write header with auto-incrementing destination address;
 * must be followed by 'len' SDE_REGDMA_BLKWRITE_DATA payload words.
 */
#define SDE_REGDMA_BLKWRITE_INC(p, off, len) \
	do { \
		*p++ = REGDMA_OP_BLKWRITE_INC | \
			((off) & REGDMA_ADDR_OFFSET_MASK); \
		*p++ = (len); \
	} while (0)

/* One payload word of a preceding block-write command */
#define SDE_REGDMA_BLKWRITE_DATA(p, data) \
	do { \
		*(p) = (data); \
		(p)++; \
	} while (0)

/* Macros for directly accessing memory-mapped rotator registers */
#define SDE_ROTREG_WRITE(base, off, data) \
	writel_relaxed(data, (base + (off)))

#define SDE_ROTREG_READ(base, off) \
	readl_relaxed(base + (off))
87
/* Pixel formats supported on the rotator input (fetch/SSPP) side */
static u32 sde_hw_rotator_input_pixfmts[] = {
	SDE_PIX_FMT_XRGB_8888,
	SDE_PIX_FMT_ARGB_8888,
	SDE_PIX_FMT_ABGR_8888,
	SDE_PIX_FMT_RGBA_8888,
	SDE_PIX_FMT_BGRA_8888,
	SDE_PIX_FMT_RGBX_8888,
	SDE_PIX_FMT_BGRX_8888,
	SDE_PIX_FMT_XBGR_8888,
	SDE_PIX_FMT_RGBA_5551,
	SDE_PIX_FMT_ARGB_1555,
	SDE_PIX_FMT_ABGR_1555,
	SDE_PIX_FMT_BGRA_5551,
	SDE_PIX_FMT_BGRX_5551,
	SDE_PIX_FMT_RGBX_5551,
	SDE_PIX_FMT_XBGR_1555,
	SDE_PIX_FMT_XRGB_1555,
	SDE_PIX_FMT_ARGB_4444,
	SDE_PIX_FMT_RGBA_4444,
	SDE_PIX_FMT_BGRA_4444,
	SDE_PIX_FMT_ABGR_4444,
	SDE_PIX_FMT_RGBX_4444,
	SDE_PIX_FMT_XRGB_4444,
	SDE_PIX_FMT_BGRX_4444,
	SDE_PIX_FMT_XBGR_4444,
	SDE_PIX_FMT_RGB_888,
	SDE_PIX_FMT_BGR_888,
	SDE_PIX_FMT_RGB_565,
	SDE_PIX_FMT_BGR_565,
	SDE_PIX_FMT_Y_CB_CR_H2V2,
	SDE_PIX_FMT_Y_CR_CB_H2V2,
	SDE_PIX_FMT_Y_CR_CB_GH2V2,
	SDE_PIX_FMT_Y_CBCR_H2V2,
	SDE_PIX_FMT_Y_CRCB_H2V2,
	SDE_PIX_FMT_Y_CBCR_H1V2,
	SDE_PIX_FMT_Y_CRCB_H1V2,
	SDE_PIX_FMT_Y_CBCR_H2V1,
	SDE_PIX_FMT_Y_CRCB_H2V1,
	SDE_PIX_FMT_YCBYCR_H2V1,
	SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
	SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
	SDE_PIX_FMT_RGBA_8888_UBWC,
	SDE_PIX_FMT_RGBX_8888_UBWC,
	SDE_PIX_FMT_RGB_565_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
	SDE_PIX_FMT_RGBA_1010102,
	SDE_PIX_FMT_RGBX_1010102,
	SDE_PIX_FMT_ARGB_2101010,
	SDE_PIX_FMT_XRGB_2101010,
	SDE_PIX_FMT_BGRA_1010102,
	SDE_PIX_FMT_BGRX_1010102,
	SDE_PIX_FMT_ABGR_2101010,
	SDE_PIX_FMT_XBGR_2101010,
	SDE_PIX_FMT_RGBA_1010102_UBWC,
	SDE_PIX_FMT_RGBX_1010102_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_P010,
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
};
147
/* Pixel formats supported on the rotator output (writeback) side.
 * Commented-out entries are fetch-only formats that cannot be written.
 */
static u32 sde_hw_rotator_output_pixfmts[] = {
	SDE_PIX_FMT_XRGB_8888,
	SDE_PIX_FMT_ARGB_8888,
	SDE_PIX_FMT_ABGR_8888,
	SDE_PIX_FMT_RGBA_8888,
	SDE_PIX_FMT_BGRA_8888,
	SDE_PIX_FMT_RGBX_8888,
	SDE_PIX_FMT_BGRX_8888,
	SDE_PIX_FMT_XBGR_8888,
	SDE_PIX_FMT_RGBA_5551,
	SDE_PIX_FMT_ARGB_1555,
	SDE_PIX_FMT_ABGR_1555,
	SDE_PIX_FMT_BGRA_5551,
	SDE_PIX_FMT_BGRX_5551,
	SDE_PIX_FMT_RGBX_5551,
	SDE_PIX_FMT_XBGR_1555,
	SDE_PIX_FMT_XRGB_1555,
	SDE_PIX_FMT_ARGB_4444,
	SDE_PIX_FMT_RGBA_4444,
	SDE_PIX_FMT_BGRA_4444,
	SDE_PIX_FMT_ABGR_4444,
	SDE_PIX_FMT_RGBX_4444,
	SDE_PIX_FMT_XRGB_4444,
	SDE_PIX_FMT_BGRX_4444,
	SDE_PIX_FMT_XBGR_4444,
	SDE_PIX_FMT_RGB_888,
	SDE_PIX_FMT_BGR_888,
	SDE_PIX_FMT_RGB_565,
	SDE_PIX_FMT_BGR_565,
	/* SDE_PIX_FMT_Y_CB_CR_H2V2 */
	/* SDE_PIX_FMT_Y_CR_CB_H2V2 */
	/* SDE_PIX_FMT_Y_CR_CB_GH2V2 */
	SDE_PIX_FMT_Y_CBCR_H2V2,
	SDE_PIX_FMT_Y_CRCB_H2V2,
	SDE_PIX_FMT_Y_CBCR_H1V2,
	SDE_PIX_FMT_Y_CRCB_H1V2,
	SDE_PIX_FMT_Y_CBCR_H2V1,
	SDE_PIX_FMT_Y_CRCB_H2V1,
	/* SDE_PIX_FMT_YCBYCR_H2V1 */
	SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
	SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
	SDE_PIX_FMT_RGBA_8888_UBWC,
	SDE_PIX_FMT_RGBX_8888_UBWC,
	SDE_PIX_FMT_RGB_565_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
	SDE_PIX_FMT_RGBA_1010102,
	SDE_PIX_FMT_RGBX_1010102,
	/* SDE_PIX_FMT_ARGB_2101010 */
	/* SDE_PIX_FMT_XRGB_2101010 */
	SDE_PIX_FMT_BGRA_1010102,
	SDE_PIX_FMT_BGRX_1010102,
	/* SDE_PIX_FMT_ABGR_2101010 */
	/* SDE_PIX_FMT_XBGR_2101010 */
	SDE_PIX_FMT_RGBA_1010102_UBWC,
	SDE_PIX_FMT_RGBX_1010102_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_P010,
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
};
207
/* NRT VBIF debug bus selections dumped on error for the R3 rotator.
 * NOTE(review): field meaning follows struct sde_rot_vbif_debug_bus
 * (declared elsewhere) — confirm against that definition.
 */
static struct sde_rot_vbif_debug_bus nrt_vbif_dbg_bus_r3[] = {
	{0x214, 0x21c, 16, 1, 0x10}, /* arb clients */
	{0x214, 0x21c, 0, 12, 0x13}, /* xin blocks - axi side */
	{0x21c, 0x214, 0, 12, 0xc}, /* xin blocks - clock side */
};
213
/* Register ranges dumped (in order) for R3 error diagnostics */
static struct sde_rot_regdump sde_rot_r3_regdump[] = {
	{ "SDEROT_ROTTOP", SDE_ROT_ROTTOP_OFFSET, 0x100, SDE_ROT_REGDUMP_READ },
	{ "SDEROT_SSPP", SDE_ROT_SSPP_OFFSET, 0x200, SDE_ROT_REGDUMP_READ },
	{ "SDEROT_WB", SDE_ROT_WB_OFFSET, 0x300, SDE_ROT_REGDUMP_READ },
	{ "SDEROT_REGDMA_CSR", SDE_ROT_REGDMA_OFFSET, 0x100,
		SDE_ROT_REGDUMP_READ },
	/*
	 * Need to perform a SW reset to REGDMA in order to access the
	 * REGDMA RAM especially if REGDMA is waiting for Rotator IDLE.
	 * REGDMA RAM should be dump at last.
	 */
	{ "SDEROT_REGDMA_RESET", ROTTOP_SW_RESET_OVERRIDE, 1,
		SDE_ROT_REGDUMP_WRITE },
	{ "SDEROT_REGDMA_RAM", SDE_ROT_REGDMA_RAM_OFFSET, 0x2000,
		SDE_ROT_REGDUMP_READ },
};
230
Alan Kwong818b7fc2016-07-24 22:07:41 -0400231/* Invalid software timestamp value for initialization */
232#define SDE_REGDMA_SWTS_INVALID (~0)
233
234/**
235 * sde_hw_rotator_elapsed_swts - Find difference of 2 software timestamps
236 * @ts_curr: current software timestamp
237 * @ts_prev: previous software timestamp
238 * @return: the amount ts_curr is ahead of ts_prev
239 */
240static int sde_hw_rotator_elapsed_swts(u32 ts_curr, u32 ts_prev)
241{
242 u32 diff = (ts_curr - ts_prev) & SDE_REGDMA_SWTS_MASK;
243
244 return sign_extend32(diff, (SDE_REGDMA_SWTS_SHIFT - 1));
245}
246
247/**
248 * sde_hw_rotator_pending_swts - Check if the given context is still pending
249 * @rot: Pointer to hw rotator
250 * @ctx: Pointer to rotator context
251 * @pswts: Pointer to returned reference software timestamp, optional
252 * @return: true if context has pending requests
253 */
254static int sde_hw_rotator_pending_swts(struct sde_hw_rotator *rot,
255 struct sde_hw_rotator_context *ctx, u32 *pswts)
256{
257 u32 swts;
258 int ts_diff;
259 bool pending;
260
261 if (ctx->last_regdma_timestamp == SDE_REGDMA_SWTS_INVALID)
262 swts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);
263 else
264 swts = ctx->last_regdma_timestamp;
265
266 if (ctx->q_id == ROT_QUEUE_LOW_PRIORITY)
267 swts >>= SDE_REGDMA_SWTS_SHIFT;
268
269 swts &= SDE_REGDMA_SWTS_MASK;
270
271 ts_diff = sde_hw_rotator_elapsed_swts(ctx->timestamp, swts);
272
273 if (pswts)
274 *pswts = swts;
275
276 pending = (ts_diff > 0) ? true : false;
277
278 SDEROT_DBG("ts:0x%x, queue_id:%d, swts:0x%x, pending:%d\n",
279 ctx->timestamp, ctx->q_id, swts, pending);
Benjamin Chan0f9e61d2016-09-16 16:01:09 -0400280 SDEROT_EVTLOG(ctx->timestamp, swts, ctx->q_id, ts_diff);
Alan Kwong818b7fc2016-07-24 22:07:41 -0400281 return pending;
282}
283
/**
 * sde_hw_rotator_enable_irq - Enable hw rotator interrupt with ref. count
 * Also, clear rotator/regdma irq status.
 * @rot: Pointer to hw rotator
 *
 * NOTE(review): the atomic_read()/atomic_inc() pair is not atomic as a
 * whole; this presumably relies on callers serializing enable/disable —
 * confirm the locking context of all callers.
 */
static void sde_hw_rotator_enable_irq(struct sde_hw_rotator *rot)
{
	SDEROT_DBG("irq_num:%d enabled:%d\n", rot->irq_num,
			atomic_read(&rot->irq_enabled));

	if (!atomic_read(&rot->irq_enabled)) {
		/* clear any stale status bits before unmasking the line */
		if (rot->mode == ROT_REGDMA_OFF)
			SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
					ROT_DONE_MASK);
		else
			SDE_ROTREG_WRITE(rot->mdss_base,
					REGDMA_CSR_REGDMA_INT_CLEAR, REGDMA_INT_MASK);

		enable_irq(rot->irq_num);
	}
	atomic_inc(&rot->irq_enabled);
}
306
/**
 * sde_hw_rotator_disable_irq - Disable hw rotator interrupt with ref. count
 * Also, clear rotator/regdma irq enable masks.
 * @rot: Pointer to hw rotator
 *
 * Logs an error and returns if the reference count is already zero.
 */
static void sde_hw_rotator_disable_irq(struct sde_hw_rotator *rot)
{
	SDEROT_DBG("irq_num:%d enabled:%d\n", rot->irq_num,
			atomic_read(&rot->irq_enabled));

	if (!atomic_read(&rot->irq_enabled)) {
		SDEROT_ERR("irq %d is already disabled\n", rot->irq_num);
		return;
	}

	/* last reference gone: mask at the block level, then at the line */
	if (!atomic_dec_return(&rot->irq_enabled)) {
		if (rot->mode == ROT_REGDMA_OFF)
			SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_EN, 0);
		else
			SDE_ROTREG_WRITE(rot->mdss_base,
					REGDMA_CSR_REGDMA_INT_EN, 0);
		/* disable irq after last pending irq is handled, if any */
		synchronize_irq(rot->irq_num);
		disable_irq_nosync(rot->irq_num);
	}
}
333
334/**
335 * sde_hw_rotator_dump_status - Dump hw rotator status on error
336 * @rot: Pointer to hw rotator
337 */
338static void sde_hw_rotator_dump_status(struct sde_hw_rotator *rot)
339{
340 SDEROT_ERR(
341 "op_mode = %x, int_en = %x, int_status = %x\n",
342 SDE_ROTREG_READ(rot->mdss_base,
343 REGDMA_CSR_REGDMA_OP_MODE),
344 SDE_ROTREG_READ(rot->mdss_base,
345 REGDMA_CSR_REGDMA_INT_EN),
346 SDE_ROTREG_READ(rot->mdss_base,
347 REGDMA_CSR_REGDMA_INT_STATUS));
348
349 SDEROT_ERR(
350 "ts = %x, q0_status = %x, q1_status = %x, block_status = %x\n",
351 SDE_ROTREG_READ(rot->mdss_base,
352 REGDMA_TIMESTAMP_REG),
353 SDE_ROTREG_READ(rot->mdss_base,
354 REGDMA_CSR_REGDMA_QUEUE_0_STATUS),
355 SDE_ROTREG_READ(rot->mdss_base,
356 REGDMA_CSR_REGDMA_QUEUE_1_STATUS),
357 SDE_ROTREG_READ(rot->mdss_base,
358 REGDMA_CSR_REGDMA_BLOCK_STATUS));
359
360 SDEROT_ERR(
361 "invalid_cmd_offset = %x, fsm_state = %x\n",
362 SDE_ROTREG_READ(rot->mdss_base,
363 REGDMA_CSR_REGDMA_INVALID_CMD_RAM_OFFSET),
364 SDE_ROTREG_READ(rot->mdss_base,
365 REGDMA_CSR_REGDMA_FSM_STATE));
366}
367
Alan Kwong9487de22016-01-16 22:06:36 -0500368/**
369 * sde_hw_rotator_get_ctx(): Retrieve rotator context from rotator HW based
370 * on provided session_id. Each rotator has a different session_id.
371 */
372static struct sde_hw_rotator_context *sde_hw_rotator_get_ctx(
373 struct sde_hw_rotator *rot, u32 session_id,
374 enum sde_rot_queue_prio q_id)
375{
376 int i;
377 struct sde_hw_rotator_context *ctx = NULL;
378
379 for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++) {
380 ctx = rot->rotCtx[q_id][i];
381
382 if (ctx && (ctx->session_id == session_id)) {
383 SDEROT_DBG(
384 "rotCtx sloti[%d][%d] ==> ctx:%p | session-id:%d\n",
385 q_id, i, ctx, ctx->session_id);
386 return ctx;
387 }
388 }
389
390 return NULL;
391}
392
393/*
394 * sde_hw_rotator_map_vaddr - map the debug buffer to kernel space
395 * @dbgbuf: Pointer to debug buffer
396 * @buf: Pointer to layer buffer structure
397 * @data: Pointer to h/w mapped buffer structure
398 */
399static void sde_hw_rotator_map_vaddr(struct sde_dbg_buf *dbgbuf,
400 struct sde_layer_buffer *buf, struct sde_mdp_data *data)
401{
402 dbgbuf->dmabuf = data->p[0].srcp_dma_buf;
403 dbgbuf->buflen = data->p[0].srcp_dma_buf->size;
404
405 dbgbuf->vaddr = NULL;
406 dbgbuf->width = buf->width;
407 dbgbuf->height = buf->height;
408
409 if (dbgbuf->dmabuf && (dbgbuf->buflen > 0)) {
410 dma_buf_begin_cpu_access(dbgbuf->dmabuf, 0, dbgbuf->buflen,
411 DMA_FROM_DEVICE);
412 dbgbuf->vaddr = dma_buf_kmap(dbgbuf->dmabuf, 0);
413 SDEROT_DBG("vaddr mapping: 0x%p/%ld w:%d/h:%d\n",
414 dbgbuf->vaddr, dbgbuf->buflen,
415 dbgbuf->width, dbgbuf->height);
416 }
417}
418
419/*
420 * sde_hw_rotator_unmap_vaddr - unmap the debug buffer from kernel space
421 * @dbgbuf: Pointer to debug buffer
422 */
423static void sde_hw_rotator_unmap_vaddr(struct sde_dbg_buf *dbgbuf)
424{
425 if (dbgbuf->vaddr) {
426 dma_buf_kunmap(dbgbuf->dmabuf, 0, dbgbuf->vaddr);
427 dma_buf_end_cpu_access(dbgbuf->dmabuf, 0, dbgbuf->buflen,
428 DMA_FROM_DEVICE);
429 }
430
431 dbgbuf->vaddr = NULL;
432 dbgbuf->dmabuf = NULL;
433 dbgbuf->buflen = 0;
434 dbgbuf->width = 0;
435 dbgbuf->height = 0;
436}
437
/*
 * sde_hw_rotator_setup_timestamp_packet - setup timestamp writeback command
 * @ctx: Pointer to rotator context
 * @mask: Bit mask location of the timestamp
 * @swts: Software timestamp
 *
 * Appends a minimal 1x1 rotation job to the context's REGDMA segment whose
 * only effect is to update the software timestamp register (via
 * SDE_REGDMA_MODIFY) once the preceding job has finished.
 */
static void sde_hw_rotator_setup_timestamp_packet(
		struct sde_hw_rotator_context *ctx, u32 mask, u32 swts)
{
	u32 *wrptr;

	wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	/*
	 * Create a dummy packet write out to 1 location for timestamp
	 * generation.
	 */
	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_SIZE, 6);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x00010001);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x00010001);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, ctx->ts_addr);
	SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_YSTRIDE0, 4);
	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_FORMAT, 4);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x004037FF);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x03020100);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x80000000);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, ctx->timestamp);
	/*
	 * Must clear secure buffer setting for SW timestamp because
	 * SW timestamp buffer allocation is always non-secure region.
	 */
	if (ctx->is_secure) {
		SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_ADDR_SW_STATUS, 0);
		SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ADDR_SW_STATUS, 0);
	}
	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_WB_DST_FORMAT, 4);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x000037FF);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x03020100);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, ctx->ts_addr);
	SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_YSTRIDE0, 4);
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_SIZE, 0x00010001);
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_IMG_SIZE, 0x00010001);
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_XY, 0);
	SDE_REGDMA_WRITE(wrptr, ROTTOP_DNSC, 0);
	SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, 1);
	/* fold the new timestamp into its bit-field without disturbing
	 * the other queue's field
	 */
	SDE_REGDMA_MODIFY(wrptr, REGDMA_TIMESTAMP_REG, mask, swts);
	SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, 1);

	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
}
492
/*
 * sde_hw_rotator_setup_fetchengine - setup fetch engine
 * @ctx: Pointer to rotator context
 * @queue_id: Priority queue identifier
 * @cfg: Fetch configuration
 * @danger_lut: real-time QoS LUT for danger setting (not used)
 * @safe_lut: real-time QoS LUT for safe setting (not used)
 * @dnsc_factor_w: downscale factor for width
 * @dnsc_factor_h: downscale factor for height
 * @flags: Control flag
 *
 * Builds the REGDMA command sequence that programs the SSPP (source)
 * side of the rotator: geometry, plane addresses/strides, pixel format,
 * flip/rotate op mode, fetch block size and security status. Also
 * decides whether traffic shaping will be applied at output setup.
 */
static void sde_hw_rotator_setup_fetchengine(struct sde_hw_rotator_context *ctx,
		enum sde_rot_queue_prio queue_id,
		struct sde_hw_rot_sspp_cfg *cfg, u32 danger_lut, u32 safe_lut,
		u32 dnsc_factor_w, u32 dnsc_factor_h, u32 flags)
{
	struct sde_hw_rotator *rot = ctx->rot;
	struct sde_mdp_format_params *fmt;
	struct sde_mdp_data *data;
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	u32 *wrptr;
	u32 opmode = 0;
	u32 chroma_samp = 0;
	u32 src_format = 0;
	u32 unpack = 0;
	u32 width = cfg->img_width;
	u32 height = cfg->img_height;
	u32 fetch_blocksize = 0;
	int i;

	/* in REGDMA mode, unmask interrupts and enable the engine first */
	if (ctx->rot->mode == ROT_REGDMA_ON) {
		SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_INT_EN,
				REGDMA_INT_MASK);
		SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_OP_MODE,
				REGDMA_EN);
	}

	wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	/* source image setup */
	if ((flags & SDE_ROT_FLAG_DEINTERLACE)
			&& !(flags & SDE_ROT_FLAG_SOURCE_ROTATED_90)) {
		/* fetch every other line: double strides, halve height */
		for (i = 0; i < cfg->src_plane.num_planes; i++)
			cfg->src_plane.ystride[i] *= 2;
		width *= 2;
		height /= 2;
	}
	/* NOTE(review): width/height are adjusted above but not programmed
	 * into any register in this function — confirm whether intentional.
	 */

	/*
	 * REGDMA BLK write from SRC_SIZE to OP_MODE, total 15 registers
	 */
	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_SIZE, 15);

	/* SRC_SIZE, SRC_IMG_SIZE, SRC_XY, OUT_SIZE, OUT_XY */
	SDE_REGDMA_BLKWRITE_DATA(wrptr,
			cfg->src_rect->w | (cfg->src_rect->h << 16));
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0); /* SRC_IMG_SIZE unused */
	SDE_REGDMA_BLKWRITE_DATA(wrptr,
			cfg->src_rect->x | (cfg->src_rect->y << 16));
	SDE_REGDMA_BLKWRITE_DATA(wrptr,
			cfg->src_rect->w | (cfg->src_rect->h << 16));
	SDE_REGDMA_BLKWRITE_DATA(wrptr,
			cfg->src_rect->x | (cfg->src_rect->y << 16));

	/* SRC_ADDR [0-3], SRC_YSTRIDE [0-1] */
	data = cfg->data;
	for (i = 0; i < SDE_ROT_MAX_PLANES; i++)
		SDE_REGDMA_BLKWRITE_DATA(wrptr, data->p[i].addr);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->src_plane.ystride[0] |
			(cfg->src_plane.ystride[1] << 16));
	SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->src_plane.ystride[2] |
			(cfg->src_plane.ystride[3] << 16));

	/* UNUSED, write 0 */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);

	/* setup source format */
	fmt = cfg->fmt;

	/* 90-degree-rotated sources swap H2V1/H1V2 chroma siting */
	chroma_samp = fmt->chroma_sample;
	if (flags & SDE_ROT_FLAG_SOURCE_ROTATED_90) {
		if (chroma_samp == SDE_MDP_CHROMA_H2V1)
			chroma_samp = SDE_MDP_CHROMA_H1V2;
		else if (chroma_samp == SDE_MDP_CHROMA_H1V2)
			chroma_samp = SDE_MDP_CHROMA_H2V1;
	}

	src_format = (chroma_samp << 23) |
			(fmt->fetch_planes << 19) |
			(fmt->bits[C3_ALPHA] << 6) |
			(fmt->bits[C2_R_Cr] << 4) |
			(fmt->bits[C1_B_Cb] << 2) |
			(fmt->bits[C0_G_Y] << 0);

	if (fmt->alpha_enable &&
			(fmt->fetch_planes == SDE_MDP_PLANE_INTERLEAVED))
		src_format |= BIT(8); /* SRCC3_EN */

	src_format |= ((fmt->unpack_count - 1) << 12) |
			(fmt->unpack_tight << 17) |
			(fmt->unpack_align_msb << 18) |
			((fmt->bpp - 1) << 9) |
			((fmt->frame_format & 3) << 30);

	if (flags & SDE_ROT_FLAG_ROT_90)
		src_format |= BIT(11); /* ROT90 */

	if (sde_mdp_is_ubwc_format(fmt))
		opmode |= BIT(0); /* BWC_DEC_EN */

	/* if this is YUV pixel format, enable CSC */
	if (sde_mdp_is_yuv_format(fmt))
		src_format |= BIT(15); /* SRC_COLOR_SPACE */

	if (fmt->pixel_mode == SDE_MDP_PIXEL_10BIT)
		src_format |= BIT(14); /* UNPACK_DX_FORMAT */

	/* SRC_FORMAT */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, src_format);

	/* setup source unpack pattern */
	unpack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
			(fmt->element[1] << 8) | (fmt->element[0] << 0);

	/* SRC_UNPACK_PATTERN */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, unpack);

	/* setup source op mode */
	if (flags & SDE_ROT_FLAG_FLIP_LR)
		opmode |= BIT(13); /* FLIP_MODE L/R horizontal flip */
	if (flags & SDE_ROT_FLAG_FLIP_UD)
		opmode |= BIT(14); /* FLIP_MODE U/D vertical flip */
	opmode |= BIT(31); /* MDSS_MDP_OP_PE_OVERRIDE */

	/* SRC_OP_MODE */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, opmode);

	/* setup source fetch config, TP10 uses different block size */
	if (test_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map) &&
			(dnsc_factor_w == 1) && (dnsc_factor_h == 1)) {
		if (sde_mdp_is_tp10_format(fmt))
			fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_144_EXT;
		else
			fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_192_EXT;
	} else {
		if (sde_mdp_is_tp10_format(fmt))
			fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_96;
		else
			fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_128;
	}

	SDE_REGDMA_WRITE(wrptr, ROT_SSPP_FETCH_CONFIG,
			fetch_blocksize |
			SDE_ROT_SSPP_FETCH_CONFIG_RESET_VALUE |
			((rot->highest_bank & 0x3) << 18));

	/* setup source buffer plane security status */
	if (flags & (SDE_ROT_FLAG_SECURE_OVERLAY_SESSION |
			SDE_ROT_FLAG_SECURE_CAMERA_SESSION)) {
		SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_ADDR_SW_STATUS, 0xF);
		ctx->is_secure = true;
	} else {
		SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_ADDR_SW_STATUS, 0);
		ctx->is_secure = false;
	}

	/*
	 * Determine if traffic shaping is required. Only enable traffic
	 * shaping when content is 4k@30fps. The actual traffic shaping
	 * bandwidth calculation is done in output setup.
	 */
	if (((cfg->src_rect->w * cfg->src_rect->h) >= RES_UHD) &&
			(cfg->fps <= 30)) {
		SDEROT_DBG("Enable Traffic Shaper\n");
		ctx->is_traffic_shaping = true;
	} else {
		SDEROT_DBG("Disable Traffic Shaper\n");
		ctx->is_traffic_shaping = false;
	}

	/* Update command queue write ptr */
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
}
676
/*
 * sde_hw_rotator_setup_wbengine - setup writeback engine
 * @ctx: Pointer to rotator context
 * @queue_id: Priority queue identifier
 * @cfg: Writeback configuration
 * @flags: Control flag
 *
 * Builds the REGDMA command sequence for the WB (destination) side:
 * output format/pack pattern, plane addresses/strides, output geometry,
 * security status, downscale factors, rotator op mode and, when the
 * fetch setup flagged 4k@30 content, the traffic shaper byte budget.
 */
static void sde_hw_rotator_setup_wbengine(struct sde_hw_rotator_context *ctx,
		enum sde_rot_queue_prio queue_id,
		struct sde_hw_rot_wb_cfg *cfg,
		u32 flags)
{
	struct sde_mdp_format_params *fmt;
	u32 *wrptr;
	u32 pack = 0;
	u32 dst_format = 0;
	int i;

	wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	fmt = cfg->fmt;

	/* setup WB DST format */
	dst_format |= (fmt->chroma_sample << 23) |
			(fmt->fetch_planes << 19) |
			(fmt->bits[C3_ALPHA] << 6) |
			(fmt->bits[C2_R_Cr] << 4) |
			(fmt->bits[C1_B_Cb] << 2) |
			(fmt->bits[C0_G_Y] << 0);

	/* alpha control */
	if (fmt->bits[C3_ALPHA] || fmt->alpha_enable) {
		dst_format |= BIT(8);
		if (!fmt->alpha_enable) {
			/* constant alpha: X channel driven by fixed value */
			dst_format |= BIT(14);
			SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ALPHA_X_VALUE, 0);
		}
	}

	dst_format |= ((fmt->unpack_count - 1) << 12) |
			(fmt->unpack_tight << 17) |
			(fmt->unpack_align_msb << 18) |
			((fmt->bpp - 1) << 9) |
			((fmt->frame_format & 3) << 30);

	if (sde_mdp_is_yuv_format(fmt))
		dst_format |= BIT(15);

	if (fmt->pixel_mode == SDE_MDP_PIXEL_10BIT)
		dst_format |= BIT(21); /* PACK_DX_FORMAT */

	/*
	 * REGDMA BLK write, from DST_FORMAT to DST_YSTRIDE 1, total 9 regs
	 */
	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_WB_DST_FORMAT, 9);

	/* DST_FORMAT */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, dst_format);

	/* DST_OP_MODE */
	if (sde_mdp_is_ubwc_format(fmt))
		SDE_REGDMA_BLKWRITE_DATA(wrptr, BIT(0));
	else
		SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);

	/* DST_PACK_PATTERN */
	pack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
			(fmt->element[1] << 8) | (fmt->element[0] << 0);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, pack);

	/* DST_ADDR [0-3], DST_YSTRIDE [0-1] */
	for (i = 0; i < SDE_ROT_MAX_PLANES; i++)
		SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->data->p[i].addr);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->dst_plane.ystride[0] |
			(cfg->dst_plane.ystride[1] << 16));
	SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->dst_plane.ystride[2] |
			(cfg->dst_plane.ystride[3] << 16));

	/* setup WB out image size and ROI */
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_IMG_SIZE,
			cfg->img_width | (cfg->img_height << 16));
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_SIZE,
			cfg->dst_rect->w | (cfg->dst_rect->h << 16));
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_XY,
			cfg->dst_rect->x | (cfg->dst_rect->y << 16));

	/* destination buffer security status */
	if (flags & (SDE_ROT_FLAG_SECURE_OVERLAY_SESSION |
			SDE_ROT_FLAG_SECURE_CAMERA_SESSION))
		SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ADDR_SW_STATUS, 0x1);
	else
		SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ADDR_SW_STATUS, 0);

	/*
	 * setup Downscale factor
	 */
	SDE_REGDMA_WRITE(wrptr, ROTTOP_DNSC,
			cfg->v_downscale_factor |
			(cfg->h_downscale_factor << 16));

	/* write config setup for bank configuration */
	SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_WRITE_CONFIG,
			(ctx->rot->highest_bank & 0x3) << 8);

	if (flags & SDE_ROT_FLAG_ROT_90)
		SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, 0x3);
	else
		SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, 0x1);

	/* setup traffic shaper for 4k 30fps content */
	if (ctx->is_traffic_shaping) {
		u32 bw;

		/*
		 * Target to finish in 12ms, and we need to set number of bytes
		 * per clock tick for traffic shaping.
		 * Each clock tick runs @ 19.2MHz, so the total number of
		 * clock ticks in 12ms is 12ms x 19.2MHz ==> 230400
		 * (TRAFFIC_SHAPE_CLKTICK_12MS).
		 * Finally, calculate the byte count per clock tick based on
		 * resolution, bpp and compression ratio.
		 */
		bw = cfg->dst_rect->w * cfg->dst_rect->h;

		if (fmt->chroma_sample == SDE_MDP_CHROMA_420)
			bw = (bw * 3) / 2;
		else
			bw *= fmt->bpp;

		bw /= TRAFFIC_SHAPE_CLKTICK_12MS;
		if (bw > 0xFF)
			bw = 0xFF;
		SDE_REGDMA_WRITE(wrptr, ROT_WB_TRAFFIC_SHAPER_WR_CLIENT,
				BIT(31) | bw);
		SDEROT_DBG("Enable ROT_WB Traffic Shaper:%d\n", bw);
	} else {
		SDE_REGDMA_WRITE(wrptr, ROT_WB_TRAFFIC_SHAPER_WR_CLIENT, 0);
		SDEROT_DBG("Disable ROT_WB Traffic Shaper\n");
	}

	/* Update command queue write ptr */
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
}
818
/*
 * sde_hw_rotator_start_no_regdma - start non-regdma operation
 * @ctx: Pointer to rotator context
 * @queue_id: Priority queue identifier
 * @return: context timestamp of the submitted job
 *
 * Software-interprets the command segment built by the setup functions,
 * replaying each REGDMA opcode as direct register writes. The rotator
 * starts as soon as the final START_CTRL write lands.
 */
static u32 sde_hw_rotator_start_no_regdma(struct sde_hw_rotator_context *ctx,
		enum sde_rot_queue_prio queue_id)
{
	struct sde_hw_rotator *rot = ctx->rot;
	u32 *wrptr;
	u32 *rdptr;
	u8 *addr;
	u32 mask;
	u32 blksize;

	rdptr = sde_hw_rotator_get_regdma_segment_base(ctx);
	wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	/* when an irq line is available, arm completion before kickoff */
	if (rot->irq_num >= 0) {
		SDE_REGDMA_WRITE(wrptr, ROTTOP_INTR_EN, 1);
		SDE_REGDMA_WRITE(wrptr, ROTTOP_INTR_CLEAR, 1);
		reinit_completion(&ctx->rot_comp);
		sde_hw_rotator_enable_irq(rot);
	}

	SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, 1);

	/* Update command queue write ptr */
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);

	SDEROT_DBG("BEGIN %d\n", ctx->timestamp);
	/* Write all command stream to Rotator blocks */
	/* Rotator will start right away after command stream finish writing */
	while (rdptr < wrptr) {
		u32 op = REGDMA_OP_MASK & *rdptr;

		switch (op) {
		case REGDMA_OP_NOP:
			SDEROT_DBG("NOP\n");
			rdptr++;
			break;
		case REGDMA_OP_REGWRITE:
			SDEROT_DBG("REGW %6.6x %8.8x\n",
					rdptr[0] & REGDMA_ADDR_OFFSET_MASK,
					rdptr[1]);
			addr = rot->mdss_base +
				(*rdptr++ & REGDMA_ADDR_OFFSET_MASK);
			writel_relaxed(*rdptr++, addr);
			break;
		case REGDMA_OP_REGMODIFY:
			SDEROT_DBG("REGM %6.6x %8.8x %8.8x\n",
					rdptr[0] & REGDMA_ADDR_OFFSET_MASK,
					rdptr[1], rdptr[2]);
			addr = rot->mdss_base +
				(*rdptr++ & REGDMA_ADDR_OFFSET_MASK);
			mask = *rdptr++;
			writel_relaxed((readl_relaxed(addr) & mask) | *rdptr++,
					addr);
			break;
		case REGDMA_OP_BLKWRITE_SINGLE:
			/* repeated writes to a single register address */
			SDEROT_DBG("BLKWS %6.6x %6.6x\n",
					rdptr[0] & REGDMA_ADDR_OFFSET_MASK,
					rdptr[1]);
			addr = rot->mdss_base +
				(*rdptr++ & REGDMA_ADDR_OFFSET_MASK);
			blksize = *rdptr++;
			while (blksize--) {
				SDEROT_DBG("DATA %8.8x\n", rdptr[0]);
				writel_relaxed(*rdptr++, addr);
			}
			break;
		case REGDMA_OP_BLKWRITE_INC:
			/* writes to consecutive register addresses */
			SDEROT_DBG("BLKWI %6.6x %6.6x\n",
					rdptr[0] & REGDMA_ADDR_OFFSET_MASK,
					rdptr[1]);
			addr = rot->mdss_base +
				(*rdptr++ & REGDMA_ADDR_OFFSET_MASK);
			blksize = *rdptr++;
			while (blksize--) {
				SDEROT_DBG("DATA %8.8x\n", rdptr[0]);
				writel_relaxed(*rdptr++, addr);
				addr += 4;
			}
			break;
		default:
			/* Other not supported OP mode
			 * Skip data for now for unregonized OP mode
			 */
			SDEROT_DBG("UNDEFINED\n");
			rdptr++;
			break;
		}
	}
	SDEROT_DBG("END %d\n", ctx->timestamp);

	return ctx->timestamp;
}
916
/*
 * sde_hw_rotator_start_regdma - start regdma operation
 * @ctx: Pointer to rotator context
 * @queue_id: Priority queue identifier
 * @return: context timestamp of the submitted job
 *
 * Submits two back-to-back commands to the REGDMA queue: the rotation
 * command segment itself, then the timestamp-writeback packet (with the
 * completion interrupt enabled on the second submission).
 */
static u32 sde_hw_rotator_start_regdma(struct sde_hw_rotator_context *ctx,
		enum sde_rot_queue_prio queue_id)
{
	struct sde_hw_rotator *rot = ctx->rot;
	u32 *wrptr;
	u32 regdmaSlot;
	u32 offset;
	long length;
	long ts_length;
	u32 enableInt;
	u32 swts = 0;
	u32 mask = 0;

	wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	/*
	 * Last ROT command must be ROT_START before REGDMA start
	 */
	SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, 1);
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);

	/*
	 * Start REGDMA with command offset and size.
	 * length/offset are in units of 32-bit words within REGDMA RAM.
	 */
	regdmaSlot = sde_hw_rotator_get_regdma_ctxidx(ctx);
	length = ((long)wrptr - (long)ctx->regdma_base) / 4;
	offset = (u32)(ctx->regdma_base - (u32 *)(rot->mdss_base +
				REGDMA_RAM_REGDMA_CMD_RAM));
	/* alternate between the two interrupt bits based on timestamp parity */
	enableInt = ((ctx->timestamp & 1) + 1) << 30;

	SDEROT_DBG(
		"regdma(%d)[%d] <== INT:0x%X|length:%ld|offset:0x%X, ts:%X\n",
		queue_id, regdmaSlot, enableInt, length, offset,
		ctx->timestamp);

	/* ensure the command packet is issued before the submit command */
	wmb();

	/* REGDMA submission for current context */
	if (queue_id == ROT_QUEUE_HIGH_PRIORITY) {
		SDE_ROTREG_WRITE(rot->mdss_base,
				REGDMA_CSR_REGDMA_QUEUE_0_SUBMIT,
				(length << 14) | offset);
		swts = ctx->timestamp;
		mask = ~SDE_REGDMA_SWTS_MASK;
	} else {
		SDE_ROTREG_WRITE(rot->mdss_base,
				REGDMA_CSR_REGDMA_QUEUE_1_SUBMIT,
				(length << 14) | offset);
		swts = ctx->timestamp << SDE_REGDMA_SWTS_SHIFT;
		mask = ~(SDE_REGDMA_SWTS_MASK << SDE_REGDMA_SWTS_SHIFT);
	}

	/* Write timestamp after previous rotator job finished */
	sde_hw_rotator_setup_timestamp_packet(ctx, mask, swts);
	offset += length;
	ts_length = sde_hw_rotator_get_regdma_segment(ctx) - wrptr;
	WARN_ON((length + ts_length) > SDE_HW_ROT_REGDMA_SEG_SIZE);

	/* ensure command packet is issue before the submit command */
	wmb();

	if (queue_id == ROT_QUEUE_HIGH_PRIORITY) {
		SDE_ROTREG_WRITE(rot->mdss_base,
				REGDMA_CSR_REGDMA_QUEUE_0_SUBMIT,
				enableInt | (ts_length << 14) | offset);
	} else {
		SDE_ROTREG_WRITE(rot->mdss_base,
				REGDMA_CSR_REGDMA_QUEUE_1_SUBMIT,
				enableInt | (ts_length << 14) | offset);
	}

	/* Update command queue write ptr.
	 * NOTE(review): wrptr was captured before the timestamp packet was
	 * appended, so this rewinds the segment to just after START_CTRL —
	 * presumably intentional so the space is reused; confirm.
	 */
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);

	return ctx->timestamp;
}
999
/*
 * sde_hw_rotator_wait_done_no_regdma - wait for non-regdma completion
 * @ctx: Pointer to rotator context
 * @queue_id: Priority queue identifier
 * @flag: Option flag (currently unused)
 *
 * Blocks until the rotator reports done, an error is detected, or the
 * wait times out.  Uses the rotator done interrupt when one is wired up
 * (rot->irq_num >= 0), otherwise polls ROTTOP_STATUS.
 *
 * Return: 0 on success, (u32)-ENODEV when the rotator reported an error
 */
static u32 sde_hw_rotator_wait_done_no_regdma(
		struct sde_hw_rotator_context *ctx,
		enum sde_rot_queue_prio queue_id, u32 flag)
{
	struct sde_hw_rotator *rot = ctx->rot;
	int rc = 0;
	u32 sts = 0;
	u32 status;
	unsigned long flags;

	if (rot->irq_num >= 0) {
		SDEROT_DBG("Wait for Rotator completion\n");
		/* rc == 0 -> timeout; rc > 0 -> completed within KOFF_TIMEOUT */
		rc = wait_for_completion_timeout(&ctx->rot_comp,
					KOFF_TIMEOUT);

		/* take the ISR lock so status read/IRQ disable are atomic
		 * with respect to the rotator interrupt handler
		 */
		spin_lock_irqsave(&rot->rotisr_lock, flags);
		status = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
		if (rc == 0) {
			/*
			 * Timeout, there might be error,
			 * or rotator still busy
			 */
			if (status & ROT_BUSY_BIT)
				SDEROT_ERR(
					"Timeout waiting for rotator done\n");
			else if (status & ROT_ERROR_BIT)
				SDEROT_ERR(
					"Rotator report error status\n");
			else
				SDEROT_WARN(
					"Timeout waiting, but rotator job is done!!\n");

			sde_hw_rotator_disable_irq(rot);
		}
		spin_unlock_irqrestore(&rot->rotisr_lock, flags);
	} else {
		/* no IRQ line: poll up to 200 * 500us = 100ms */
		int cnt = 200;

		do {
			udelay(500);
			status = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
			cnt--;
		} while ((cnt > 0) && (status & ROT_BUSY_BIT)
				&& ((status & ROT_ERROR_BIT) == 0));

		if (status & ROT_ERROR_BIT)
			SDEROT_ERR("Rotator error\n");
		else if (status & ROT_BUSY_BIT)
			SDEROT_ERR("Rotator busy\n");

		/* ack the done interrupt ourselves since no ISR runs here */
		SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
				ROT_DONE_CLEAR);
	}

	/* NOTE(review): negative errno carried through a u32 return;
	 * callers treat any non-zero value as failure
	 */
	sts = (status & ROT_ERROR_BIT) ? -ENODEV : 0;

	return sts;
}
1064
/*
 * sde_hw_rotator_wait_done_regdma - wait for regdma completion
 * @ctx: Pointer to rotator context
 * @queue_id: Priority queue identifier
 * @flag: Option flag (currently unused)
 *
 * Waits until the software timestamp for @ctx is no longer pending
 * (i.e. regdma has executed the timestamp packet submitted after the
 * rotation job).  Uses the regdma interrupt path when available,
 * otherwise polls the interrupt status register.  On timeout or error
 * the regdma/VBIF state is dumped for post-mortem analysis.
 *
 * Return: 0 on success, (u32)-ENODEV on error or timeout
 */
static u32 sde_hw_rotator_wait_done_regdma(
		struct sde_hw_rotator_context *ctx,
		enum sde_rot_queue_prio queue_id, u32 flag)
{
	struct sde_hw_rotator *rot = ctx->rot;
	int rc = 0;
	u32 status;
	u32 last_isr;
	u32 last_ts;
	u32 int_id;
	u32 swts;
	u32 sts = 0;
	unsigned long flags;

	if (rot->irq_num >= 0) {
		SDEROT_DBG("Wait for REGDMA completion, ctx:%p, ts:%X\n",
				ctx, ctx->timestamp);
		/* woken by the regdma ISR once the swts catches up */
		rc = wait_event_timeout(ctx->regdma_waitq,
				!sde_hw_rotator_pending_swts(rot, ctx, &swts),
				KOFF_TIMEOUT);

		spin_lock_irqsave(&rot->rotisr_lock, flags);

		/* snapshot of what the ISR recorded last */
		last_isr = ctx->last_regdma_isr_status;
		last_ts = ctx->last_regdma_timestamp;
		status = last_isr & REGDMA_INT_MASK;
		int_id = last_ts & 1;
		SDEROT_DBG("INT status:0x%X, INT id:%d, timestamp:0x%X\n",
				status, int_id, last_ts);

		if (rc == 0 || (status & REGDMA_INT_ERR_MASK)) {
			bool pending;

			/* re-check in case swts advanced after the timeout */
			pending = sde_hw_rotator_pending_swts(rot, ctx, &swts);
			SDEROT_ERR(
				"Timeout wait for regdma interrupt status, ts:0x%X/0x%X pending:%d\n",
				ctx->timestamp, swts, pending);

			if (status & REGDMA_WATCHDOG_INT)
				SDEROT_ERR("REGDMA watchdog interrupt\n");
			else if (status & REGDMA_INVALID_DESCRIPTOR)
				SDEROT_ERR("REGDMA invalid descriptor\n");
			else if (status & REGDMA_INCOMPLETE_CMD)
				SDEROT_ERR("REGDMA incomplete command\n");
			else if (status & REGDMA_INVALID_CMD)
				SDEROT_ERR("REGDMA invalid command\n");

			sde_hw_rotator_dump_status(rot);
			status = ROT_ERROR_BIT;
		} else {
			/* rc == 1: condition became true exactly at timeout */
			if (rc == 1)
				SDEROT_WARN(
					"REGDMA done but no irq, ts:0x%X/0x%X\n",
					ctx->timestamp, swts);
			status = 0;
		}

		spin_unlock_irqrestore(&rot->rotisr_lock, flags);
	} else {
		/* no IRQ line: poll up to 200 * 500us = 100ms */
		int cnt = 200;
		bool pending;

		do {
			udelay(500);
			last_isr = SDE_ROTREG_READ(rot->mdss_base,
					REGDMA_CSR_REGDMA_INT_STATUS);
			pending = sde_hw_rotator_pending_swts(rot, ctx, &swts);
			cnt--;
		} while ((cnt > 0) && pending &&
				((last_isr & REGDMA_INT_ERR_MASK) == 0));

		if (last_isr & REGDMA_INT_ERR_MASK) {
			SDEROT_ERR("Rotator error, ts:0x%X/0x%X status:%x\n",
					ctx->timestamp, swts, last_isr);
			sde_hw_rotator_dump_status(rot);
			status = ROT_ERROR_BIT;
		} else if (pending) {
			SDEROT_ERR("Rotator timeout, ts:0x%X/0x%X status:%x\n",
					ctx->timestamp, swts, last_isr);
			sde_hw_rotator_dump_status(rot);
			status = ROT_ERROR_BIT;
		} else {
			status = 0;
		}

		/* ack whatever interrupt bits we observed while polling */
		SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_INT_CLEAR,
				last_isr);
	}

	sts = (status & ROT_ERROR_BIT) ? -ENODEV : 0;

	/* capture debug bus / trigger configured panic on fatal error */
	if (status & ROT_ERROR_BIT)
		SDEROT_EVTLOG_TOUT_HANDLER("rot", "vbif_dbg_bus", "panic");

	return sts;
}
1167
1168/*
1169 * setup_rotator_ops - setup callback functions for the low-level HAL
1170 * @ops: Pointer to low-level ops callback
1171 * @mode: Operation mode (non-regdma or regdma)
1172 */
1173static void setup_rotator_ops(struct sde_hw_rotator_ops *ops,
1174 enum sde_rotator_regdma_mode mode)
1175{
1176 ops->setup_rotator_fetchengine = sde_hw_rotator_setup_fetchengine;
1177 ops->setup_rotator_wbengine = sde_hw_rotator_setup_wbengine;
1178 if (mode == ROT_REGDMA_ON) {
1179 ops->start_rotator = sde_hw_rotator_start_regdma;
1180 ops->wait_rotator_done = sde_hw_rotator_wait_done_regdma;
1181 } else {
1182 ops->start_rotator = sde_hw_rotator_start_no_regdma;
1183 ops->wait_rotator_done = sde_hw_rotator_wait_done_no_regdma;
1184 }
1185}
1186
1187/*
1188 * sde_hw_rotator_swts_create - create software timestamp buffer
1189 * @rot: Pointer to rotator hw
1190 *
1191 * This buffer is used by regdma to keep track of last completed command.
1192 */
1193static int sde_hw_rotator_swts_create(struct sde_hw_rotator *rot)
1194{
1195 int rc = 0;
1196 struct ion_handle *handle;
1197 struct sde_mdp_img_data *data;
Abhijit Kulkarni298c8232016-09-26 22:32:10 -07001198 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
Alan Kwong9487de22016-01-16 22:06:36 -05001199 u32 bufsize = sizeof(int) * SDE_HW_ROT_REGDMA_TOTAL_CTX * 2;
1200
Abhijit Kulkarni298c8232016-09-26 22:32:10 -07001201 rot->iclient = mdata->iclient;
Alan Kwong9487de22016-01-16 22:06:36 -05001202
1203 handle = ion_alloc(rot->iclient, bufsize, SZ_4K,
1204 ION_HEAP(ION_SYSTEM_HEAP_ID), 0);
1205 if (IS_ERR_OR_NULL(handle)) {
1206 SDEROT_ERR("ion memory allocation failed\n");
1207 return -ENOMEM;
1208 }
1209
1210 data = &rot->swts_buf;
1211 data->len = bufsize;
1212 data->srcp_dma_buf = ion_share_dma_buf(rot->iclient, handle);
1213 if (IS_ERR(data->srcp_dma_buf)) {
1214 SDEROT_ERR("ion_dma_buf setup failed\n");
1215 rc = -ENOMEM;
1216 goto imap_err;
1217 }
1218
1219 sde_smmu_ctrl(1);
1220
1221 data->srcp_attachment = sde_smmu_dma_buf_attach(data->srcp_dma_buf,
1222 &rot->pdev->dev, SDE_IOMMU_DOMAIN_ROT_UNSECURE);
1223 if (IS_ERR_OR_NULL(data->srcp_attachment)) {
1224 SDEROT_ERR("sde_smmu_dma_buf_attach error\n");
1225 rc = -ENOMEM;
1226 goto err_put;
1227 }
1228
1229 data->srcp_table = dma_buf_map_attachment(data->srcp_attachment,
1230 DMA_BIDIRECTIONAL);
1231 if (IS_ERR_OR_NULL(data->srcp_table)) {
1232 SDEROT_ERR("dma_buf_map_attachment error\n");
1233 rc = -ENOMEM;
1234 goto err_detach;
1235 }
1236
1237 rc = sde_smmu_map_dma_buf(data->srcp_dma_buf, data->srcp_table,
1238 SDE_IOMMU_DOMAIN_ROT_UNSECURE, &data->addr,
1239 &data->len, DMA_BIDIRECTIONAL);
1240 if (IS_ERR_VALUE(rc)) {
1241 SDEROT_ERR("smmu_map_dma_buf failed: (%d)\n", rc);
1242 goto err_unmap;
1243 }
1244
1245 dma_buf_begin_cpu_access(data->srcp_dma_buf, 0, data->len,
1246 DMA_FROM_DEVICE);
1247 rot->swts_buffer = dma_buf_kmap(data->srcp_dma_buf, 0);
1248 if (IS_ERR_OR_NULL(rot->swts_buffer)) {
1249 SDEROT_ERR("ion kernel memory mapping failed\n");
1250 rc = IS_ERR(rot->swts_buffer);
1251 goto kmap_err;
1252 }
1253
1254 data->mapped = true;
1255 SDEROT_DBG("swts buffer mapped: %pad/%lx va:%p\n", &data->addr,
1256 data->len, rot->swts_buffer);
1257
1258 ion_free(rot->iclient, handle);
1259
1260 sde_smmu_ctrl(0);
1261
1262 return rc;
1263kmap_err:
1264 sde_smmu_unmap_dma_buf(data->srcp_table, SDE_IOMMU_DOMAIN_ROT_UNSECURE,
1265 DMA_FROM_DEVICE, data->srcp_dma_buf);
1266err_unmap:
1267 dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table,
1268 DMA_FROM_DEVICE);
1269err_detach:
1270 dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment);
1271err_put:
1272 dma_buf_put(data->srcp_dma_buf);
1273 data->srcp_dma_buf = NULL;
1274imap_err:
1275 ion_free(rot->iclient, handle);
1276
1277 return rc;
1278}
1279
/*
 * sde_hw_rotator_swtc_destroy - destroy software timestamp buffer
 * @rot: Pointer to rotator hw
 *
 * Tears down the regdma software timestamp buffer in the reverse order
 * of sde_hw_rotator_swts_create(): end CPU access and kernel unmap,
 * IOMMU unmap, dma-buf attachment unmap/detach, then drop the dma-buf
 * reference.  Caller must guarantee the buffer was successfully created.
 */
static void sde_hw_rotator_swtc_destroy(struct sde_hw_rotator *rot)
{
	struct sde_mdp_img_data *data;

	data = &rot->swts_buf;

	dma_buf_end_cpu_access(data->srcp_dma_buf, 0, data->len,
			DMA_FROM_DEVICE);
	dma_buf_kunmap(data->srcp_dma_buf, 0, rot->swts_buffer);

	sde_smmu_unmap_dma_buf(data->srcp_table, SDE_IOMMU_DOMAIN_ROT_UNSECURE,
			DMA_FROM_DEVICE, data->srcp_dma_buf);
	dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table,
			DMA_FROM_DEVICE);
	dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment);
	dma_buf_put(data->srcp_dma_buf);
	/* clear the pointer so a later create starts from a clean state */
	data->srcp_dma_buf = NULL;
}
1302
/*
 * sde_hw_rotator_pre_pmevent - SDE rotator core will call this before a
 *                              PM event occurs
 * @mgr: Pointer to rotator manager
 * @pmon: Boolean indicate an on/off power event
 *
 * Before a power-off event, compares the combined software timestamp
 * against the hardware timestamp register.  A mismatch while the regdma
 * or rotator core still reports busy suggests a hung or unfinished job,
 * in which case an event-log timeout dump is triggered for analysis.
 */
void sde_hw_rotator_pre_pmevent(struct sde_rot_mgr *mgr, bool pmon)
{
	struct sde_hw_rotator *rot;
	u32 l_ts, h_ts, swts, hwts;
	u32 rotsts, regdmasts;

	/*
	 * Check last HW timestamp with SW timestamp before power off event.
	 * If there is a mismatch, that will be quite possible the rotator HW
	 * is either hang or not finishing last submitted job. In that case,
	 * it is best to do a timeout eventlog to capture some good events
	 * log data for analysis.
	 */
	if (!pmon && mgr && mgr->hw_data) {
		rot = mgr->hw_data;
		h_ts = atomic_read(&rot->timestamp[ROT_QUEUE_HIGH_PRIORITY]);
		l_ts = atomic_read(&rot->timestamp[ROT_QUEUE_LOW_PRIORITY]);

		/* construct the combined timestamp: high queue in the low
		 * half-word, low queue shifted into the high half-word
		 */
		swts = (h_ts & SDE_REGDMA_SWTS_MASK) |
			((l_ts & SDE_REGDMA_SWTS_MASK) <<
			 SDE_REGDMA_SWTS_SHIFT);

		/* Need to turn on clock to access rotator register */
		sde_rotator_clk_ctrl(mgr, true);
		hwts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);
		regdmasts = SDE_ROTREG_READ(rot->mdss_base,
				REGDMA_CSR_REGDMA_BLOCK_STATUS);
		rotsts = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);

		SDEROT_DBG(
			"swts:0x%x, hwts:0x%x, regdma-sts:0x%x, rottop-sts:0x%x\n",
				swts, hwts, regdmasts, rotsts);
		SDEROT_EVTLOG(swts, hwts, regdmasts, rotsts);

		/* mismatch alone is benign; only flag it while HW is busy */
		if ((swts != hwts) && ((regdmasts & REGDMA_BUSY) ||
					(rotsts & ROT_STATUS_MASK))) {
			SDEROT_ERR(
				"Mismatch SWTS with HWTS: swts:0x%x, hwts:0x%x, regdma-sts:0x%x, rottop-sts:0x%x\n",
				swts, hwts, regdmasts, rotsts);
			SDEROT_EVTLOG_TOUT_HANDLER("rot", "vbif_dbg_bus",
					"panic");
		}

		/* Turn off rotator clock after checking rotator registers */
		sde_rotator_clk_ctrl(mgr, false);
	}
}
1357
1358/*
1359 * sde_hw_rotator_post_pmevent - SDE rotator core will call this after a
1360 * PM event occurs
1361 * @mgr: Pointer to rotator manager
1362 * @pmon: Boolean indicate an on/off power event
1363 */
1364void sde_hw_rotator_post_pmevent(struct sde_rot_mgr *mgr, bool pmon)
1365{
1366 struct sde_hw_rotator *rot;
1367 u32 l_ts, h_ts, swts;
1368
1369 /*
1370 * After a power on event, the rotator HW is reset to default setting.
1371 * It is necessary to synchronize the SW timestamp with the HW.
1372 */
1373 if (pmon && mgr && mgr->hw_data) {
1374 rot = mgr->hw_data;
1375 h_ts = atomic_read(&rot->timestamp[ROT_QUEUE_HIGH_PRIORITY]);
1376 l_ts = atomic_read(&rot->timestamp[ROT_QUEUE_LOW_PRIORITY]);
1377
1378 /* contruct the combined timstamp */
1379 swts = (h_ts & SDE_REGDMA_SWTS_MASK) |
1380 ((l_ts & SDE_REGDMA_SWTS_MASK) <<
1381 SDE_REGDMA_SWTS_SHIFT);
1382
1383 SDEROT_DBG("swts:0x%x, h_ts:0x%x, l_ts;0x%x\n",
1384 swts, h_ts, l_ts);
1385 SDEROT_EVTLOG(swts, h_ts, l_ts);
1386 rot->reset_hw_ts = true;
1387 rot->last_hw_ts = swts;
1388 }
1389}
1390
1391/*
Alan Kwong9487de22016-01-16 22:06:36 -05001392 * sde_hw_rotator_destroy - Destroy hw rotator and free allocated resources
1393 * @mgr: Pointer to rotator manager
1394 */
1395static void sde_hw_rotator_destroy(struct sde_rot_mgr *mgr)
1396{
1397 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
1398 struct sde_hw_rotator *rot;
1399
1400 if (!mgr || !mgr->pdev || !mgr->hw_data) {
1401 SDEROT_ERR("null parameters\n");
1402 return;
1403 }
1404
1405 rot = mgr->hw_data;
1406 if (rot->irq_num >= 0)
1407 devm_free_irq(&mgr->pdev->dev, rot->irq_num, mdata);
1408
1409 if (rot->mode == ROT_REGDMA_ON)
1410 sde_hw_rotator_swtc_destroy(rot);
1411
1412 devm_kfree(&mgr->pdev->dev, mgr->hw_data);
1413 mgr->hw_data = NULL;
1414}
1415
1416/*
1417 * sde_hw_rotator_alloc_ext - allocate rotator resource from rotator hw
1418 * @mgr: Pointer to rotator manager
1419 * @pipe_id: pipe identifier (not used)
1420 * @wb_id: writeback identifier/priority queue identifier
1421 *
1422 * This function allocates a new hw rotator resource for the given priority.
1423 */
1424static struct sde_rot_hw_resource *sde_hw_rotator_alloc_ext(
1425 struct sde_rot_mgr *mgr, u32 pipe_id, u32 wb_id)
1426{
1427 struct sde_hw_rotator_resource_info *resinfo;
1428
1429 if (!mgr || !mgr->hw_data) {
1430 SDEROT_ERR("null parameters\n");
1431 return NULL;
1432 }
1433
1434 /*
1435 * Allocate rotator resource info. Each allocation is per
1436 * HW priority queue
1437 */
1438 resinfo = devm_kzalloc(&mgr->pdev->dev, sizeof(*resinfo), GFP_KERNEL);
1439 if (!resinfo) {
1440 SDEROT_ERR("Failed allocation HW rotator resource info\n");
1441 return NULL;
1442 }
1443
1444 resinfo->rot = mgr->hw_data;
1445 resinfo->hw.wb_id = wb_id;
1446 atomic_set(&resinfo->hw.num_active, 0);
1447 init_waitqueue_head(&resinfo->hw.wait_queue);
1448
1449 /* For non-regdma, only support one active session */
1450 if (resinfo->rot->mode == ROT_REGDMA_OFF)
1451 resinfo->hw.max_active = 1;
1452 else {
1453 resinfo->hw.max_active = SDE_HW_ROT_REGDMA_TOTAL_CTX - 1;
1454
1455 if (resinfo->rot->iclient == NULL)
1456 sde_hw_rotator_swts_create(resinfo->rot);
1457 }
1458
Alan Kwongf987ea32016-07-06 12:11:44 -04001459 if (resinfo->rot->irq_num >= 0)
Alan Kwong818b7fc2016-07-24 22:07:41 -04001460 sde_hw_rotator_enable_irq(resinfo->rot);
Alan Kwongf987ea32016-07-06 12:11:44 -04001461
Alan Kwong9487de22016-01-16 22:06:36 -05001462 SDEROT_DBG("New rotator resource:%p, priority:%d\n",
1463 resinfo, wb_id);
1464
1465 return &resinfo->hw;
1466}
1467
1468/*
1469 * sde_hw_rotator_free_ext - free the given rotator resource
1470 * @mgr: Pointer to rotator manager
1471 * @hw: Pointer to rotator resource
1472 */
1473static void sde_hw_rotator_free_ext(struct sde_rot_mgr *mgr,
1474 struct sde_rot_hw_resource *hw)
1475{
1476 struct sde_hw_rotator_resource_info *resinfo;
1477
1478 if (!mgr || !mgr->hw_data)
1479 return;
1480
1481 resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
1482
1483 SDEROT_DBG(
1484 "Free rotator resource:%p, priority:%d, active:%d, pending:%d\n",
1485 resinfo, hw->wb_id, atomic_read(&hw->num_active),
1486 hw->pending_count);
1487
Alan Kwongf987ea32016-07-06 12:11:44 -04001488 if (resinfo->rot->irq_num >= 0)
Alan Kwong818b7fc2016-07-24 22:07:41 -04001489 sde_hw_rotator_disable_irq(resinfo->rot);
Alan Kwongf987ea32016-07-06 12:11:44 -04001490
Alan Kwong9487de22016-01-16 22:06:36 -05001491 devm_kfree(&mgr->pdev->dev, resinfo);
1492}
1493
/*
 * sde_hw_rotator_alloc_rotctx - allocate rotator context
 * @rot: Pointer to rotator hw
 * @hw: Pointer to rotator resource
 * @session_id: Session identifier of this context
 *
 * This function allocates a new rotator context for the given session id.
 * It assigns the next software timestamp for the resource's priority
 * queue, binds the per-slot regdma command segment and timestamp address,
 * and registers the context in the lookup table used by the ISR and
 * kickoff/wait paths.
 *
 * Return: pointer to the new context, or NULL on allocation failure
 */
static struct sde_hw_rotator_context *sde_hw_rotator_alloc_rotctx(
		struct sde_hw_rotator *rot,
		struct sde_rot_hw_resource *hw,
		u32 session_id)
{
	struct sde_hw_rotator_context *ctx;

	/* Allocate rotator context */
	ctx = devm_kzalloc(&rot->pdev->dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		SDEROT_ERR("Failed allocation HW rotator context\n");
		return NULL;
	}

	ctx->rot = rot;
	ctx->q_id = hw->wb_id;
	ctx->session_id = session_id;
	ctx->hwres = hw;
	/* next timestamp for this priority queue, wrapped to swts width */
	ctx->timestamp = atomic_add_return(1, &rot->timestamp[ctx->q_id]);
	ctx->timestamp &= SDE_REGDMA_SWTS_MASK;
	ctx->is_secure = false;

	/* per-slot regdma command segment selected by timestamp parity */
	ctx->regdma_base = rot->cmd_wr_ptr[ctx->q_id]
			[sde_hw_rotator_get_regdma_ctxidx(ctx)];
	ctx->regdma_wrptr = ctx->regdma_base;
	/* device address of this slot's entry in the swts buffer */
	ctx->ts_addr = (dma_addr_t)((u32 *)rot->swts_buf.addr +
			ctx->q_id * SDE_HW_ROT_REGDMA_TOTAL_CTX +
			sde_hw_rotator_get_regdma_ctxidx(ctx));

	ctx->last_regdma_timestamp = SDE_REGDMA_SWTS_INVALID;

	init_completion(&ctx->rot_comp);
	init_waitqueue_head(&ctx->regdma_waitq);

	/* Store rotator context for lookup purpose */
	sde_hw_rotator_put_ctx(ctx);

	SDEROT_DBG(
		"New rot CTX:%p, ctxidx:%d, session-id:%d, prio:%d, timestamp:%X, active:%d\n",
		ctx, sde_hw_rotator_get_regdma_ctxidx(ctx), ctx->session_id,
		ctx->q_id, ctx->timestamp,
		atomic_read(&ctx->hwres->num_active));

	return ctx;
}
1547
1548/*
1549 * sde_hw_rotator_free_rotctx - free the given rotator context
1550 * @rot: Pointer to rotator hw
1551 * @ctx: Pointer to rotator context
1552 */
1553static void sde_hw_rotator_free_rotctx(struct sde_hw_rotator *rot,
1554 struct sde_hw_rotator_context *ctx)
1555{
1556 if (!rot || !ctx)
1557 return;
1558
1559 SDEROT_DBG(
1560 "Free rot CTX:%p, ctxidx:%d, session-id:%d, prio:%d, timestamp:%X, active:%d\n",
1561 ctx, sde_hw_rotator_get_regdma_ctxidx(ctx), ctx->session_id,
1562 ctx->q_id, ctx->timestamp,
1563 atomic_read(&ctx->hwres->num_active));
1564
Benjamin Chanc3e185f2016-11-08 21:48:21 -05001565 /* Clear rotator context from lookup purpose */
1566 sde_hw_rotator_clr_ctx(ctx);
Alan Kwong9487de22016-01-16 22:06:36 -05001567
1568 devm_kfree(&rot->pdev->dev, ctx);
1569}
1570
1571/*
1572 * sde_hw_rotator_config - configure hw for the given rotation entry
1573 * @hw: Pointer to rotator resource
1574 * @entry: Pointer to rotation entry
1575 *
1576 * This function setup the fetch/writeback/rotator blocks, as well as VBIF
1577 * based on the given rotation entry.
1578 */
1579static int sde_hw_rotator_config(struct sde_rot_hw_resource *hw,
1580 struct sde_rot_entry *entry)
1581{
1582 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
1583 struct sde_hw_rotator *rot;
1584 struct sde_hw_rotator_resource_info *resinfo;
1585 struct sde_hw_rotator_context *ctx;
1586 struct sde_hw_rot_sspp_cfg sspp_cfg;
1587 struct sde_hw_rot_wb_cfg wb_cfg;
1588 u32 danger_lut = 0; /* applicable for realtime client only */
1589 u32 safe_lut = 0; /* applicable for realtime client only */
1590 u32 flags = 0;
1591 struct sde_rotation_item *item;
1592
1593 if (!hw || !entry) {
1594 SDEROT_ERR("null hw resource/entry\n");
1595 return -EINVAL;
1596 }
1597
1598 resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
1599 rot = resinfo->rot;
1600 item = &entry->item;
1601
1602 ctx = sde_hw_rotator_alloc_rotctx(rot, hw, item->session_id);
1603 if (!ctx) {
1604 SDEROT_ERR("Failed allocating rotator context!!\n");
1605 return -EINVAL;
1606 }
1607
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04001608 if (rot->reset_hw_ts) {
1609 SDEROT_EVTLOG(rot->last_hw_ts);
1610 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG,
1611 rot->last_hw_ts);
1612 /* ensure write is issued to the rotator HW */
1613 wmb();
1614 rot->reset_hw_ts = false;
1615 }
1616
Alan Kwong9487de22016-01-16 22:06:36 -05001617 flags = (item->flags & SDE_ROTATION_FLIP_LR) ?
1618 SDE_ROT_FLAG_FLIP_LR : 0;
1619 flags |= (item->flags & SDE_ROTATION_FLIP_UD) ?
1620 SDE_ROT_FLAG_FLIP_UD : 0;
1621 flags |= (item->flags & SDE_ROTATION_90) ?
1622 SDE_ROT_FLAG_ROT_90 : 0;
1623 flags |= (item->flags & SDE_ROTATION_DEINTERLACE) ?
1624 SDE_ROT_FLAG_DEINTERLACE : 0;
1625 flags |= (item->flags & SDE_ROTATION_SECURE) ?
1626 SDE_ROT_FLAG_SECURE_OVERLAY_SESSION : 0;
Abhijit Kulkarni298c8232016-09-26 22:32:10 -07001627 flags |= (item->flags & SDE_ROTATION_SECURE_CAMERA) ?
1628 SDE_ROT_FLAG_SECURE_CAMERA_SESSION : 0;
1629
Alan Kwong9487de22016-01-16 22:06:36 -05001630
1631 sspp_cfg.img_width = item->input.width;
1632 sspp_cfg.img_height = item->input.height;
Benjamin Chan99eb63b2016-12-21 15:45:26 -05001633 sspp_cfg.fps = entry->perf->config.frame_rate;
1634 sspp_cfg.bw = entry->perf->bw;
Alan Kwong9487de22016-01-16 22:06:36 -05001635 sspp_cfg.fmt = sde_get_format_params(item->input.format);
1636 if (!sspp_cfg.fmt) {
1637 SDEROT_ERR("null format\n");
1638 return -EINVAL;
1639 }
1640 sspp_cfg.src_rect = &item->src_rect;
1641 sspp_cfg.data = &entry->src_buf;
1642 sde_mdp_get_plane_sizes(sspp_cfg.fmt, item->input.width,
1643 item->input.height, &sspp_cfg.src_plane,
1644 0, /* No bwc_mode */
1645 (flags & SDE_ROT_FLAG_SOURCE_ROTATED_90) ?
1646 true : false);
1647
1648 rot->ops.setup_rotator_fetchengine(ctx, ctx->q_id,
Benjamin Chanfb6faa32016-08-16 17:21:01 -04001649 &sspp_cfg, danger_lut, safe_lut,
1650 entry->dnsc_factor_w, entry->dnsc_factor_h, flags);
Alan Kwong9487de22016-01-16 22:06:36 -05001651
1652 wb_cfg.img_width = item->output.width;
1653 wb_cfg.img_height = item->output.height;
Benjamin Chan99eb63b2016-12-21 15:45:26 -05001654 wb_cfg.fps = entry->perf->config.frame_rate;
1655 wb_cfg.bw = entry->perf->bw;
Alan Kwong9487de22016-01-16 22:06:36 -05001656 wb_cfg.fmt = sde_get_format_params(item->output.format);
1657 wb_cfg.dst_rect = &item->dst_rect;
1658 wb_cfg.data = &entry->dst_buf;
1659 sde_mdp_get_plane_sizes(wb_cfg.fmt, item->output.width,
1660 item->output.height, &wb_cfg.dst_plane,
1661 0, /* No bwc_mode */
1662 (flags & SDE_ROT_FLAG_ROT_90) ? true : false);
1663
1664 wb_cfg.v_downscale_factor = entry->dnsc_factor_h;
1665 wb_cfg.h_downscale_factor = entry->dnsc_factor_w;
1666
1667 rot->ops.setup_rotator_wbengine(ctx, ctx->q_id, &wb_cfg, flags);
1668
1669 /* setup VA mapping for debugfs */
1670 if (rot->dbgmem) {
1671 sde_hw_rotator_map_vaddr(&ctx->src_dbgbuf,
1672 &item->input,
1673 &entry->src_buf);
1674
1675 sde_hw_rotator_map_vaddr(&ctx->dst_dbgbuf,
1676 &item->output,
1677 &entry->dst_buf);
1678 }
1679
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04001680 SDEROT_EVTLOG(ctx->timestamp, flags,
1681 item->input.width, item->input.height,
Benjamin Chan53e3bce2016-08-31 14:43:29 -04001682 item->output.width, item->output.height,
1683 entry->src_buf.p[0].addr, entry->dst_buf.p[0].addr);
1684
Alan Kwong9487de22016-01-16 22:06:36 -05001685 if (mdata->default_ot_rd_limit) {
1686 struct sde_mdp_set_ot_params ot_params;
1687
1688 memset(&ot_params, 0, sizeof(struct sde_mdp_set_ot_params));
1689 ot_params.xin_id = XIN_SSPP;
1690 ot_params.num = 0; /* not used */
Alan Kwongeffb5ee2016-03-12 19:47:45 -05001691 ot_params.width = entry->perf->config.input.width;
1692 ot_params.height = entry->perf->config.input.height;
1693 ot_params.fps = entry->perf->config.frame_rate;
Alan Kwong9487de22016-01-16 22:06:36 -05001694 ot_params.reg_off_vbif_lim_conf = MMSS_VBIF_RD_LIM_CONF;
1695 ot_params.reg_off_mdp_clk_ctrl =
1696 MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0;
1697 ot_params.bit_off_mdp_clk_ctrl =
1698 MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN0;
Benjamin Chan99eb63b2016-12-21 15:45:26 -05001699 ot_params.fmt = ctx->is_traffic_shaping ?
1700 SDE_PIX_FMT_ABGR_8888 :
1701 entry->perf->config.input.format;
Alan Kwong9487de22016-01-16 22:06:36 -05001702 sde_mdp_set_ot_limit(&ot_params);
1703 }
1704
1705 if (mdata->default_ot_wr_limit) {
1706 struct sde_mdp_set_ot_params ot_params;
1707
1708 memset(&ot_params, 0, sizeof(struct sde_mdp_set_ot_params));
1709 ot_params.xin_id = XIN_WRITEBACK;
1710 ot_params.num = 0; /* not used */
Alan Kwongeffb5ee2016-03-12 19:47:45 -05001711 ot_params.width = entry->perf->config.input.width;
1712 ot_params.height = entry->perf->config.input.height;
1713 ot_params.fps = entry->perf->config.frame_rate;
Alan Kwong9487de22016-01-16 22:06:36 -05001714 ot_params.reg_off_vbif_lim_conf = MMSS_VBIF_WR_LIM_CONF;
1715 ot_params.reg_off_mdp_clk_ctrl =
1716 MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0;
1717 ot_params.bit_off_mdp_clk_ctrl =
1718 MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN1;
Benjamin Chan99eb63b2016-12-21 15:45:26 -05001719 ot_params.fmt = ctx->is_traffic_shaping ?
1720 SDE_PIX_FMT_ABGR_8888 :
1721 entry->perf->config.input.format;
Alan Kwong9487de22016-01-16 22:06:36 -05001722 sde_mdp_set_ot_limit(&ot_params);
1723 }
1724
1725 if (test_bit(SDE_QOS_PER_PIPE_LUT, mdata->sde_qos_map)) {
1726 u32 qos_lut = 0; /* low priority for nrt read client */
1727
1728 trace_rot_perf_set_qos_luts(XIN_SSPP, sspp_cfg.fmt->format,
1729 qos_lut, sde_mdp_is_linear_format(sspp_cfg.fmt));
1730
1731 SDE_ROTREG_WRITE(rot->mdss_base, ROT_SSPP_CREQ_LUT, qos_lut);
1732 }
1733
1734 if (mdata->npriority_lvl > 0) {
1735 u32 mask, reg_val, i, vbif_qos;
1736
1737 for (i = 0; i < mdata->npriority_lvl; i++) {
1738 reg_val = SDE_VBIF_READ(mdata,
1739 MMSS_VBIF_NRT_VBIF_QOS_REMAP_00 + i*4);
1740 mask = 0x3 << (XIN_SSPP * 2);
1741 reg_val &= ~(mask);
1742 vbif_qos = mdata->vbif_nrt_qos[i];
1743 reg_val |= vbif_qos << (XIN_SSPP * 2);
1744 /* ensure write is issued after the read operation */
1745 mb();
1746 SDE_VBIF_WRITE(mdata,
1747 MMSS_VBIF_NRT_VBIF_QOS_REMAP_00 + i*4,
1748 reg_val);
1749 }
1750 }
1751
1752 /* Enable write gather for writeback to remove write gaps, which
1753 * may hang AXI/BIMC/SDE.
1754 */
1755 SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_WRITE_GATHTER_EN,
1756 BIT(XIN_WRITEBACK));
1757
1758 return 0;
1759}
1760
1761/*
1762 * sde_hw_rotator_kickoff - kickoff processing on the given entry
1763 * @hw: Pointer to rotator resource
1764 * @entry: Pointer to rotation entry
1765 */
1766static int sde_hw_rotator_kickoff(struct sde_rot_hw_resource *hw,
1767 struct sde_rot_entry *entry)
1768{
1769 struct sde_hw_rotator *rot;
1770 struct sde_hw_rotator_resource_info *resinfo;
1771 struct sde_hw_rotator_context *ctx;
1772 int ret = 0;
1773
1774 if (!hw || !entry) {
1775 SDEROT_ERR("null hw resource/entry\n");
1776 return -EINVAL;
1777 }
1778
1779 resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
1780 rot = resinfo->rot;
1781
1782 /* Lookup rotator context from session-id */
1783 ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id, hw->wb_id);
1784 if (!ctx) {
1785 SDEROT_ERR("Cannot locate rotator ctx from sesison id:%d\n",
1786 entry->item.session_id);
Benjamin Chan62b94ed2016-08-18 23:55:21 -04001787 return -EINVAL;
Alan Kwong9487de22016-01-16 22:06:36 -05001788 }
Alan Kwong9487de22016-01-16 22:06:36 -05001789
1790 ret = sde_smmu_ctrl(1);
1791 if (IS_ERR_VALUE(ret)) {
1792 SDEROT_ERR("IOMMU attach failed\n");
1793 return ret;
1794 }
1795
1796 rot->ops.start_rotator(ctx, ctx->q_id);
1797
1798 return 0;
1799}
1800
/*
 * sde_hw_rotator_wait4done - wait for completion notification
 * @hw: Pointer to rotator resource
 * @entry: Pointer to rotation entry
 *
 * This function blocks until the given entry is complete, error
 * is detected, or timeout.  It also releases the IOMMU attachment
 * reference taken in sde_hw_rotator_kickoff() and frees the rotator
 * context, so it must be called exactly once per successful kickoff.
 *
 * Return: result of the mode-specific wait op, or -EINVAL on bad args
 */
static int sde_hw_rotator_wait4done(struct sde_rot_hw_resource *hw,
		struct sde_rot_entry *entry)
{
	struct sde_hw_rotator *rot;
	struct sde_hw_rotator_resource_info *resinfo;
	struct sde_hw_rotator_context *ctx;
	int ret;

	if (!hw || !entry) {
		SDEROT_ERR("null hw resource/entry\n");
		return -EINVAL;
	}

	resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
	rot = resinfo->rot;

	/* Lookup rotator context from session-id */
	ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id, hw->wb_id);
	if (!ctx) {
		SDEROT_ERR("Cannot locate rotator ctx from sesison id:%d\n",
				entry->item.session_id);
		/*
		 * NOTE(review): returning here leaves the sde_smmu_ctrl(1)
		 * reference taken in kickoff unbalanced -- confirm this path
		 * is unreachable after a successful kickoff.
		 */
		return -EINVAL;
	}

	ret = rot->ops.wait_rotator_done(ctx, ctx->q_id, 0);

	/* release the IOMMU attachment acquired at kickoff */
	sde_smmu_ctrl(0);

	if (rot->dbgmem) {
		sde_hw_rotator_unmap_vaddr(&ctx->src_dbgbuf);
		sde_hw_rotator_unmap_vaddr(&ctx->dst_dbgbuf);
	}

	/* Current rotator context job is finished, time to free up*/
	sde_hw_rotator_free_rotctx(rot, ctx);

	return ret;
}
1847
1848/*
1849 * sde_rotator_hw_rev_init - setup feature and/or capability bitmask
1850 * @rot: Pointer to hw rotator
1851 *
1852 * This function initializes feature and/or capability bitmask based on
1853 * h/w version read from the device.
1854 */
static int sde_rotator_hw_rev_init(struct sde_hw_rotator *rot)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	u32 hw_version;

	if (!mdata) {
		SDEROT_ERR("null rotator data\n");
		return -EINVAL;
	}

	/* read h/w revision to key feature selection below */
	hw_version = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_HW_VERSION);
	SDEROT_DBG("hw version %8.8x\n", hw_version);

	/* QoS feature selection for this h/w generation */
	clear_bit(SDE_QOS_PER_PIPE_IB, mdata->sde_qos_map);
	set_bit(SDE_QOS_OVERHEAD_FACTOR, mdata->sde_qos_map);
	clear_bit(SDE_QOS_CDP, mdata->sde_qos_map);
	set_bit(SDE_QOS_OTLIM, mdata->sde_qos_map);
	set_bit(SDE_QOS_PER_PIPE_LUT, mdata->sde_qos_map);
	clear_bit(SDE_QOS_SIMPLIFIED_PREFILL, mdata->sde_qos_map);

	set_bit(SDE_CAPS_R3_WB, mdata->sde_caps_map);

	/* 1.5x downscale is available on all revisions except V1.0 */
	if (hw_version != SDE_ROT_TYPE_V1_0) {
		SDEROT_DBG("Supporting 1.5 downscale for SDE Rotator\n");
		set_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map);
	}

	set_bit(SDE_CAPS_SEC_ATTACH_DETACH_SMMU, mdata->sde_caps_map);

	/* hook up r3 debug bus and register-dump tables for diagnostics */
	mdata->nrt_vbif_dbg_bus = nrt_vbif_dbg_bus_r3;
	mdata->nrt_vbif_dbg_bus_size =
			ARRAY_SIZE(nrt_vbif_dbg_bus_r3);

	mdata->regdump = sde_rot_r3_regdump;
	mdata->regdump_size = ARRAY_SIZE(sde_rot_r3_regdump);
	/* reset software timestamp register to a known initial value */
	SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG, 0);
	return 0;
}
1893
1894/*
1895 * sde_hw_rotator_rotirq_handler - non-regdma interrupt handler
1896 * @irq: Interrupt number
1897 * @ptr: Pointer to private handle provided during registration
1898 *
1899 * This function services rotator interrupt and wakes up waiting client
1900 * with pending rotation requests already submitted to h/w.
1901 */
1902static irqreturn_t sde_hw_rotator_rotirq_handler(int irq, void *ptr)
1903{
1904 struct sde_hw_rotator *rot = ptr;
1905 struct sde_hw_rotator_context *ctx;
1906 irqreturn_t ret = IRQ_NONE;
1907 u32 isr;
1908
1909 isr = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_INTR_STATUS);
1910
1911 SDEROT_DBG("intr_status = %8.8x\n", isr);
1912
1913 if (isr & ROT_DONE_MASK) {
1914 if (rot->irq_num >= 0)
Alan Kwong818b7fc2016-07-24 22:07:41 -04001915 sde_hw_rotator_disable_irq(rot);
Alan Kwong9487de22016-01-16 22:06:36 -05001916 SDEROT_DBG("Notify rotator complete\n");
1917
1918 /* Normal rotator only 1 session, no need to lookup */
1919 ctx = rot->rotCtx[0][0];
1920 WARN_ON(ctx == NULL);
1921 complete_all(&ctx->rot_comp);
1922
1923 spin_lock(&rot->rotisr_lock);
1924 SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
1925 ROT_DONE_CLEAR);
1926 spin_unlock(&rot->rotisr_lock);
1927 ret = IRQ_HANDLED;
1928 }
1929
1930 return ret;
1931}
1932
1933/*
1934 * sde_hw_rotator_regdmairq_handler - regdma interrupt handler
1935 * @irq: Interrupt number
1936 * @ptr: Pointer to private handle provided during registration
1937 *
1938 * This function services rotator interrupt, decoding the source of
1939 * events (high/low priority queue), and wakes up all waiting clients
1940 * with pending rotation requests already submitted to h/w.
1941 */
static irqreturn_t sde_hw_rotator_regdmairq_handler(int irq, void *ptr)
{
	struct sde_hw_rotator *rot = ptr;
	struct sde_hw_rotator_context *ctx;
	irqreturn_t ret = IRQ_NONE;
	u32 isr;
	u32 ts;
	u32 q_id;

	isr = SDE_ROTREG_READ(rot->mdss_base, REGDMA_CSR_REGDMA_INT_STATUS);
	/* acknowledge interrupt before reading latest timestamp */
	SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_INT_CLEAR, isr);
	ts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);

	SDEROT_DBG("intr_status = %8.8x, sw_TS:%X\n", isr, ts);

	/* Any REGDMA status, including error and watchdog timer, should
	 * trigger and wake up waiting thread
	 */
	if (isr & (REGDMA_INT_HIGH_MASK | REGDMA_INT_LOW_MASK)) {
		spin_lock(&rot->rotisr_lock);

		/*
		 * Obtain rotator context based on timestamp from regdma
		 * and low/high interrupt status
		 */
		if (isr & REGDMA_INT_HIGH_MASK) {
			q_id = ROT_QUEUE_HIGH_PRIORITY;
			/* low half of the s/w timestamp word is high queue */
			ts = ts & SDE_REGDMA_SWTS_MASK;
		} else if (isr & REGDMA_INT_LOW_MASK) {
			q_id = ROT_QUEUE_LOW_PRIORITY;
			/* upper half of the s/w timestamp word is low queue */
			ts = (ts >> SDE_REGDMA_SWTS_SHIFT) &
				SDE_REGDMA_SWTS_MASK;
		} else {
			/*
			 * NOTE(review): unreachable given the enclosing
			 * if-condition; kept as a defensive catch-all.
			 */
			SDEROT_ERR("unknown ISR status: isr=0x%X\n", isr);
			goto done_isr_handle;
		}
		ctx = rot->rotCtx[q_id][ts & SDE_HW_ROT_REGDMA_SEG_MASK];

		/*
		 * Wake up all waiting context from the current and previous
		 * SW Timestamp.
		 */
		while (ctx &&
			sde_hw_rotator_elapsed_swts(ctx->timestamp, ts) >= 0) {
			/* record latest isr/timestamp for the woken waiter */
			ctx->last_regdma_isr_status = isr;
			ctx->last_regdma_timestamp = ts;
			SDEROT_DBG(
				"regdma complete: ctx:%p, ts:%X\n", ctx, ts);
			wake_up_all(&ctx->regdma_waitq);

			/* walk backwards to catch coalesced completions */
			ts = (ts - 1) & SDE_REGDMA_SWTS_MASK;
			ctx = rot->rotCtx[q_id]
				[ts & SDE_HW_ROT_REGDMA_SEG_MASK];
		};

done_isr_handle:
		spin_unlock(&rot->rotisr_lock);
		ret = IRQ_HANDLED;
	} else if (isr & REGDMA_INT_ERR_MASK) {
		/*
		 * For REGDMA Err, we save the isr info and wake up
		 * all waiting contexts
		 */
		int i, j;

		SDEROT_ERR(
			"regdma err isr:%X, wake up all waiting contexts\n",
			isr);

		spin_lock(&rot->rotisr_lock);

		/* broadcast error status to every context not yet notified */
		for (i = 0; i < ROT_QUEUE_MAX; i++) {
			for (j = 0; j < SDE_HW_ROT_REGDMA_TOTAL_CTX; j++) {
				ctx = rot->rotCtx[i][j];
				if (ctx && ctx->last_regdma_isr_status == 0) {
					ctx->last_regdma_isr_status = isr;
					ctx->last_regdma_timestamp = ts;
					wake_up_all(&ctx->regdma_waitq);
					SDEROT_DBG("Wakeup rotctx[%d][%d]:%p\n",
							i, j, ctx);
				}
			}
		}

		spin_unlock(&rot->rotisr_lock);
		ret = IRQ_HANDLED;
	}

	return ret;
}
2033
2034/*
2035 * sde_hw_rotator_validate_entry - validate rotation entry
2036 * @mgr: Pointer to rotator manager
2037 * @entry: Pointer to rotation entry
2038 *
2039 * This function validates the given rotation entry and provides possible
2040 * fixup (future improvement) if available. This function returns 0 if
2041 * the entry is valid, and returns error code otherwise.
2042 */
static int sde_hw_rotator_validate_entry(struct sde_rot_mgr *mgr,
		struct sde_rot_entry *entry)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	int ret = 0;
	u16 src_w, src_h, dst_w, dst_h;
	struct sde_rotation_item *item = &entry->item;
	struct sde_mdp_format_params *fmt;

	src_w = item->src_rect.w;
	src_h = item->src_rect.h;

	/* for 90/270 rotation the destination w/h are swapped relative
	 * to the source, so compare against the swapped dimensions
	 */
	if (item->flags & SDE_ROTATION_90) {
		dst_w = item->dst_rect.h;
		dst_h = item->dst_rect.w;
	} else {
		dst_w = item->dst_rect.w;
		dst_h = item->dst_rect.h;
	}

	/* factor 0 means "no downscale"; recomputed below if scaling */
	entry->dnsc_factor_w = 0;
	entry->dnsc_factor_h = 0;

	if ((src_w != dst_w) || (src_h != dst_h)) {
		/* non-integral ratios may still qualify as 1.5x on V2 h/w */
		if ((src_w % dst_w) || (src_h % dst_h)) {
			SDEROT_DBG("non integral scale not support\n");
			ret = -EINVAL;
			goto dnsc_1p5_check;
		}
		/* integral factors must be power-of-2 and at most 64 */
		entry->dnsc_factor_w = src_w / dst_w;
		if ((entry->dnsc_factor_w & (entry->dnsc_factor_w - 1)) ||
				(entry->dnsc_factor_w > 64)) {
			SDEROT_DBG("non power-of-2 w_scale not support\n");
			ret = -EINVAL;
			goto dnsc_err;
		}
		entry->dnsc_factor_h = src_h / dst_h;
		if ((entry->dnsc_factor_h & (entry->dnsc_factor_h - 1)) ||
				(entry->dnsc_factor_h > 64)) {
			SDEROT_DBG("non power-of-2 h_scale not support\n");
			ret = -EINVAL;
			goto dnsc_err;
		}
	}

	fmt = sde_get_format_params(item->input.format);
	/*
	 * Rotator downscale support max 4 times for UBWC format and
	 * max 2 times for TP10/TP10_UBWC format
	 * (checking the h factor suffices: asymmetric w/h factors are
	 * rejected at dnsc_err below)
	 */
	if (sde_mdp_is_ubwc_format(fmt) && (entry->dnsc_factor_h > 4)) {
		SDEROT_DBG("max downscale for UBWC format is 4\n");
		ret = -EINVAL;
		goto dnsc_err;
	}
	if (sde_mdp_is_tp10_format(fmt) && (entry->dnsc_factor_h > 2)) {
		SDEROT_DBG("downscale with TP10 cannot be more than 2\n");
		ret = -EINVAL;
	}
	/* skip the 1.5x path; fall through to the common final checks */
	goto dnsc_err;

dnsc_1p5_check:
	/* Check for 1.5 downscale that only applies to V2 HW */
	if (test_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map)) {
		/* exact 3:2 ratio required: dst * 3 == src * 2 */
		entry->dnsc_factor_w = src_w / dst_w;
		if ((entry->dnsc_factor_w != 1) ||
				((dst_w * 3) != (src_w * 2))) {
			SDEROT_DBG(
				"No supporting non 1.5 downscale width ratio, src_w:%d, dst_w:%d\n",
				src_w, dst_w);
			ret = -EINVAL;
			goto dnsc_err;
		}

		entry->dnsc_factor_h = src_h / dst_h;
		if ((entry->dnsc_factor_h != 1) ||
				((dst_h * 3) != (src_h * 2))) {
			SDEROT_DBG(
				"Not supporting non 1.5 downscale height ratio, src_h:%d, dst_h:%d\n",
				src_h, dst_h);
			ret = -EINVAL;
			goto dnsc_err;
		}
		/* valid 1.5x downscale: clear the earlier -EINVAL */
		ret = 0;
	}

dnsc_err:
	/* Downscaler does not support asymmetrical dnsc */
	if (entry->dnsc_factor_w != entry->dnsc_factor_h) {
		SDEROT_DBG("asymmetric downscale not support\n");
		ret = -EINVAL;
	}

	/* on any failure leave the entry with no downscale factors set */
	if (ret) {
		entry->dnsc_factor_w = 0;
		entry->dnsc_factor_h = 0;
	}
	return ret;
}
2142
2143/*
2144 * sde_hw_rotator_show_caps - output capability info to sysfs 'caps' file
2145 * @mgr: Pointer to rotator manager
2146 * @attr: Pointer to device attribute interface
2147 * @buf: Pointer to output buffer
2148 * @len: Length of output buffer
2149 */
2150static ssize_t sde_hw_rotator_show_caps(struct sde_rot_mgr *mgr,
2151 struct device_attribute *attr, char *buf, ssize_t len)
2152{
2153 struct sde_hw_rotator *hw_data;
Benjamin Chan886ff672016-11-07 15:23:17 -05002154 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
Alan Kwong9487de22016-01-16 22:06:36 -05002155 int cnt = 0;
2156
2157 if (!mgr || !buf)
2158 return 0;
2159
2160 hw_data = mgr->hw_data;
2161
2162#define SPRINT(fmt, ...) \
2163 (cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
2164
2165 /* insert capabilities here */
Benjamin Chan886ff672016-11-07 15:23:17 -05002166 if (test_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map))
2167 SPRINT("min_downscale=1.5\n");
2168 else
2169 SPRINT("min_downscale=2.0\n");
Alan Kwong9487de22016-01-16 22:06:36 -05002170
Benjamin Chan42db2c92016-11-22 22:50:01 -05002171 SPRINT("downscale_compression=1\n");
2172
Alan Kwong9487de22016-01-16 22:06:36 -05002173#undef SPRINT
2174 return cnt;
2175}
2176
2177/*
2178 * sde_hw_rotator_show_state - output state info to sysfs 'state' file
2179 * @mgr: Pointer to rotator manager
2180 * @attr: Pointer to device attribute interface
2181 * @buf: Pointer to output buffer
2182 * @len: Length of output buffer
2183 */
2184static ssize_t sde_hw_rotator_show_state(struct sde_rot_mgr *mgr,
2185 struct device_attribute *attr, char *buf, ssize_t len)
2186{
2187 struct sde_hw_rotator *rot;
2188 struct sde_hw_rotator_context *ctx;
2189 int cnt = 0;
2190 int num_active = 0;
2191 int i, j;
2192
2193 if (!mgr || !buf) {
2194 SDEROT_ERR("null parameters\n");
2195 return 0;
2196 }
2197
2198 rot = mgr->hw_data;
2199
2200#define SPRINT(fmt, ...) \
2201 (cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
2202
2203 if (rot) {
2204 SPRINT("rot_mode=%d\n", rot->mode);
2205 SPRINT("irq_num=%d\n", rot->irq_num);
2206
2207 if (rot->mode == ROT_REGDMA_OFF) {
2208 SPRINT("max_active=1\n");
2209 SPRINT("num_active=%d\n", rot->rotCtx[0][0] ? 1 : 0);
2210 } else {
2211 for (i = 0; i < ROT_QUEUE_MAX; i++) {
2212 for (j = 0; j < SDE_HW_ROT_REGDMA_TOTAL_CTX;
2213 j++) {
2214 ctx = rot->rotCtx[i][j];
2215
2216 if (ctx) {
2217 SPRINT(
2218 "rotCtx[%d][%d]:%p\n",
2219 i, j, ctx);
2220 ++num_active;
2221 }
2222 }
2223 }
2224
2225 SPRINT("max_active=%d\n", SDE_HW_ROT_REGDMA_TOTAL_CTX);
2226 SPRINT("num_active=%d\n", num_active);
2227 }
2228 }
2229
2230#undef SPRINT
2231 return cnt;
2232}
2233
2234/*
Alan Kwongda16e442016-08-14 20:47:18 -04002235 * sde_hw_rotator_get_pixfmt - get the indexed pixel format
2236 * @mgr: Pointer to rotator manager
2237 * @index: index of pixel format
2238 * @input: true for input port; false for output port
2239 */
2240static u32 sde_hw_rotator_get_pixfmt(struct sde_rot_mgr *mgr,
2241 int index, bool input)
2242{
2243 if (input) {
2244 if (index < ARRAY_SIZE(sde_hw_rotator_input_pixfmts))
2245 return sde_hw_rotator_input_pixfmts[index];
2246 else
2247 return 0;
2248 } else {
2249 if (index < ARRAY_SIZE(sde_hw_rotator_output_pixfmts))
2250 return sde_hw_rotator_output_pixfmts[index];
2251 else
2252 return 0;
2253 }
2254}
2255
2256/*
2257 * sde_hw_rotator_is_valid_pixfmt - verify if the given pixel format is valid
2258 * @mgr: Pointer to rotator manager
2259 * @pixfmt: pixel format to be verified
2260 * @input: true for input port; false for output port
2261 */
2262static int sde_hw_rotator_is_valid_pixfmt(struct sde_rot_mgr *mgr, u32 pixfmt,
2263 bool input)
2264{
2265 int i;
2266
2267 if (input) {
2268 for (i = 0; i < ARRAY_SIZE(sde_hw_rotator_input_pixfmts); i++)
2269 if (sde_hw_rotator_input_pixfmts[i] == pixfmt)
2270 return true;
2271 } else {
2272 for (i = 0; i < ARRAY_SIZE(sde_hw_rotator_output_pixfmts); i++)
2273 if (sde_hw_rotator_output_pixfmts[i] == pixfmt)
2274 return true;
2275 }
2276
2277 return false;
2278}
2279
2280/*
Alan Kwong9487de22016-01-16 22:06:36 -05002281 * sde_hw_rotator_parse_dt - parse r3 specific device tree settings
2282 * @hw_data: Pointer to rotator hw
2283 * @dev: Pointer to platform device
2284 */
2285static int sde_hw_rotator_parse_dt(struct sde_hw_rotator *hw_data,
2286 struct platform_device *dev)
2287{
2288 int ret = 0;
2289 u32 data;
2290
2291 if (!hw_data || !dev)
2292 return -EINVAL;
2293
2294 ret = of_property_read_u32(dev->dev.of_node, "qcom,mdss-rot-mode",
2295 &data);
2296 if (ret) {
2297 SDEROT_DBG("default to regdma off\n");
2298 ret = 0;
2299 hw_data->mode = ROT_REGDMA_OFF;
2300 } else if (data < ROT_REGDMA_MAX) {
2301 SDEROT_DBG("set to regdma mode %d\n", data);
2302 hw_data->mode = data;
2303 } else {
2304 SDEROT_ERR("regdma mode out of range. default to regdma off\n");
2305 hw_data->mode = ROT_REGDMA_OFF;
2306 }
2307
2308 ret = of_property_read_u32(dev->dev.of_node,
2309 "qcom,mdss-highest-bank-bit", &data);
2310 if (ret) {
2311 SDEROT_DBG("default to A5X bank\n");
2312 ret = 0;
2313 hw_data->highest_bank = 2;
2314 } else {
2315 SDEROT_DBG("set highest bank bit to %d\n", data);
2316 hw_data->highest_bank = data;
2317 }
2318
2319 return ret;
2320}
2321
2322/*
2323 * sde_rotator_r3_init - initialize the r3 module
2324 * @mgr: Pointer to rotator manager
2325 *
2326 * This function setup r3 callback functions, parses r3 specific
2327 * device tree settings, installs r3 specific interrupt handler,
2328 * as well as initializes r3 internal data structure.
2329 */
2330int sde_rotator_r3_init(struct sde_rot_mgr *mgr)
2331{
2332 struct sde_hw_rotator *rot;
2333 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
2334 int i;
2335 int ret;
2336
2337 rot = devm_kzalloc(&mgr->pdev->dev, sizeof(*rot), GFP_KERNEL);
2338 if (!rot)
2339 return -ENOMEM;
2340
2341 mgr->hw_data = rot;
2342 mgr->queue_count = ROT_QUEUE_MAX;
2343
2344 rot->mdss_base = mdata->sde_io.base;
2345 rot->pdev = mgr->pdev;
2346
2347 /* Assign ops */
2348 mgr->ops_hw_destroy = sde_hw_rotator_destroy;
2349 mgr->ops_hw_alloc = sde_hw_rotator_alloc_ext;
2350 mgr->ops_hw_free = sde_hw_rotator_free_ext;
2351 mgr->ops_config_hw = sde_hw_rotator_config;
2352 mgr->ops_kickoff_entry = sde_hw_rotator_kickoff;
2353 mgr->ops_wait_for_entry = sde_hw_rotator_wait4done;
2354 mgr->ops_hw_validate_entry = sde_hw_rotator_validate_entry;
2355 mgr->ops_hw_show_caps = sde_hw_rotator_show_caps;
2356 mgr->ops_hw_show_state = sde_hw_rotator_show_state;
2357 mgr->ops_hw_create_debugfs = sde_rotator_r3_create_debugfs;
Alan Kwongda16e442016-08-14 20:47:18 -04002358 mgr->ops_hw_get_pixfmt = sde_hw_rotator_get_pixfmt;
2359 mgr->ops_hw_is_valid_pixfmt = sde_hw_rotator_is_valid_pixfmt;
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04002360 mgr->ops_hw_pre_pmevent = sde_hw_rotator_pre_pmevent;
2361 mgr->ops_hw_post_pmevent = sde_hw_rotator_post_pmevent;
Alan Kwong9487de22016-01-16 22:06:36 -05002362
2363 ret = sde_hw_rotator_parse_dt(mgr->hw_data, mgr->pdev);
2364 if (ret)
2365 goto error_parse_dt;
2366
2367 rot->irq_num = platform_get_irq(mgr->pdev, 0);
2368 if (rot->irq_num < 0) {
2369 SDEROT_ERR("fail to get rotator irq\n");
2370 } else {
2371 if (rot->mode == ROT_REGDMA_OFF)
2372 ret = devm_request_threaded_irq(&mgr->pdev->dev,
2373 rot->irq_num,
2374 sde_hw_rotator_rotirq_handler,
2375 NULL, 0, "sde_rotator_r3", rot);
2376 else
2377 ret = devm_request_threaded_irq(&mgr->pdev->dev,
2378 rot->irq_num,
2379 sde_hw_rotator_regdmairq_handler,
2380 NULL, 0, "sde_rotator_r3", rot);
2381 if (ret) {
2382 SDEROT_ERR("fail to request irq r:%d\n", ret);
2383 rot->irq_num = -1;
2384 } else {
2385 disable_irq(rot->irq_num);
2386 }
2387 }
Alan Kwong818b7fc2016-07-24 22:07:41 -04002388 atomic_set(&rot->irq_enabled, 0);
Alan Kwong9487de22016-01-16 22:06:36 -05002389
2390 setup_rotator_ops(&rot->ops, rot->mode);
2391
2392 spin_lock_init(&rot->rotctx_lock);
2393 spin_lock_init(&rot->rotisr_lock);
2394
2395 /* REGDMA initialization */
2396 if (rot->mode == ROT_REGDMA_OFF) {
2397 for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
2398 rot->cmd_wr_ptr[0][i] = &rot->cmd_queue[
2399 SDE_HW_ROT_REGDMA_SEG_SIZE * i];
2400 } else {
2401 for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
2402 rot->cmd_wr_ptr[ROT_QUEUE_HIGH_PRIORITY][i] =
2403 (u32 *)(rot->mdss_base +
2404 REGDMA_RAM_REGDMA_CMD_RAM +
2405 SDE_HW_ROT_REGDMA_SEG_SIZE * 4 * i);
2406
2407 for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
2408 rot->cmd_wr_ptr[ROT_QUEUE_LOW_PRIORITY][i] =
2409 (u32 *)(rot->mdss_base +
2410 REGDMA_RAM_REGDMA_CMD_RAM +
2411 SDE_HW_ROT_REGDMA_SEG_SIZE * 4 *
2412 (i + SDE_HW_ROT_REGDMA_TOTAL_CTX));
2413 }
2414
2415 atomic_set(&rot->timestamp[0], 0);
2416 atomic_set(&rot->timestamp[1], 0);
Alan Kwong9487de22016-01-16 22:06:36 -05002417
2418 ret = sde_rotator_hw_rev_init(rot);
2419 if (ret)
2420 goto error_hw_rev_init;
2421
Alan Kwong315cd772016-08-03 22:29:42 -04002422 /* set rotator CBCR to shutoff memory/periphery on clock off.*/
Benjamin Chan77aed192016-10-17 17:49:41 -04002423 clk_set_flags(mgr->rot_clk[SDE_ROTATOR_CLK_ROT_CORE].clk,
Alan Kwong315cd772016-08-03 22:29:42 -04002424 CLKFLAG_NORETAIN_MEM);
Benjamin Chan77aed192016-10-17 17:49:41 -04002425 clk_set_flags(mgr->rot_clk[SDE_ROTATOR_CLK_ROT_CORE].clk,
Alan Kwong315cd772016-08-03 22:29:42 -04002426 CLKFLAG_NORETAIN_PERIPH);
2427
Benjamin Chan53e3bce2016-08-31 14:43:29 -04002428 mdata->sde_rot_hw = rot;
Alan Kwong9487de22016-01-16 22:06:36 -05002429 return 0;
2430error_hw_rev_init:
2431 if (rot->irq_num >= 0)
2432 devm_free_irq(&mgr->pdev->dev, rot->irq_num, mdata);
2433 devm_kfree(&mgr->pdev->dev, mgr->hw_data);
2434error_parse_dt:
2435 return ret;
2436}