blob: 0cc8c7cded4192f20500a53c83c1509671071ee6 [file] [log] [blame]
Benjamin Chan99eb63b2016-12-21 15:45:26 -05001/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
Alan Kwong9487de22016-01-16 22:06:36 -05002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#define pr_fmt(fmt) "%s: " fmt, __func__
15
16#include <linux/platform_device.h>
17#include <linux/module.h>
18#include <linux/fs.h>
19#include <linux/file.h>
20#include <linux/sync.h>
21#include <linux/delay.h>
22#include <linux/debugfs.h>
23#include <linux/interrupt.h>
24#include <linux/dma-mapping.h>
25#include <linux/dma-buf.h>
26#include <linux/msm_ion.h>
Alan Kwong315cd772016-08-03 22:29:42 -040027#include <linux/clk/msm-clk.h>
Alan Kwong9487de22016-01-16 22:06:36 -050028
29#include "sde_rotator_core.h"
30#include "sde_rotator_util.h"
31#include "sde_rotator_smmu.h"
32#include "sde_rotator_r3.h"
33#include "sde_rotator_r3_internal.h"
34#include "sde_rotator_r3_hwio.h"
35#include "sde_rotator_r3_debug.h"
36#include "sde_rotator_trace.h"
Benjamin Chan53e3bce2016-08-31 14:43:29 -040037#include "sde_rotator_debug.h"
Alan Kwong9487de22016-01-16 22:06:36 -050038
Benjamin Chan99eb63b2016-12-21 15:45:26 -050039#define RES_UHD (3840*2160)
40
41/* traffic shaping clock ticks = finish_time x 19.2MHz */
42#define TRAFFIC_SHAPE_CLKTICK_14MS 268800
43#define TRAFFIC_SHAPE_CLKTICK_12MS 230400
44
Alan Kwong9487de22016-01-16 22:06:36 -050045/* XIN mapping */
46#define XIN_SSPP 0
47#define XIN_WRITEBACK 1
48
49/* wait for at most 2 vsync for lowest refresh rate (24hz) */
50#define KOFF_TIMEOUT msecs_to_jiffies(42 * 32)
51
/*
 * Macros for constructing the REGDMA command stream.
 *
 * Each macro appends one REGDMA operation to the command buffer pointed
 * to by @p and advances @p past the words it emits.  The opcode is OR'ed
 * into the register-offset word, so the offset is masked with
 * REGDMA_ADDR_OFFSET_MASK first.
 *
 * All macro parameters are fully parenthesized (including @p and @base)
 * so that expression arguments expand safely (CERT PRE01-C).
 */
#define SDE_REGDMA_WRITE(p, off, data) \
	do { \
		*(p)++ = REGDMA_OP_REGWRITE | \
			((off) & REGDMA_ADDR_OFFSET_MASK); \
		*(p)++ = (data); \
	} while (0)

#define SDE_REGDMA_MODIFY(p, off, mask, data) \
	do { \
		*(p)++ = REGDMA_OP_REGMODIFY | \
			((off) & REGDMA_ADDR_OFFSET_MASK); \
		*(p)++ = (mask); \
		*(p)++ = (data); \
	} while (0)

#define SDE_REGDMA_BLKWRITE_INC(p, off, len) \
	do { \
		*(p)++ = REGDMA_OP_BLKWRITE_INC | \
			((off) & REGDMA_ADDR_OFFSET_MASK); \
		*(p)++ = (len); \
	} while (0)

#define SDE_REGDMA_BLKWRITE_DATA(p, data) \
	do { \
		*(p) = (data); \
		(p)++; \
	} while (0)

/* Macros for directly accessing mapped rotator registers */
#define SDE_ROTREG_WRITE(base, off, data) \
	writel_relaxed(data, ((base) + (off)))

#define SDE_ROTREG_READ(base, off) \
	readl_relaxed((base) + (off))
87
Alan Kwongda16e442016-08-14 20:47:18 -040088static u32 sde_hw_rotator_input_pixfmts[] = {
89 SDE_PIX_FMT_XRGB_8888,
90 SDE_PIX_FMT_ARGB_8888,
91 SDE_PIX_FMT_ABGR_8888,
92 SDE_PIX_FMT_RGBA_8888,
93 SDE_PIX_FMT_BGRA_8888,
94 SDE_PIX_FMT_RGBX_8888,
95 SDE_PIX_FMT_BGRX_8888,
96 SDE_PIX_FMT_XBGR_8888,
97 SDE_PIX_FMT_RGBA_5551,
98 SDE_PIX_FMT_ARGB_1555,
99 SDE_PIX_FMT_ABGR_1555,
100 SDE_PIX_FMT_BGRA_5551,
101 SDE_PIX_FMT_BGRX_5551,
102 SDE_PIX_FMT_RGBX_5551,
103 SDE_PIX_FMT_XBGR_1555,
104 SDE_PIX_FMT_XRGB_1555,
105 SDE_PIX_FMT_ARGB_4444,
106 SDE_PIX_FMT_RGBA_4444,
107 SDE_PIX_FMT_BGRA_4444,
108 SDE_PIX_FMT_ABGR_4444,
109 SDE_PIX_FMT_RGBX_4444,
110 SDE_PIX_FMT_XRGB_4444,
111 SDE_PIX_FMT_BGRX_4444,
112 SDE_PIX_FMT_XBGR_4444,
113 SDE_PIX_FMT_RGB_888,
114 SDE_PIX_FMT_BGR_888,
115 SDE_PIX_FMT_RGB_565,
116 SDE_PIX_FMT_BGR_565,
117 SDE_PIX_FMT_Y_CB_CR_H2V2,
118 SDE_PIX_FMT_Y_CR_CB_H2V2,
119 SDE_PIX_FMT_Y_CR_CB_GH2V2,
120 SDE_PIX_FMT_Y_CBCR_H2V2,
121 SDE_PIX_FMT_Y_CRCB_H2V2,
122 SDE_PIX_FMT_Y_CBCR_H1V2,
123 SDE_PIX_FMT_Y_CRCB_H1V2,
124 SDE_PIX_FMT_Y_CBCR_H2V1,
125 SDE_PIX_FMT_Y_CRCB_H2V1,
126 SDE_PIX_FMT_YCBYCR_H2V1,
127 SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
128 SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
129 SDE_PIX_FMT_RGBA_8888_UBWC,
130 SDE_PIX_FMT_RGBX_8888_UBWC,
131 SDE_PIX_FMT_RGB_565_UBWC,
132 SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
133 SDE_PIX_FMT_RGBA_1010102,
134 SDE_PIX_FMT_RGBX_1010102,
135 SDE_PIX_FMT_ARGB_2101010,
136 SDE_PIX_FMT_XRGB_2101010,
137 SDE_PIX_FMT_BGRA_1010102,
138 SDE_PIX_FMT_BGRX_1010102,
139 SDE_PIX_FMT_ABGR_2101010,
140 SDE_PIX_FMT_XBGR_2101010,
141 SDE_PIX_FMT_RGBA_1010102_UBWC,
142 SDE_PIX_FMT_RGBX_1010102_UBWC,
143 SDE_PIX_FMT_Y_CBCR_H2V2_P010,
144 SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
145 SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
146};
147
148static u32 sde_hw_rotator_output_pixfmts[] = {
149 SDE_PIX_FMT_XRGB_8888,
150 SDE_PIX_FMT_ARGB_8888,
151 SDE_PIX_FMT_ABGR_8888,
152 SDE_PIX_FMT_RGBA_8888,
153 SDE_PIX_FMT_BGRA_8888,
154 SDE_PIX_FMT_RGBX_8888,
155 SDE_PIX_FMT_BGRX_8888,
156 SDE_PIX_FMT_XBGR_8888,
157 SDE_PIX_FMT_RGBA_5551,
158 SDE_PIX_FMT_ARGB_1555,
159 SDE_PIX_FMT_ABGR_1555,
160 SDE_PIX_FMT_BGRA_5551,
161 SDE_PIX_FMT_BGRX_5551,
162 SDE_PIX_FMT_RGBX_5551,
163 SDE_PIX_FMT_XBGR_1555,
164 SDE_PIX_FMT_XRGB_1555,
165 SDE_PIX_FMT_ARGB_4444,
166 SDE_PIX_FMT_RGBA_4444,
167 SDE_PIX_FMT_BGRA_4444,
168 SDE_PIX_FMT_ABGR_4444,
169 SDE_PIX_FMT_RGBX_4444,
170 SDE_PIX_FMT_XRGB_4444,
171 SDE_PIX_FMT_BGRX_4444,
172 SDE_PIX_FMT_XBGR_4444,
173 SDE_PIX_FMT_RGB_888,
174 SDE_PIX_FMT_BGR_888,
175 SDE_PIX_FMT_RGB_565,
176 SDE_PIX_FMT_BGR_565,
177 /* SDE_PIX_FMT_Y_CB_CR_H2V2 */
178 /* SDE_PIX_FMT_Y_CR_CB_H2V2 */
179 /* SDE_PIX_FMT_Y_CR_CB_GH2V2 */
180 SDE_PIX_FMT_Y_CBCR_H2V2,
181 SDE_PIX_FMT_Y_CRCB_H2V2,
182 SDE_PIX_FMT_Y_CBCR_H1V2,
183 SDE_PIX_FMT_Y_CRCB_H1V2,
184 SDE_PIX_FMT_Y_CBCR_H2V1,
185 SDE_PIX_FMT_Y_CRCB_H2V1,
186 /* SDE_PIX_FMT_YCBYCR_H2V1 */
187 SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
188 SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
189 SDE_PIX_FMT_RGBA_8888_UBWC,
190 SDE_PIX_FMT_RGBX_8888_UBWC,
191 SDE_PIX_FMT_RGB_565_UBWC,
192 SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
193 SDE_PIX_FMT_RGBA_1010102,
194 SDE_PIX_FMT_RGBX_1010102,
195 /* SDE_PIX_FMT_ARGB_2101010 */
196 /* SDE_PIX_FMT_XRGB_2101010 */
197 SDE_PIX_FMT_BGRA_1010102,
198 SDE_PIX_FMT_BGRX_1010102,
199 /* SDE_PIX_FMT_ABGR_2101010 */
200 /* SDE_PIX_FMT_XBGR_2101010 */
201 SDE_PIX_FMT_RGBA_1010102_UBWC,
202 SDE_PIX_FMT_RGBX_1010102_UBWC,
203 SDE_PIX_FMT_Y_CBCR_H2V2_P010,
204 SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
205 SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
206};
207
Benjamin Chan53e3bce2016-08-31 14:43:29 -0400208static struct sde_rot_vbif_debug_bus nrt_vbif_dbg_bus_r3[] = {
209 {0x214, 0x21c, 16, 1, 0x10}, /* arb clients */
210 {0x214, 0x21c, 0, 12, 0x13}, /* xin blocks - axi side */
211 {0x21c, 0x214, 0, 12, 0xc}, /* xin blocks - clock side */
212};
213
214static struct sde_rot_regdump sde_rot_r3_regdump[] = {
215 { "SDEROT_ROTTOP", SDE_ROT_ROTTOP_OFFSET, 0x100, SDE_ROT_REGDUMP_READ },
216 { "SDEROT_SSPP", SDE_ROT_SSPP_OFFSET, 0x200, SDE_ROT_REGDUMP_READ },
217 { "SDEROT_WB", SDE_ROT_WB_OFFSET, 0x300, SDE_ROT_REGDUMP_READ },
218 { "SDEROT_REGDMA_CSR", SDE_ROT_REGDMA_OFFSET, 0x100,
219 SDE_ROT_REGDUMP_READ },
220 /*
221 * Need to perform a SW reset to REGDMA in order to access the
222 * REGDMA RAM especially if REGDMA is waiting for Rotator IDLE.
223 * REGDMA RAM should be dump at last.
224 */
225 { "SDEROT_REGDMA_RESET", ROTTOP_SW_RESET_OVERRIDE, 1,
226 SDE_ROT_REGDUMP_WRITE },
227 { "SDEROT_REGDMA_RAM", SDE_ROT_REGDMA_RAM_OFFSET, 0x2000,
228 SDE_ROT_REGDUMP_READ },
229};
230
Alan Kwong818b7fc2016-07-24 22:07:41 -0400231/* Invalid software timestamp value for initialization */
232#define SDE_REGDMA_SWTS_INVALID (~0)
233
234/**
235 * sde_hw_rotator_elapsed_swts - Find difference of 2 software timestamps
236 * @ts_curr: current software timestamp
237 * @ts_prev: previous software timestamp
238 * @return: the amount ts_curr is ahead of ts_prev
239 */
240static int sde_hw_rotator_elapsed_swts(u32 ts_curr, u32 ts_prev)
241{
242 u32 diff = (ts_curr - ts_prev) & SDE_REGDMA_SWTS_MASK;
243
244 return sign_extend32(diff, (SDE_REGDMA_SWTS_SHIFT - 1));
245}
246
247/**
248 * sde_hw_rotator_pending_swts - Check if the given context is still pending
249 * @rot: Pointer to hw rotator
250 * @ctx: Pointer to rotator context
251 * @pswts: Pointer to returned reference software timestamp, optional
252 * @return: true if context has pending requests
253 */
254static int sde_hw_rotator_pending_swts(struct sde_hw_rotator *rot,
255 struct sde_hw_rotator_context *ctx, u32 *pswts)
256{
257 u32 swts;
258 int ts_diff;
259 bool pending;
260
261 if (ctx->last_regdma_timestamp == SDE_REGDMA_SWTS_INVALID)
262 swts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);
263 else
264 swts = ctx->last_regdma_timestamp;
265
266 if (ctx->q_id == ROT_QUEUE_LOW_PRIORITY)
267 swts >>= SDE_REGDMA_SWTS_SHIFT;
268
269 swts &= SDE_REGDMA_SWTS_MASK;
270
271 ts_diff = sde_hw_rotator_elapsed_swts(ctx->timestamp, swts);
272
273 if (pswts)
274 *pswts = swts;
275
276 pending = (ts_diff > 0) ? true : false;
277
278 SDEROT_DBG("ts:0x%x, queue_id:%d, swts:0x%x, pending:%d\n",
279 ctx->timestamp, ctx->q_id, swts, pending);
Benjamin Chan0f9e61d2016-09-16 16:01:09 -0400280 SDEROT_EVTLOG(ctx->timestamp, swts, ctx->q_id, ts_diff);
Alan Kwong818b7fc2016-07-24 22:07:41 -0400281 return pending;
282}
283
284/**
285 * sde_hw_rotator_enable_irq - Enable hw rotator interrupt with ref. count
286 * Also, clear rotator/regdma irq status.
287 * @rot: Pointer to hw rotator
288 */
289static void sde_hw_rotator_enable_irq(struct sde_hw_rotator *rot)
290{
291 SDEROT_DBG("irq_num:%d enabled:%d\n", rot->irq_num,
292 atomic_read(&rot->irq_enabled));
293
294 if (!atomic_read(&rot->irq_enabled)) {
295 if (rot->mode == ROT_REGDMA_OFF)
296 SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
297 ROT_DONE_MASK);
298 else
299 SDE_ROTREG_WRITE(rot->mdss_base,
300 REGDMA_CSR_REGDMA_INT_CLEAR, REGDMA_INT_MASK);
301
302 enable_irq(rot->irq_num);
303 }
304 atomic_inc(&rot->irq_enabled);
305}
306
307/**
308 * sde_hw_rotator_disable_irq - Disable hw rotator interrupt with ref. count
309 * Also, clear rotator/regdma irq enable masks.
310 * @rot: Pointer to hw rotator
311 */
312static void sde_hw_rotator_disable_irq(struct sde_hw_rotator *rot)
313{
314 SDEROT_DBG("irq_num:%d enabled:%d\n", rot->irq_num,
315 atomic_read(&rot->irq_enabled));
316
317 if (!atomic_read(&rot->irq_enabled)) {
318 SDEROT_ERR("irq %d is already disabled\n", rot->irq_num);
319 return;
320 }
321
322 if (!atomic_dec_return(&rot->irq_enabled)) {
323 if (rot->mode == ROT_REGDMA_OFF)
324 SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_EN, 0);
325 else
326 SDE_ROTREG_WRITE(rot->mdss_base,
327 REGDMA_CSR_REGDMA_INT_EN, 0);
328 /* disable irq after last pending irq is handled, if any */
329 synchronize_irq(rot->irq_num);
330 disable_irq_nosync(rot->irq_num);
331 }
332}
333
334/**
335 * sde_hw_rotator_dump_status - Dump hw rotator status on error
336 * @rot: Pointer to hw rotator
337 */
338static void sde_hw_rotator_dump_status(struct sde_hw_rotator *rot)
339{
340 SDEROT_ERR(
341 "op_mode = %x, int_en = %x, int_status = %x\n",
342 SDE_ROTREG_READ(rot->mdss_base,
343 REGDMA_CSR_REGDMA_OP_MODE),
344 SDE_ROTREG_READ(rot->mdss_base,
345 REGDMA_CSR_REGDMA_INT_EN),
346 SDE_ROTREG_READ(rot->mdss_base,
347 REGDMA_CSR_REGDMA_INT_STATUS));
348
349 SDEROT_ERR(
350 "ts = %x, q0_status = %x, q1_status = %x, block_status = %x\n",
351 SDE_ROTREG_READ(rot->mdss_base,
352 REGDMA_TIMESTAMP_REG),
353 SDE_ROTREG_READ(rot->mdss_base,
354 REGDMA_CSR_REGDMA_QUEUE_0_STATUS),
355 SDE_ROTREG_READ(rot->mdss_base,
356 REGDMA_CSR_REGDMA_QUEUE_1_STATUS),
357 SDE_ROTREG_READ(rot->mdss_base,
358 REGDMA_CSR_REGDMA_BLOCK_STATUS));
359
360 SDEROT_ERR(
361 "invalid_cmd_offset = %x, fsm_state = %x\n",
362 SDE_ROTREG_READ(rot->mdss_base,
363 REGDMA_CSR_REGDMA_INVALID_CMD_RAM_OFFSET),
364 SDE_ROTREG_READ(rot->mdss_base,
365 REGDMA_CSR_REGDMA_FSM_STATE));
366}
367
Alan Kwong9487de22016-01-16 22:06:36 -0500368/**
369 * sde_hw_rotator_get_ctx(): Retrieve rotator context from rotator HW based
370 * on provided session_id. Each rotator has a different session_id.
371 */
372static struct sde_hw_rotator_context *sde_hw_rotator_get_ctx(
373 struct sde_hw_rotator *rot, u32 session_id,
374 enum sde_rot_queue_prio q_id)
375{
376 int i;
377 struct sde_hw_rotator_context *ctx = NULL;
378
379 for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++) {
380 ctx = rot->rotCtx[q_id][i];
381
382 if (ctx && (ctx->session_id == session_id)) {
383 SDEROT_DBG(
384 "rotCtx sloti[%d][%d] ==> ctx:%p | session-id:%d\n",
385 q_id, i, ctx, ctx->session_id);
386 return ctx;
387 }
388 }
389
390 return NULL;
391}
392
393/*
394 * sde_hw_rotator_map_vaddr - map the debug buffer to kernel space
395 * @dbgbuf: Pointer to debug buffer
396 * @buf: Pointer to layer buffer structure
397 * @data: Pointer to h/w mapped buffer structure
398 */
399static void sde_hw_rotator_map_vaddr(struct sde_dbg_buf *dbgbuf,
400 struct sde_layer_buffer *buf, struct sde_mdp_data *data)
401{
402 dbgbuf->dmabuf = data->p[0].srcp_dma_buf;
403 dbgbuf->buflen = data->p[0].srcp_dma_buf->size;
404
405 dbgbuf->vaddr = NULL;
406 dbgbuf->width = buf->width;
407 dbgbuf->height = buf->height;
408
409 if (dbgbuf->dmabuf && (dbgbuf->buflen > 0)) {
410 dma_buf_begin_cpu_access(dbgbuf->dmabuf, 0, dbgbuf->buflen,
411 DMA_FROM_DEVICE);
412 dbgbuf->vaddr = dma_buf_kmap(dbgbuf->dmabuf, 0);
413 SDEROT_DBG("vaddr mapping: 0x%p/%ld w:%d/h:%d\n",
414 dbgbuf->vaddr, dbgbuf->buflen,
415 dbgbuf->width, dbgbuf->height);
416 }
417}
418
419/*
420 * sde_hw_rotator_unmap_vaddr - unmap the debug buffer from kernel space
421 * @dbgbuf: Pointer to debug buffer
422 */
423static void sde_hw_rotator_unmap_vaddr(struct sde_dbg_buf *dbgbuf)
424{
425 if (dbgbuf->vaddr) {
426 dma_buf_kunmap(dbgbuf->dmabuf, 0, dbgbuf->vaddr);
427 dma_buf_end_cpu_access(dbgbuf->dmabuf, 0, dbgbuf->buflen,
428 DMA_FROM_DEVICE);
429 }
430
431 dbgbuf->vaddr = NULL;
432 dbgbuf->dmabuf = NULL;
433 dbgbuf->buflen = 0;
434 dbgbuf->width = 0;
435 dbgbuf->height = 0;
436}
437
438/*
439 * sde_hw_rotator_setup_timestamp_packet - setup timestamp writeback command
440 * @ctx: Pointer to rotator context
441 * @mask: Bit mask location of the timestamp
442 * @swts: Software timestamp
443 */
444static void sde_hw_rotator_setup_timestamp_packet(
445 struct sde_hw_rotator_context *ctx, u32 mask, u32 swts)
446{
447 u32 *wrptr;
448
449 wrptr = sde_hw_rotator_get_regdma_segment(ctx);
450
451 /*
452 * Create a dummy packet write out to 1 location for timestamp
453 * generation.
454 */
455 SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_SIZE, 6);
456 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x00010001);
457 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
458 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
459 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x00010001);
460 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
461 SDE_REGDMA_BLKWRITE_DATA(wrptr, ctx->ts_addr);
462 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_YSTRIDE0, 4);
463 SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_FORMAT, 4);
464 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x004037FF);
465 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x03020100);
466 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x80000000);
467 SDE_REGDMA_BLKWRITE_DATA(wrptr, ctx->timestamp);
Benjamin Chan15c93d82016-08-29 10:04:22 -0400468 /*
469 * Must clear secure buffer setting for SW timestamp because
470 * SW timstamp buffer allocation is always non-secure region.
471 */
472 if (ctx->is_secure) {
473 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_ADDR_SW_STATUS, 0);
474 SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ADDR_SW_STATUS, 0);
475 }
Alan Kwong9487de22016-01-16 22:06:36 -0500476 SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_WB_DST_FORMAT, 4);
477 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x000037FF);
478 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
479 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x03020100);
480 SDE_REGDMA_BLKWRITE_DATA(wrptr, ctx->ts_addr);
481 SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_YSTRIDE0, 4);
482 SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_SIZE, 0x00010001);
483 SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_IMG_SIZE, 0x00010001);
484 SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_XY, 0);
485 SDE_REGDMA_WRITE(wrptr, ROTTOP_DNSC, 0);
486 SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, 1);
487 SDE_REGDMA_MODIFY(wrptr, REGDMA_TIMESTAMP_REG, mask, swts);
488 SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, 1);
489
490 sde_hw_rotator_put_regdma_segment(ctx, wrptr);
491}
492
493/*
494 * sde_hw_rotator_setup_fetchengine - setup fetch engine
495 * @ctx: Pointer to rotator context
496 * @queue_id: Priority queue identifier
497 * @cfg: Fetch configuration
498 * @danger_lut: real-time QoS LUT for danger setting (not used)
499 * @safe_lut: real-time QoS LUT for safe setting (not used)
Benjamin Chanfb6faa32016-08-16 17:21:01 -0400500 * @dnsc_factor_w: downscale factor for width
501 * @dnsc_factor_h: downscale factor for height
Alan Kwong9487de22016-01-16 22:06:36 -0500502 * @flags: Control flag
503 */
504static void sde_hw_rotator_setup_fetchengine(struct sde_hw_rotator_context *ctx,
505 enum sde_rot_queue_prio queue_id,
506 struct sde_hw_rot_sspp_cfg *cfg, u32 danger_lut, u32 safe_lut,
Benjamin Chanfb6faa32016-08-16 17:21:01 -0400507 u32 dnsc_factor_w, u32 dnsc_factor_h, u32 flags)
Alan Kwong9487de22016-01-16 22:06:36 -0500508{
509 struct sde_hw_rotator *rot = ctx->rot;
510 struct sde_mdp_format_params *fmt;
511 struct sde_mdp_data *data;
Benjamin Chanfb6faa32016-08-16 17:21:01 -0400512 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
Alan Kwong9487de22016-01-16 22:06:36 -0500513 u32 *wrptr;
514 u32 opmode = 0;
515 u32 chroma_samp = 0;
516 u32 src_format = 0;
517 u32 unpack = 0;
518 u32 width = cfg->img_width;
519 u32 height = cfg->img_height;
520 u32 fetch_blocksize = 0;
521 int i;
522
523 if (ctx->rot->mode == ROT_REGDMA_ON) {
524 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_INT_EN,
525 REGDMA_INT_MASK);
526 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_OP_MODE,
527 REGDMA_EN);
528 }
529
530 wrptr = sde_hw_rotator_get_regdma_segment(ctx);
531
532 /* source image setup */
533 if ((flags & SDE_ROT_FLAG_DEINTERLACE)
534 && !(flags & SDE_ROT_FLAG_SOURCE_ROTATED_90)) {
535 for (i = 0; i < cfg->src_plane.num_planes; i++)
536 cfg->src_plane.ystride[i] *= 2;
537 width *= 2;
538 height /= 2;
539 }
540
541 /*
542 * REGDMA BLK write from SRC_SIZE to OP_MODE, total 15 registers
543 */
544 SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_SIZE, 15);
545
546 /* SRC_SIZE, SRC_IMG_SIZE, SRC_XY, OUT_SIZE, OUT_XY */
547 SDE_REGDMA_BLKWRITE_DATA(wrptr,
548 cfg->src_rect->w | (cfg->src_rect->h << 16));
549 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0); /* SRC_IMG_SIZE unused */
550 SDE_REGDMA_BLKWRITE_DATA(wrptr,
551 cfg->src_rect->x | (cfg->src_rect->y << 16));
552 SDE_REGDMA_BLKWRITE_DATA(wrptr,
553 cfg->src_rect->w | (cfg->src_rect->h << 16));
554 SDE_REGDMA_BLKWRITE_DATA(wrptr,
555 cfg->src_rect->x | (cfg->src_rect->y << 16));
556
557 /* SRC_ADDR [0-3], SRC_YSTRIDE [0-1] */
558 data = cfg->data;
559 for (i = 0; i < SDE_ROT_MAX_PLANES; i++)
560 SDE_REGDMA_BLKWRITE_DATA(wrptr, data->p[i].addr);
561 SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->src_plane.ystride[0] |
562 (cfg->src_plane.ystride[1] << 16));
563 SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->src_plane.ystride[2] |
564 (cfg->src_plane.ystride[3] << 16));
565
566 /* UNUSED, write 0 */
567 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
568
569 /* setup source format */
570 fmt = cfg->fmt;
571
572 chroma_samp = fmt->chroma_sample;
573 if (flags & SDE_ROT_FLAG_SOURCE_ROTATED_90) {
574 if (chroma_samp == SDE_MDP_CHROMA_H2V1)
575 chroma_samp = SDE_MDP_CHROMA_H1V2;
576 else if (chroma_samp == SDE_MDP_CHROMA_H1V2)
577 chroma_samp = SDE_MDP_CHROMA_H2V1;
578 }
579
580 src_format = (chroma_samp << 23) |
581 (fmt->fetch_planes << 19) |
582 (fmt->bits[C3_ALPHA] << 6) |
583 (fmt->bits[C2_R_Cr] << 4) |
584 (fmt->bits[C1_B_Cb] << 2) |
585 (fmt->bits[C0_G_Y] << 0);
586
587 if (fmt->alpha_enable &&
588 (fmt->fetch_planes == SDE_MDP_PLANE_INTERLEAVED))
589 src_format |= BIT(8); /* SRCC3_EN */
590
591 src_format |= ((fmt->unpack_count - 1) << 12) |
592 (fmt->unpack_tight << 17) |
593 (fmt->unpack_align_msb << 18) |
594 ((fmt->bpp - 1) << 9) |
595 ((fmt->frame_format & 3) << 30);
596
597 if (flags & SDE_ROT_FLAG_ROT_90)
598 src_format |= BIT(11); /* ROT90 */
599
600 if (sde_mdp_is_ubwc_format(fmt))
601 opmode |= BIT(0); /* BWC_DEC_EN */
602
603 /* if this is YUV pixel format, enable CSC */
604 if (sde_mdp_is_yuv_format(fmt))
605 src_format |= BIT(15); /* SRC_COLOR_SPACE */
606
607 if (fmt->pixel_mode == SDE_MDP_PIXEL_10BIT)
608 src_format |= BIT(14); /* UNPACK_DX_FORMAT */
609
610 /* SRC_FORMAT */
611 SDE_REGDMA_BLKWRITE_DATA(wrptr, src_format);
612
613 /* setup source unpack pattern */
614 unpack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
615 (fmt->element[1] << 8) | (fmt->element[0] << 0);
616
617 /* SRC_UNPACK_PATTERN */
618 SDE_REGDMA_BLKWRITE_DATA(wrptr, unpack);
619
620 /* setup source op mode */
621 if (flags & SDE_ROT_FLAG_FLIP_LR)
622 opmode |= BIT(13); /* FLIP_MODE L/R horizontal flip */
623 if (flags & SDE_ROT_FLAG_FLIP_UD)
624 opmode |= BIT(14); /* FLIP_MODE U/D vertical flip */
625 opmode |= BIT(31); /* MDSS_MDP_OP_PE_OVERRIDE */
626
627 /* SRC_OP_MODE */
628 SDE_REGDMA_BLKWRITE_DATA(wrptr, opmode);
629
630 /* setup source fetch config, TP10 uses different block size */
Benjamin Chanfb6faa32016-08-16 17:21:01 -0400631 if (test_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map) &&
632 (dnsc_factor_w == 1) && (dnsc_factor_h == 1)) {
633 if (sde_mdp_is_tp10_format(fmt))
634 fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_144_EXT;
635 else
636 fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_192_EXT;
637 } else {
638 if (sde_mdp_is_tp10_format(fmt))
639 fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_96;
640 else
641 fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_128;
642 }
643
Alan Kwong9487de22016-01-16 22:06:36 -0500644 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_FETCH_CONFIG,
645 fetch_blocksize |
646 SDE_ROT_SSPP_FETCH_CONFIG_RESET_VALUE |
647 ((rot->highest_bank & 0x3) << 18));
648
649 /* setup source buffer plane security status */
Abhijit Kulkarni298c8232016-09-26 22:32:10 -0700650 if (flags & (SDE_ROT_FLAG_SECURE_OVERLAY_SESSION |
651 SDE_ROT_FLAG_SECURE_CAMERA_SESSION)) {
Alan Kwong9487de22016-01-16 22:06:36 -0500652 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_ADDR_SW_STATUS, 0xF);
653 ctx->is_secure = true;
Benjamin Chan15c93d82016-08-29 10:04:22 -0400654 } else {
655 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_ADDR_SW_STATUS, 0);
656 ctx->is_secure = false;
Alan Kwong9487de22016-01-16 22:06:36 -0500657 }
658
Benjamin Chan99eb63b2016-12-21 15:45:26 -0500659 /*
660 * Determine if traffic shaping is required. Only enable traffic
661 * shaping when content is 4k@30fps. The actual traffic shaping
662 * bandwidth calculation is done in output setup.
663 */
664 if (((cfg->src_rect->w * cfg->src_rect->h) >= RES_UHD) &&
665 (cfg->fps <= 30)) {
666 SDEROT_DBG("Enable Traffic Shaper\n");
667 ctx->is_traffic_shaping = true;
668 } else {
669 SDEROT_DBG("Disable Traffic Shaper\n");
670 ctx->is_traffic_shaping = false;
671 }
672
Alan Kwong9487de22016-01-16 22:06:36 -0500673 /* Update command queue write ptr */
674 sde_hw_rotator_put_regdma_segment(ctx, wrptr);
675}
676
677/*
678 * sde_hw_rotator_setup_wbengine - setup writeback engine
679 * @ctx: Pointer to rotator context
680 * @queue_id: Priority queue identifier
681 * @cfg: Writeback configuration
682 * @flags: Control flag
683 */
684static void sde_hw_rotator_setup_wbengine(struct sde_hw_rotator_context *ctx,
685 enum sde_rot_queue_prio queue_id,
686 struct sde_hw_rot_wb_cfg *cfg,
687 u32 flags)
688{
689 struct sde_mdp_format_params *fmt;
690 u32 *wrptr;
691 u32 pack = 0;
692 u32 dst_format = 0;
693 int i;
694
695 wrptr = sde_hw_rotator_get_regdma_segment(ctx);
696
697 fmt = cfg->fmt;
698
699 /* setup WB DST format */
700 dst_format |= (fmt->chroma_sample << 23) |
701 (fmt->fetch_planes << 19) |
702 (fmt->bits[C3_ALPHA] << 6) |
703 (fmt->bits[C2_R_Cr] << 4) |
704 (fmt->bits[C1_B_Cb] << 2) |
705 (fmt->bits[C0_G_Y] << 0);
706
707 /* alpha control */
708 if (fmt->bits[C3_ALPHA] || fmt->alpha_enable) {
709 dst_format |= BIT(8);
710 if (!fmt->alpha_enable) {
711 dst_format |= BIT(14);
712 SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ALPHA_X_VALUE, 0);
713 }
714 }
715
716 dst_format |= ((fmt->unpack_count - 1) << 12) |
717 (fmt->unpack_tight << 17) |
718 (fmt->unpack_align_msb << 18) |
719 ((fmt->bpp - 1) << 9) |
720 ((fmt->frame_format & 3) << 30);
721
722 if (sde_mdp_is_yuv_format(fmt))
723 dst_format |= BIT(15);
724
725 if (fmt->pixel_mode == SDE_MDP_PIXEL_10BIT)
726 dst_format |= BIT(21); /* PACK_DX_FORMAT */
727
728 /*
729 * REGDMA BLK write, from DST_FORMAT to DST_YSTRIDE 1, total 9 regs
730 */
731 SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_WB_DST_FORMAT, 9);
732
733 /* DST_FORMAT */
734 SDE_REGDMA_BLKWRITE_DATA(wrptr, dst_format);
735
736 /* DST_OP_MODE */
737 if (sde_mdp_is_ubwc_format(fmt))
738 SDE_REGDMA_BLKWRITE_DATA(wrptr, BIT(0));
739 else
740 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
741
742 /* DST_PACK_PATTERN */
743 pack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
744 (fmt->element[1] << 8) | (fmt->element[0] << 0);
745 SDE_REGDMA_BLKWRITE_DATA(wrptr, pack);
746
747 /* DST_ADDR [0-3], DST_YSTRIDE [0-1] */
748 for (i = 0; i < SDE_ROT_MAX_PLANES; i++)
749 SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->data->p[i].addr);
750 SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->dst_plane.ystride[0] |
751 (cfg->dst_plane.ystride[1] << 16));
752 SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->dst_plane.ystride[2] |
753 (cfg->dst_plane.ystride[3] << 16));
754
755 /* setup WB out image size and ROI */
756 SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_IMG_SIZE,
757 cfg->img_width | (cfg->img_height << 16));
758 SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_SIZE,
759 cfg->dst_rect->w | (cfg->dst_rect->h << 16));
760 SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_XY,
761 cfg->dst_rect->x | (cfg->dst_rect->y << 16));
762
Abhijit Kulkarni298c8232016-09-26 22:32:10 -0700763 if (flags & (SDE_ROT_FLAG_SECURE_OVERLAY_SESSION |
764 SDE_ROT_FLAG_SECURE_CAMERA_SESSION))
Benjamin Chan15c93d82016-08-29 10:04:22 -0400765 SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ADDR_SW_STATUS, 0x1);
766 else
767 SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ADDR_SW_STATUS, 0);
768
Alan Kwong9487de22016-01-16 22:06:36 -0500769 /*
770 * setup Downscale factor
771 */
772 SDE_REGDMA_WRITE(wrptr, ROTTOP_DNSC,
773 cfg->v_downscale_factor |
774 (cfg->h_downscale_factor << 16));
775
776 /* write config setup for bank configration */
777 SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_WRITE_CONFIG,
778 (ctx->rot->highest_bank & 0x3) << 8);
779
780 if (flags & SDE_ROT_FLAG_ROT_90)
781 SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, 0x3);
782 else
783 SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, 0x1);
784
Benjamin Chan99eb63b2016-12-21 15:45:26 -0500785 /* setup traffic shaper for 4k 30fps content */
786 if (ctx->is_traffic_shaping) {
787 u32 bw;
788
789 /*
790 * Target to finish in 12ms, and we need to set number of bytes
791 * per clock tick for traffic shaping.
792 * Each clock tick run @ 19.2MHz, so we need we know total of
793 * clock ticks in 14ms, i.e. 12ms/(1/19.2MHz) ==> 23040
794 * Finally, calcualte the byte count per clock tick based on
795 * resolution, bpp and compression ratio.
796 */
797 bw = cfg->dst_rect->w * cfg->dst_rect->h;
798
799 if (fmt->chroma_sample == SDE_MDP_CHROMA_420)
800 bw = (bw * 3) / 2;
801 else
802 bw *= fmt->bpp;
803
804 bw /= TRAFFIC_SHAPE_CLKTICK_12MS;
805 if (bw > 0xFF)
806 bw = 0xFF;
807 SDE_REGDMA_WRITE(wrptr, ROT_WB_TRAFFIC_SHAPER_WR_CLIENT,
808 BIT(31) | bw);
809 SDEROT_DBG("Enable ROT_WB Traffic Shaper:%d\n", bw);
810 } else {
811 SDE_REGDMA_WRITE(wrptr, ROT_WB_TRAFFIC_SHAPER_WR_CLIENT, 0);
812 SDEROT_DBG("Disable ROT_WB Traffic Shaper\n");
813 }
814
Alan Kwong9487de22016-01-16 22:06:36 -0500815 /* Update command queue write ptr */
816 sde_hw_rotator_put_regdma_segment(ctx, wrptr);
817}
818
819/*
820 * sde_hw_rotator_start_no_regdma - start non-regdma operation
821 * @ctx: Pointer to rotator context
822 * @queue_id: Priority queue identifier
823 */
824static u32 sde_hw_rotator_start_no_regdma(struct sde_hw_rotator_context *ctx,
825 enum sde_rot_queue_prio queue_id)
826{
827 struct sde_hw_rotator *rot = ctx->rot;
828 u32 *wrptr;
829 u32 *rdptr;
830 u8 *addr;
831 u32 mask;
832 u32 blksize;
833
834 rdptr = sde_hw_rotator_get_regdma_segment_base(ctx);
835 wrptr = sde_hw_rotator_get_regdma_segment(ctx);
836
837 if (rot->irq_num >= 0) {
838 SDE_REGDMA_WRITE(wrptr, ROTTOP_INTR_EN, 1);
839 SDE_REGDMA_WRITE(wrptr, ROTTOP_INTR_CLEAR, 1);
840 reinit_completion(&ctx->rot_comp);
Alan Kwong818b7fc2016-07-24 22:07:41 -0400841 sde_hw_rotator_enable_irq(rot);
Alan Kwong9487de22016-01-16 22:06:36 -0500842 }
843
844 SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, 1);
845
846 /* Update command queue write ptr */
847 sde_hw_rotator_put_regdma_segment(ctx, wrptr);
848
849 SDEROT_DBG("BEGIN %d\n", ctx->timestamp);
850 /* Write all command stream to Rotator blocks */
851 /* Rotator will start right away after command stream finish writing */
852 while (rdptr < wrptr) {
853 u32 op = REGDMA_OP_MASK & *rdptr;
854
855 switch (op) {
856 case REGDMA_OP_NOP:
857 SDEROT_DBG("NOP\n");
858 rdptr++;
859 break;
860 case REGDMA_OP_REGWRITE:
861 SDEROT_DBG("REGW %6.6x %8.8x\n",
862 rdptr[0] & REGDMA_ADDR_OFFSET_MASK,
863 rdptr[1]);
864 addr = rot->mdss_base +
865 (*rdptr++ & REGDMA_ADDR_OFFSET_MASK);
866 writel_relaxed(*rdptr++, addr);
867 break;
868 case REGDMA_OP_REGMODIFY:
869 SDEROT_DBG("REGM %6.6x %8.8x %8.8x\n",
870 rdptr[0] & REGDMA_ADDR_OFFSET_MASK,
871 rdptr[1], rdptr[2]);
872 addr = rot->mdss_base +
873 (*rdptr++ & REGDMA_ADDR_OFFSET_MASK);
874 mask = *rdptr++;
875 writel_relaxed((readl_relaxed(addr) & mask) | *rdptr++,
876 addr);
877 break;
878 case REGDMA_OP_BLKWRITE_SINGLE:
879 SDEROT_DBG("BLKWS %6.6x %6.6x\n",
880 rdptr[0] & REGDMA_ADDR_OFFSET_MASK,
881 rdptr[1]);
882 addr = rot->mdss_base +
883 (*rdptr++ & REGDMA_ADDR_OFFSET_MASK);
884 blksize = *rdptr++;
885 while (blksize--) {
886 SDEROT_DBG("DATA %8.8x\n", rdptr[0]);
887 writel_relaxed(*rdptr++, addr);
888 }
889 break;
890 case REGDMA_OP_BLKWRITE_INC:
891 SDEROT_DBG("BLKWI %6.6x %6.6x\n",
892 rdptr[0] & REGDMA_ADDR_OFFSET_MASK,
893 rdptr[1]);
894 addr = rot->mdss_base +
895 (*rdptr++ & REGDMA_ADDR_OFFSET_MASK);
896 blksize = *rdptr++;
897 while (blksize--) {
898 SDEROT_DBG("DATA %8.8x\n", rdptr[0]);
899 writel_relaxed(*rdptr++, addr);
900 addr += 4;
901 }
902 break;
903 default:
904 /* Other not supported OP mode
905 * Skip data for now for unregonized OP mode
906 */
907 SDEROT_DBG("UNDEFINED\n");
908 rdptr++;
909 break;
910 }
911 }
912 SDEROT_DBG("END %d\n", ctx->timestamp);
913
914 return ctx->timestamp;
915}
916
/*
 * sde_hw_rotator_start_regdma - start regdma operation
 * @ctx: Pointer to rotator context
 * @queue_id: Priority queue identifier
 *
 * Appends the final ROT_START command to the context's REGDMA command
 * segment, then submits the segment to the selected priority queue in two
 * parts: first the rotation commands, then a timestamp-write packet whose
 * completion interrupt is enabled. Returns the context's timestamp, which
 * the wait path later compares against the software timestamp register.
 */
static u32 sde_hw_rotator_start_regdma(struct sde_hw_rotator_context *ctx,
		enum sde_rot_queue_prio queue_id)
{
	struct sde_hw_rotator *rot = ctx->rot;
	u32 *wrptr;
	u32 regdmaSlot;
	u32 offset;
	long length;
	long ts_length;
	u32 enableInt;
	u32 swts = 0;
	u32 mask = 0;

	wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	/*
	 * Last ROT command must be ROT_START before REGDMA start
	 */
	SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, 1);
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);

	/*
	 * Start REGDMA with command offset and size
	 */
	regdmaSlot = sde_hw_rotator_get_regdma_ctxidx(ctx);
	/* length/offset are in 32-bit words relative to the REGDMA cmd RAM */
	length = ((long)wrptr - (long)ctx->regdma_base) / 4;
	offset = (u32)(ctx->regdma_base - (u32 *)(rot->mdss_base +
				REGDMA_RAM_REGDMA_CMD_RAM));
	/*
	 * Interrupt line selection alternates with the timestamp parity so
	 * back-to-back submissions on the same queue use distinct interrupts.
	 */
	enableInt = ((ctx->timestamp & 1) + 1) << 30;

	SDEROT_DBG(
		"regdma(%d)[%d] <== INT:0x%X|length:%ld|offset:0x%X, ts:%X\n",
		queue_id, regdmaSlot, enableInt, length, offset,
		ctx->timestamp);

	/* ensure the command packet is issued before the submit command */
	wmb();

	/* REGDMA submission for current context */
	if (queue_id == ROT_QUEUE_HIGH_PRIORITY) {
		SDE_ROTREG_WRITE(rot->mdss_base,
				REGDMA_CSR_REGDMA_QUEUE_0_SUBMIT,
				(length << 14) | offset);
		/* high priority queue owns the lower half of the sw ts reg */
		swts = ctx->timestamp;
		mask = ~SDE_REGDMA_SWTS_MASK;
	} else {
		SDE_ROTREG_WRITE(rot->mdss_base,
				REGDMA_CSR_REGDMA_QUEUE_1_SUBMIT,
				(length << 14) | offset);
		/* low priority queue owns the upper half of the sw ts reg */
		swts = ctx->timestamp << SDE_REGDMA_SWTS_SHIFT;
		mask = ~(SDE_REGDMA_SWTS_MASK << SDE_REGDMA_SWTS_SHIFT);
	}

	/* Write timestamp after previous rotator job finished */
	sde_hw_rotator_setup_timestamp_packet(ctx, mask, swts);
	offset += length;
	ts_length = sde_hw_rotator_get_regdma_segment(ctx) - wrptr;
	WARN_ON((length + ts_length) > SDE_HW_ROT_REGDMA_SEG_SIZE);

	/* ensure command packet is issued before the submit command */
	wmb();

	/* second submit: timestamp packet only, with completion irq enabled */
	if (queue_id == ROT_QUEUE_HIGH_PRIORITY) {
		SDE_ROTREG_WRITE(rot->mdss_base,
				REGDMA_CSR_REGDMA_QUEUE_0_SUBMIT,
				enableInt | (ts_length << 14) | offset);
	} else {
		SDE_ROTREG_WRITE(rot->mdss_base,
				REGDMA_CSR_REGDMA_QUEUE_1_SUBMIT,
				enableInt | (ts_length << 14) | offset);
	}

	/* Update command queue write ptr */
	/*
	 * NOTE(review): wrptr here is the pre-timestamp-packet pointer, so
	 * the timestamp packet region appears to be rewound/reused on the
	 * next submission — confirm this is the intended segment reuse.
	 */
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);

	return ctx->timestamp;
}
999
/*
 * sde_hw_rotator_wait_done_no_regdma - wait for non-regdma completion
 * @ctx: Pointer to rotator context
 * @queue_id: Priority queue identifier
 * @flags: Option flag
 *
 * Blocks until the rotator signals done (via completion when an irq line
 * is available, otherwise by polling ROTTOP_STATUS), and returns 0 on
 * success or -ENODEV if the hardware reported an error.
 */
static u32 sde_hw_rotator_wait_done_no_regdma(
		struct sde_hw_rotator_context *ctx,
		enum sde_rot_queue_prio queue_id, u32 flag)
{
	struct sde_hw_rotator *rot = ctx->rot;
	int rc = 0;
	u32 sts = 0;
	u32 status;
	unsigned long flags;

	if (rot->irq_num >= 0) {
		/* irq path: the isr completes ctx->rot_comp on ROT_DONE */
		SDEROT_DBG("Wait for Rotator completion\n");
		rc = wait_for_completion_timeout(&ctx->rot_comp,
					KOFF_TIMEOUT);

		/* read status under the isr lock to avoid racing the isr */
		spin_lock_irqsave(&rot->rotisr_lock, flags);
		status = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
		if (rc == 0) {
			/*
			 * Timeout, there might be error,
			 * or rotator still busy
			 */
			if (status & ROT_BUSY_BIT)
				SDEROT_ERR(
					"Timeout waiting for rotator done\n");
			else if (status & ROT_ERROR_BIT)
				SDEROT_ERR(
					"Rotator report error status\n");
			else
				SDEROT_WARN(
					"Timeout waiting, but rotator job is done!!\n");

			sde_hw_rotator_disable_irq(rot);
		}
		spin_unlock_irqrestore(&rot->rotisr_lock, flags);
	} else {
		/* polling path: up to 200 x 500us = 100ms busy-wait */
		int cnt = 200;

		do {
			udelay(500);
			status = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
			cnt--;
		} while ((cnt > 0) && (status & ROT_BUSY_BIT)
				&& ((status & ROT_ERROR_BIT) == 0));

		if (status & ROT_ERROR_BIT)
			SDEROT_ERR("Rotator error\n");
		else if (status & ROT_BUSY_BIT)
			SDEROT_ERR("Rotator busy\n");

		/* ack the done interrupt bit even in polling mode */
		SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
				ROT_DONE_CLEAR);
	}

	/* negative errno returned through a u32 per this driver's convention */
	sts = (status & ROT_ERROR_BIT) ? -ENODEV : 0;

	return sts;
}
1064
/*
 * sde_hw_rotator_wait_done_regdma - wait for regdma completion
 * @ctx: Pointer to rotator context
 * @queue_id: Priority queue identifier
 * @flags: Option flag
 *
 * Waits until this context's software timestamp is no longer pending
 * (i.e. REGDMA has executed the timestamp packet submitted at kickoff).
 * Uses the regdma wait queue when an irq line is available, otherwise
 * polls the interrupt status register. Returns 0 on success or -ENODEV
 * on error/timeout; errors additionally trigger a debug status dump and
 * an event-log timeout handler.
 */
static u32 sde_hw_rotator_wait_done_regdma(
		struct sde_hw_rotator_context *ctx,
		enum sde_rot_queue_prio queue_id, u32 flag)
{
	struct sde_hw_rotator *rot = ctx->rot;
	int rc = 0;
	u32 status;
	u32 last_isr;
	u32 last_ts;
	u32 int_id;
	u32 swts;
	u32 sts = 0;
	unsigned long flags;

	if (rot->irq_num >= 0) {
		/* irq path: isr updates the sw timestamp and wakes us */
		SDEROT_DBG("Wait for REGDMA completion, ctx:%p, ts:%X\n",
				ctx, ctx->timestamp);
		rc = wait_event_timeout(ctx->regdma_waitq,
				!sde_hw_rotator_pending_swts(rot, ctx, &swts),
				KOFF_TIMEOUT);

		ATRACE_INT("sde_rot_done", 0);
		spin_lock_irqsave(&rot->rotisr_lock, flags);

		/* snapshot the state recorded by the isr, under its lock */
		last_isr = ctx->last_regdma_isr_status;
		last_ts = ctx->last_regdma_timestamp;
		status = last_isr & REGDMA_INT_MASK;
		int_id = last_ts & 1;
		SDEROT_DBG("INT status:0x%X, INT id:%d, timestamp:0x%X\n",
				status, int_id, last_ts);

		if (rc == 0 || (status & REGDMA_INT_ERR_MASK)) {
			bool pending;

			/* timeout or hw error: re-check and log details */
			pending = sde_hw_rotator_pending_swts(rot, ctx, &swts);
			SDEROT_ERR(
				"Timeout wait for regdma interrupt status, ts:0x%X/0x%X pending:%d\n",
				ctx->timestamp, swts, pending);

			if (status & REGDMA_WATCHDOG_INT)
				SDEROT_ERR("REGDMA watchdog interrupt\n");
			else if (status & REGDMA_INVALID_DESCRIPTOR)
				SDEROT_ERR("REGDMA invalid descriptor\n");
			else if (status & REGDMA_INCOMPLETE_CMD)
				SDEROT_ERR("REGDMA incomplete command\n");
			else if (status & REGDMA_INVALID_CMD)
				SDEROT_ERR("REGDMA invalid command\n");

			sde_hw_rotator_dump_status(rot);
			status = ROT_ERROR_BIT;
		} else {
			/* job completed; rc==1 means wait returned at expiry */
			if (rc == 1)
				SDEROT_WARN(
					"REGDMA done but no irq, ts:0x%X/0x%X\n",
					ctx->timestamp, swts);
			status = 0;
		}

		spin_unlock_irqrestore(&rot->rotisr_lock, flags);
	} else {
		/* polling path: up to 200 x 500us = 100ms busy-wait */
		int cnt = 200;
		bool pending;

		do {
			udelay(500);
			last_isr = SDE_ROTREG_READ(rot->mdss_base,
					REGDMA_CSR_REGDMA_INT_STATUS);
			pending = sde_hw_rotator_pending_swts(rot, ctx, &swts);
			cnt--;
		} while ((cnt > 0) && pending &&
				((last_isr & REGDMA_INT_ERR_MASK) == 0));

		if (last_isr & REGDMA_INT_ERR_MASK) {
			SDEROT_ERR("Rotator error, ts:0x%X/0x%X status:%x\n",
				ctx->timestamp, swts, last_isr);
			sde_hw_rotator_dump_status(rot);
			status = ROT_ERROR_BIT;
		} else if (pending) {
			SDEROT_ERR("Rotator timeout, ts:0x%X/0x%X status:%x\n",
				ctx->timestamp, swts, last_isr);
			sde_hw_rotator_dump_status(rot);
			status = ROT_ERROR_BIT;
		} else {
			status = 0;
		}

		/* ack whatever interrupt bits were observed */
		SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_INT_CLEAR,
				last_isr);
	}

	/* negative errno returned through a u32 per this driver's convention */
	sts = (status & ROT_ERROR_BIT) ? -ENODEV : 0;

	if (status & ROT_ERROR_BIT)
		SDEROT_EVTLOG_TOUT_HANDLER("rot", "vbif_dbg_bus", "panic");

	return sts;
}
1168
1169/*
1170 * setup_rotator_ops - setup callback functions for the low-level HAL
1171 * @ops: Pointer to low-level ops callback
1172 * @mode: Operation mode (non-regdma or regdma)
1173 */
1174static void setup_rotator_ops(struct sde_hw_rotator_ops *ops,
1175 enum sde_rotator_regdma_mode mode)
1176{
1177 ops->setup_rotator_fetchengine = sde_hw_rotator_setup_fetchengine;
1178 ops->setup_rotator_wbengine = sde_hw_rotator_setup_wbengine;
1179 if (mode == ROT_REGDMA_ON) {
1180 ops->start_rotator = sde_hw_rotator_start_regdma;
1181 ops->wait_rotator_done = sde_hw_rotator_wait_done_regdma;
1182 } else {
1183 ops->start_rotator = sde_hw_rotator_start_no_regdma;
1184 ops->wait_rotator_done = sde_hw_rotator_wait_done_no_regdma;
1185 }
1186}
1187
1188/*
1189 * sde_hw_rotator_swts_create - create software timestamp buffer
1190 * @rot: Pointer to rotator hw
1191 *
1192 * This buffer is used by regdma to keep track of last completed command.
1193 */
1194static int sde_hw_rotator_swts_create(struct sde_hw_rotator *rot)
1195{
1196 int rc = 0;
1197 struct ion_handle *handle;
1198 struct sde_mdp_img_data *data;
Abhijit Kulkarni298c8232016-09-26 22:32:10 -07001199 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
Alan Kwong9487de22016-01-16 22:06:36 -05001200 u32 bufsize = sizeof(int) * SDE_HW_ROT_REGDMA_TOTAL_CTX * 2;
1201
Abhijit Kulkarni298c8232016-09-26 22:32:10 -07001202 rot->iclient = mdata->iclient;
Alan Kwong9487de22016-01-16 22:06:36 -05001203
1204 handle = ion_alloc(rot->iclient, bufsize, SZ_4K,
1205 ION_HEAP(ION_SYSTEM_HEAP_ID), 0);
1206 if (IS_ERR_OR_NULL(handle)) {
1207 SDEROT_ERR("ion memory allocation failed\n");
1208 return -ENOMEM;
1209 }
1210
1211 data = &rot->swts_buf;
1212 data->len = bufsize;
1213 data->srcp_dma_buf = ion_share_dma_buf(rot->iclient, handle);
1214 if (IS_ERR(data->srcp_dma_buf)) {
1215 SDEROT_ERR("ion_dma_buf setup failed\n");
1216 rc = -ENOMEM;
1217 goto imap_err;
1218 }
1219
1220 sde_smmu_ctrl(1);
1221
1222 data->srcp_attachment = sde_smmu_dma_buf_attach(data->srcp_dma_buf,
1223 &rot->pdev->dev, SDE_IOMMU_DOMAIN_ROT_UNSECURE);
1224 if (IS_ERR_OR_NULL(data->srcp_attachment)) {
1225 SDEROT_ERR("sde_smmu_dma_buf_attach error\n");
1226 rc = -ENOMEM;
1227 goto err_put;
1228 }
1229
1230 data->srcp_table = dma_buf_map_attachment(data->srcp_attachment,
1231 DMA_BIDIRECTIONAL);
1232 if (IS_ERR_OR_NULL(data->srcp_table)) {
1233 SDEROT_ERR("dma_buf_map_attachment error\n");
1234 rc = -ENOMEM;
1235 goto err_detach;
1236 }
1237
1238 rc = sde_smmu_map_dma_buf(data->srcp_dma_buf, data->srcp_table,
1239 SDE_IOMMU_DOMAIN_ROT_UNSECURE, &data->addr,
1240 &data->len, DMA_BIDIRECTIONAL);
1241 if (IS_ERR_VALUE(rc)) {
1242 SDEROT_ERR("smmu_map_dma_buf failed: (%d)\n", rc);
1243 goto err_unmap;
1244 }
1245
1246 dma_buf_begin_cpu_access(data->srcp_dma_buf, 0, data->len,
1247 DMA_FROM_DEVICE);
1248 rot->swts_buffer = dma_buf_kmap(data->srcp_dma_buf, 0);
1249 if (IS_ERR_OR_NULL(rot->swts_buffer)) {
1250 SDEROT_ERR("ion kernel memory mapping failed\n");
1251 rc = IS_ERR(rot->swts_buffer);
1252 goto kmap_err;
1253 }
1254
1255 data->mapped = true;
1256 SDEROT_DBG("swts buffer mapped: %pad/%lx va:%p\n", &data->addr,
1257 data->len, rot->swts_buffer);
1258
1259 ion_free(rot->iclient, handle);
1260
1261 sde_smmu_ctrl(0);
1262
1263 return rc;
1264kmap_err:
1265 sde_smmu_unmap_dma_buf(data->srcp_table, SDE_IOMMU_DOMAIN_ROT_UNSECURE,
1266 DMA_FROM_DEVICE, data->srcp_dma_buf);
1267err_unmap:
1268 dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table,
1269 DMA_FROM_DEVICE);
1270err_detach:
1271 dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment);
1272err_put:
1273 dma_buf_put(data->srcp_dma_buf);
1274 data->srcp_dma_buf = NULL;
1275imap_err:
1276 ion_free(rot->iclient, handle);
1277
1278 return rc;
1279}
1280
/*
 * sde_hw_rotator_swtc_destroy - destroy software timestamp buffer
 * @rot: Pointer to rotator hw
 *
 * Releases the swts buffer in strict reverse order of acquisition in
 * sde_hw_rotator_swts_create: CPU access end, kernel unmap, SMMU unmap,
 * dma-buf attachment unmap/detach, then the final dma-buf reference.
 */
static void sde_hw_rotator_swtc_destroy(struct sde_hw_rotator *rot)
{
	struct sde_mdp_img_data *data;

	data = &rot->swts_buf;

	dma_buf_end_cpu_access(data->srcp_dma_buf, 0, data->len,
			DMA_FROM_DEVICE);
	dma_buf_kunmap(data->srcp_dma_buf, 0, rot->swts_buffer);

	sde_smmu_unmap_dma_buf(data->srcp_table, SDE_IOMMU_DOMAIN_ROT_UNSECURE,
			DMA_FROM_DEVICE, data->srcp_dma_buf);
	dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table,
			DMA_FROM_DEVICE);
	dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment);
	dma_buf_put(data->srcp_dma_buf);
	/* clear the pointer so a later create/destroy cycle starts clean */
	data->srcp_dma_buf = NULL;
}
1303
/*
 * sde_hw_rotator_pre_pmevent - SDE rotator core will call this before a
 *                              PM event occurs
 * @mgr: Pointer to rotator manager
 * @pmon: Boolean indicate an on/off power event
 */
void sde_hw_rotator_pre_pmevent(struct sde_rot_mgr *mgr, bool pmon)
{
	struct sde_hw_rotator *rot;
	u32 l_ts, h_ts, swts, hwts;
	u32 rotsts, regdmasts;

	/*
	 * Check last HW timestamp with SW timestamp before power off event.
	 * If there is a mismatch, that will be quite possible the rotator HW
	 * is either hang or not finishing last submitted job. In that case,
	 * it is best to do a timeout eventlog to capture some good events
	 * log data for analysis.
	 */
	if (!pmon && mgr && mgr->hw_data) {
		rot = mgr->hw_data;
		h_ts = atomic_read(&rot->timestamp[ROT_QUEUE_HIGH_PRIORITY]);
		l_ts = atomic_read(&rot->timestamp[ROT_QUEUE_LOW_PRIORITY]);

		/* contruct the combined timstamp: high queue in the lower
		 * half, low queue in the upper half of the register
		 */
		swts = (h_ts & SDE_REGDMA_SWTS_MASK) |
			((l_ts & SDE_REGDMA_SWTS_MASK) <<
			 SDE_REGDMA_SWTS_SHIFT);

		/* Need to turn on clock to access rotator register */
		sde_rotator_clk_ctrl(mgr, true);
		hwts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);
		regdmasts = SDE_ROTREG_READ(rot->mdss_base,
				REGDMA_CSR_REGDMA_BLOCK_STATUS);
		rotsts = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);

		SDEROT_DBG(
			"swts:0x%x, hwts:0x%x, regdma-sts:0x%x, rottop-sts:0x%x\n",
				swts, hwts, regdmasts, rotsts);
		SDEROT_EVTLOG(swts, hwts, regdmasts, rotsts);

		/* mismatch only matters if regdma/rotator is still active */
		if ((swts != hwts) && ((regdmasts & REGDMA_BUSY) ||
					(rotsts & ROT_STATUS_MASK))) {
			SDEROT_ERR(
				"Mismatch SWTS with HWTS: swts:0x%x, hwts:0x%x, regdma-sts:0x%x, rottop-sts:0x%x\n",
				swts, hwts, regdmasts, rotsts);
			SDEROT_EVTLOG_TOUT_HANDLER("rot", "vbif_dbg_bus",
					"panic");
		}

		/* Turn off rotator clock after checking rotator registers */
		sde_rotator_clk_ctrl(mgr, false);
	}
}
1358
1359/*
1360 * sde_hw_rotator_post_pmevent - SDE rotator core will call this after a
1361 * PM event occurs
1362 * @mgr: Pointer to rotator manager
1363 * @pmon: Boolean indicate an on/off power event
1364 */
1365void sde_hw_rotator_post_pmevent(struct sde_rot_mgr *mgr, bool pmon)
1366{
1367 struct sde_hw_rotator *rot;
1368 u32 l_ts, h_ts, swts;
1369
1370 /*
1371 * After a power on event, the rotator HW is reset to default setting.
1372 * It is necessary to synchronize the SW timestamp with the HW.
1373 */
1374 if (pmon && mgr && mgr->hw_data) {
1375 rot = mgr->hw_data;
1376 h_ts = atomic_read(&rot->timestamp[ROT_QUEUE_HIGH_PRIORITY]);
1377 l_ts = atomic_read(&rot->timestamp[ROT_QUEUE_LOW_PRIORITY]);
1378
1379 /* contruct the combined timstamp */
1380 swts = (h_ts & SDE_REGDMA_SWTS_MASK) |
1381 ((l_ts & SDE_REGDMA_SWTS_MASK) <<
1382 SDE_REGDMA_SWTS_SHIFT);
1383
1384 SDEROT_DBG("swts:0x%x, h_ts:0x%x, l_ts;0x%x\n",
1385 swts, h_ts, l_ts);
1386 SDEROT_EVTLOG(swts, h_ts, l_ts);
1387 rot->reset_hw_ts = true;
1388 rot->last_hw_ts = swts;
1389 }
1390}
1391
1392/*
Alan Kwong9487de22016-01-16 22:06:36 -05001393 * sde_hw_rotator_destroy - Destroy hw rotator and free allocated resources
1394 * @mgr: Pointer to rotator manager
1395 */
1396static void sde_hw_rotator_destroy(struct sde_rot_mgr *mgr)
1397{
1398 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
1399 struct sde_hw_rotator *rot;
1400
1401 if (!mgr || !mgr->pdev || !mgr->hw_data) {
1402 SDEROT_ERR("null parameters\n");
1403 return;
1404 }
1405
1406 rot = mgr->hw_data;
1407 if (rot->irq_num >= 0)
1408 devm_free_irq(&mgr->pdev->dev, rot->irq_num, mdata);
1409
1410 if (rot->mode == ROT_REGDMA_ON)
1411 sde_hw_rotator_swtc_destroy(rot);
1412
1413 devm_kfree(&mgr->pdev->dev, mgr->hw_data);
1414 mgr->hw_data = NULL;
1415}
1416
1417/*
1418 * sde_hw_rotator_alloc_ext - allocate rotator resource from rotator hw
1419 * @mgr: Pointer to rotator manager
1420 * @pipe_id: pipe identifier (not used)
1421 * @wb_id: writeback identifier/priority queue identifier
1422 *
1423 * This function allocates a new hw rotator resource for the given priority.
1424 */
1425static struct sde_rot_hw_resource *sde_hw_rotator_alloc_ext(
1426 struct sde_rot_mgr *mgr, u32 pipe_id, u32 wb_id)
1427{
1428 struct sde_hw_rotator_resource_info *resinfo;
1429
1430 if (!mgr || !mgr->hw_data) {
1431 SDEROT_ERR("null parameters\n");
1432 return NULL;
1433 }
1434
1435 /*
1436 * Allocate rotator resource info. Each allocation is per
1437 * HW priority queue
1438 */
1439 resinfo = devm_kzalloc(&mgr->pdev->dev, sizeof(*resinfo), GFP_KERNEL);
1440 if (!resinfo) {
1441 SDEROT_ERR("Failed allocation HW rotator resource info\n");
1442 return NULL;
1443 }
1444
1445 resinfo->rot = mgr->hw_data;
1446 resinfo->hw.wb_id = wb_id;
1447 atomic_set(&resinfo->hw.num_active, 0);
1448 init_waitqueue_head(&resinfo->hw.wait_queue);
1449
1450 /* For non-regdma, only support one active session */
1451 if (resinfo->rot->mode == ROT_REGDMA_OFF)
1452 resinfo->hw.max_active = 1;
1453 else {
1454 resinfo->hw.max_active = SDE_HW_ROT_REGDMA_TOTAL_CTX - 1;
1455
1456 if (resinfo->rot->iclient == NULL)
1457 sde_hw_rotator_swts_create(resinfo->rot);
1458 }
1459
Alan Kwongf987ea32016-07-06 12:11:44 -04001460 if (resinfo->rot->irq_num >= 0)
Alan Kwong818b7fc2016-07-24 22:07:41 -04001461 sde_hw_rotator_enable_irq(resinfo->rot);
Alan Kwongf987ea32016-07-06 12:11:44 -04001462
Alan Kwong9487de22016-01-16 22:06:36 -05001463 SDEROT_DBG("New rotator resource:%p, priority:%d\n",
1464 resinfo, wb_id);
1465
1466 return &resinfo->hw;
1467}
1468
1469/*
1470 * sde_hw_rotator_free_ext - free the given rotator resource
1471 * @mgr: Pointer to rotator manager
1472 * @hw: Pointer to rotator resource
1473 */
1474static void sde_hw_rotator_free_ext(struct sde_rot_mgr *mgr,
1475 struct sde_rot_hw_resource *hw)
1476{
1477 struct sde_hw_rotator_resource_info *resinfo;
1478
1479 if (!mgr || !mgr->hw_data)
1480 return;
1481
1482 resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
1483
1484 SDEROT_DBG(
1485 "Free rotator resource:%p, priority:%d, active:%d, pending:%d\n",
1486 resinfo, hw->wb_id, atomic_read(&hw->num_active),
1487 hw->pending_count);
1488
Alan Kwongf987ea32016-07-06 12:11:44 -04001489 if (resinfo->rot->irq_num >= 0)
Alan Kwong818b7fc2016-07-24 22:07:41 -04001490 sde_hw_rotator_disable_irq(resinfo->rot);
Alan Kwongf987ea32016-07-06 12:11:44 -04001491
Alan Kwong9487de22016-01-16 22:06:36 -05001492 devm_kfree(&mgr->pdev->dev, resinfo);
1493}
1494
/*
 * sde_hw_rotator_alloc_rotctx - allocate rotator context
 * @rot: Pointer to rotator hw
 * @hw: Pointer to rotator resource
 * @session_id: Session identifier of this context
 *
 * This function allocates a new rotator context for the given session id.
 * The context carries a per-queue monotonically increasing timestamp
 * (masked to the sw-timestamp field width), its slot in the REGDMA
 * command RAM and the device address of its slot in the swts buffer.
 * Returns NULL on allocation failure.
 */
static struct sde_hw_rotator_context *sde_hw_rotator_alloc_rotctx(
		struct sde_hw_rotator *rot,
		struct sde_rot_hw_resource *hw,
		u32 session_id)
{
	struct sde_hw_rotator_context *ctx;

	/* Allocate rotator context */
	ctx = devm_kzalloc(&rot->pdev->dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		SDEROT_ERR("Failed allocation HW rotator context\n");
		return NULL;
	}

	ctx->rot = rot;
	ctx->q_id = hw->wb_id;
	ctx->session_id = session_id;
	ctx->hwres = hw;
	/* next timestamp for this priority queue, clipped to field width */
	ctx->timestamp = atomic_add_return(1, &rot->timestamp[ctx->q_id]);
	ctx->timestamp &= SDE_REGDMA_SWTS_MASK;
	ctx->is_secure = false;

	/* command segment in REGDMA RAM assigned to this (queue, ctx) slot */
	ctx->regdma_base = rot->cmd_wr_ptr[ctx->q_id]
		[sde_hw_rotator_get_regdma_ctxidx(ctx)];
	ctx->regdma_wrptr = ctx->regdma_base;
	/* device address of this context's slot in the swts buffer */
	ctx->ts_addr = (dma_addr_t)((u32 *)rot->swts_buf.addr +
			ctx->q_id * SDE_HW_ROT_REGDMA_TOTAL_CTX +
			sde_hw_rotator_get_regdma_ctxidx(ctx));

	ctx->last_regdma_timestamp = SDE_REGDMA_SWTS_INVALID;

	init_completion(&ctx->rot_comp);
	init_waitqueue_head(&ctx->regdma_waitq);

	/* Store rotator context for lookup purpose */
	sde_hw_rotator_put_ctx(ctx);

	SDEROT_DBG(
		"New rot CTX:%p, ctxidx:%d, session-id:%d, prio:%d, timestamp:%X, active:%d\n",
		ctx, sde_hw_rotator_get_regdma_ctxidx(ctx), ctx->session_id,
		ctx->q_id, ctx->timestamp,
		atomic_read(&ctx->hwres->num_active));

	return ctx;
}
1548
1549/*
1550 * sde_hw_rotator_free_rotctx - free the given rotator context
1551 * @rot: Pointer to rotator hw
1552 * @ctx: Pointer to rotator context
1553 */
1554static void sde_hw_rotator_free_rotctx(struct sde_hw_rotator *rot,
1555 struct sde_hw_rotator_context *ctx)
1556{
1557 if (!rot || !ctx)
1558 return;
1559
1560 SDEROT_DBG(
1561 "Free rot CTX:%p, ctxidx:%d, session-id:%d, prio:%d, timestamp:%X, active:%d\n",
1562 ctx, sde_hw_rotator_get_regdma_ctxidx(ctx), ctx->session_id,
1563 ctx->q_id, ctx->timestamp,
1564 atomic_read(&ctx->hwres->num_active));
1565
Benjamin Chanc3e185f2016-11-08 21:48:21 -05001566 /* Clear rotator context from lookup purpose */
1567 sde_hw_rotator_clr_ctx(ctx);
Alan Kwong9487de22016-01-16 22:06:36 -05001568
1569 devm_kfree(&rot->pdev->dev, ctx);
1570}
1571
1572/*
1573 * sde_hw_rotator_config - configure hw for the given rotation entry
1574 * @hw: Pointer to rotator resource
1575 * @entry: Pointer to rotation entry
1576 *
1577 * This function setup the fetch/writeback/rotator blocks, as well as VBIF
1578 * based on the given rotation entry.
1579 */
1580static int sde_hw_rotator_config(struct sde_rot_hw_resource *hw,
1581 struct sde_rot_entry *entry)
1582{
1583 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
1584 struct sde_hw_rotator *rot;
1585 struct sde_hw_rotator_resource_info *resinfo;
1586 struct sde_hw_rotator_context *ctx;
1587 struct sde_hw_rot_sspp_cfg sspp_cfg;
1588 struct sde_hw_rot_wb_cfg wb_cfg;
1589 u32 danger_lut = 0; /* applicable for realtime client only */
1590 u32 safe_lut = 0; /* applicable for realtime client only */
1591 u32 flags = 0;
1592 struct sde_rotation_item *item;
1593
1594 if (!hw || !entry) {
1595 SDEROT_ERR("null hw resource/entry\n");
1596 return -EINVAL;
1597 }
1598
1599 resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
1600 rot = resinfo->rot;
1601 item = &entry->item;
1602
1603 ctx = sde_hw_rotator_alloc_rotctx(rot, hw, item->session_id);
1604 if (!ctx) {
1605 SDEROT_ERR("Failed allocating rotator context!!\n");
1606 return -EINVAL;
1607 }
1608
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04001609 if (rot->reset_hw_ts) {
1610 SDEROT_EVTLOG(rot->last_hw_ts);
1611 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG,
1612 rot->last_hw_ts);
1613 /* ensure write is issued to the rotator HW */
1614 wmb();
1615 rot->reset_hw_ts = false;
1616 }
1617
Alan Kwong9487de22016-01-16 22:06:36 -05001618 flags = (item->flags & SDE_ROTATION_FLIP_LR) ?
1619 SDE_ROT_FLAG_FLIP_LR : 0;
1620 flags |= (item->flags & SDE_ROTATION_FLIP_UD) ?
1621 SDE_ROT_FLAG_FLIP_UD : 0;
1622 flags |= (item->flags & SDE_ROTATION_90) ?
1623 SDE_ROT_FLAG_ROT_90 : 0;
1624 flags |= (item->flags & SDE_ROTATION_DEINTERLACE) ?
1625 SDE_ROT_FLAG_DEINTERLACE : 0;
1626 flags |= (item->flags & SDE_ROTATION_SECURE) ?
1627 SDE_ROT_FLAG_SECURE_OVERLAY_SESSION : 0;
Abhijit Kulkarni298c8232016-09-26 22:32:10 -07001628 flags |= (item->flags & SDE_ROTATION_SECURE_CAMERA) ?
1629 SDE_ROT_FLAG_SECURE_CAMERA_SESSION : 0;
1630
Alan Kwong9487de22016-01-16 22:06:36 -05001631
1632 sspp_cfg.img_width = item->input.width;
1633 sspp_cfg.img_height = item->input.height;
Benjamin Chan99eb63b2016-12-21 15:45:26 -05001634 sspp_cfg.fps = entry->perf->config.frame_rate;
1635 sspp_cfg.bw = entry->perf->bw;
Alan Kwong9487de22016-01-16 22:06:36 -05001636 sspp_cfg.fmt = sde_get_format_params(item->input.format);
1637 if (!sspp_cfg.fmt) {
1638 SDEROT_ERR("null format\n");
1639 return -EINVAL;
1640 }
1641 sspp_cfg.src_rect = &item->src_rect;
1642 sspp_cfg.data = &entry->src_buf;
1643 sde_mdp_get_plane_sizes(sspp_cfg.fmt, item->input.width,
1644 item->input.height, &sspp_cfg.src_plane,
1645 0, /* No bwc_mode */
1646 (flags & SDE_ROT_FLAG_SOURCE_ROTATED_90) ?
1647 true : false);
1648
1649 rot->ops.setup_rotator_fetchengine(ctx, ctx->q_id,
Benjamin Chanfb6faa32016-08-16 17:21:01 -04001650 &sspp_cfg, danger_lut, safe_lut,
1651 entry->dnsc_factor_w, entry->dnsc_factor_h, flags);
Alan Kwong9487de22016-01-16 22:06:36 -05001652
1653 wb_cfg.img_width = item->output.width;
1654 wb_cfg.img_height = item->output.height;
Benjamin Chan99eb63b2016-12-21 15:45:26 -05001655 wb_cfg.fps = entry->perf->config.frame_rate;
1656 wb_cfg.bw = entry->perf->bw;
Alan Kwong9487de22016-01-16 22:06:36 -05001657 wb_cfg.fmt = sde_get_format_params(item->output.format);
1658 wb_cfg.dst_rect = &item->dst_rect;
1659 wb_cfg.data = &entry->dst_buf;
1660 sde_mdp_get_plane_sizes(wb_cfg.fmt, item->output.width,
1661 item->output.height, &wb_cfg.dst_plane,
1662 0, /* No bwc_mode */
1663 (flags & SDE_ROT_FLAG_ROT_90) ? true : false);
1664
1665 wb_cfg.v_downscale_factor = entry->dnsc_factor_h;
1666 wb_cfg.h_downscale_factor = entry->dnsc_factor_w;
1667
1668 rot->ops.setup_rotator_wbengine(ctx, ctx->q_id, &wb_cfg, flags);
1669
1670 /* setup VA mapping for debugfs */
1671 if (rot->dbgmem) {
1672 sde_hw_rotator_map_vaddr(&ctx->src_dbgbuf,
1673 &item->input,
1674 &entry->src_buf);
1675
1676 sde_hw_rotator_map_vaddr(&ctx->dst_dbgbuf,
1677 &item->output,
1678 &entry->dst_buf);
1679 }
1680
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04001681 SDEROT_EVTLOG(ctx->timestamp, flags,
1682 item->input.width, item->input.height,
Benjamin Chan53e3bce2016-08-31 14:43:29 -04001683 item->output.width, item->output.height,
1684 entry->src_buf.p[0].addr, entry->dst_buf.p[0].addr);
1685
Alan Kwong9487de22016-01-16 22:06:36 -05001686 if (mdata->default_ot_rd_limit) {
1687 struct sde_mdp_set_ot_params ot_params;
1688
1689 memset(&ot_params, 0, sizeof(struct sde_mdp_set_ot_params));
1690 ot_params.xin_id = XIN_SSPP;
1691 ot_params.num = 0; /* not used */
Alan Kwongeffb5ee2016-03-12 19:47:45 -05001692 ot_params.width = entry->perf->config.input.width;
1693 ot_params.height = entry->perf->config.input.height;
1694 ot_params.fps = entry->perf->config.frame_rate;
Alan Kwong9487de22016-01-16 22:06:36 -05001695 ot_params.reg_off_vbif_lim_conf = MMSS_VBIF_RD_LIM_CONF;
1696 ot_params.reg_off_mdp_clk_ctrl =
1697 MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0;
1698 ot_params.bit_off_mdp_clk_ctrl =
1699 MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN0;
Benjamin Chan99eb63b2016-12-21 15:45:26 -05001700 ot_params.fmt = ctx->is_traffic_shaping ?
1701 SDE_PIX_FMT_ABGR_8888 :
1702 entry->perf->config.input.format;
Alan Kwong9487de22016-01-16 22:06:36 -05001703 sde_mdp_set_ot_limit(&ot_params);
1704 }
1705
1706 if (mdata->default_ot_wr_limit) {
1707 struct sde_mdp_set_ot_params ot_params;
1708
1709 memset(&ot_params, 0, sizeof(struct sde_mdp_set_ot_params));
1710 ot_params.xin_id = XIN_WRITEBACK;
1711 ot_params.num = 0; /* not used */
Alan Kwongeffb5ee2016-03-12 19:47:45 -05001712 ot_params.width = entry->perf->config.input.width;
1713 ot_params.height = entry->perf->config.input.height;
1714 ot_params.fps = entry->perf->config.frame_rate;
Alan Kwong9487de22016-01-16 22:06:36 -05001715 ot_params.reg_off_vbif_lim_conf = MMSS_VBIF_WR_LIM_CONF;
1716 ot_params.reg_off_mdp_clk_ctrl =
1717 MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0;
1718 ot_params.bit_off_mdp_clk_ctrl =
1719 MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN1;
Benjamin Chan99eb63b2016-12-21 15:45:26 -05001720 ot_params.fmt = ctx->is_traffic_shaping ?
1721 SDE_PIX_FMT_ABGR_8888 :
1722 entry->perf->config.input.format;
Alan Kwong9487de22016-01-16 22:06:36 -05001723 sde_mdp_set_ot_limit(&ot_params);
1724 }
1725
1726 if (test_bit(SDE_QOS_PER_PIPE_LUT, mdata->sde_qos_map)) {
1727 u32 qos_lut = 0; /* low priority for nrt read client */
1728
1729 trace_rot_perf_set_qos_luts(XIN_SSPP, sspp_cfg.fmt->format,
1730 qos_lut, sde_mdp_is_linear_format(sspp_cfg.fmt));
1731
1732 SDE_ROTREG_WRITE(rot->mdss_base, ROT_SSPP_CREQ_LUT, qos_lut);
1733 }
1734
1735 if (mdata->npriority_lvl > 0) {
1736 u32 mask, reg_val, i, vbif_qos;
1737
1738 for (i = 0; i < mdata->npriority_lvl; i++) {
1739 reg_val = SDE_VBIF_READ(mdata,
1740 MMSS_VBIF_NRT_VBIF_QOS_REMAP_00 + i*4);
1741 mask = 0x3 << (XIN_SSPP * 2);
1742 reg_val &= ~(mask);
1743 vbif_qos = mdata->vbif_nrt_qos[i];
1744 reg_val |= vbif_qos << (XIN_SSPP * 2);
1745 /* ensure write is issued after the read operation */
1746 mb();
1747 SDE_VBIF_WRITE(mdata,
1748 MMSS_VBIF_NRT_VBIF_QOS_REMAP_00 + i*4,
1749 reg_val);
1750 }
1751 }
1752
1753 /* Enable write gather for writeback to remove write gaps, which
1754 * may hang AXI/BIMC/SDE.
1755 */
1756 SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_WRITE_GATHTER_EN,
1757 BIT(XIN_WRITEBACK));
1758
1759 return 0;
1760}
1761
1762/*
1763 * sde_hw_rotator_kickoff - kickoff processing on the given entry
1764 * @hw: Pointer to rotator resource
1765 * @entry: Pointer to rotation entry
1766 */
1767static int sde_hw_rotator_kickoff(struct sde_rot_hw_resource *hw,
1768 struct sde_rot_entry *entry)
1769{
1770 struct sde_hw_rotator *rot;
1771 struct sde_hw_rotator_resource_info *resinfo;
1772 struct sde_hw_rotator_context *ctx;
Alan Kwong9487de22016-01-16 22:06:36 -05001773
1774 if (!hw || !entry) {
1775 SDEROT_ERR("null hw resource/entry\n");
1776 return -EINVAL;
1777 }
1778
1779 resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
1780 rot = resinfo->rot;
1781
1782 /* Lookup rotator context from session-id */
1783 ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id, hw->wb_id);
1784 if (!ctx) {
1785 SDEROT_ERR("Cannot locate rotator ctx from sesison id:%d\n",
1786 entry->item.session_id);
Benjamin Chan62b94ed2016-08-18 23:55:21 -04001787 return -EINVAL;
Alan Kwong9487de22016-01-16 22:06:36 -05001788 }
Alan Kwong9487de22016-01-16 22:06:36 -05001789
Alan Kwong9487de22016-01-16 22:06:36 -05001790 rot->ops.start_rotator(ctx, ctx->q_id);
1791
1792 return 0;
1793}
1794
1795/*
1796 * sde_hw_rotator_wait4done - wait for completion notification
1797 * @hw: Pointer to rotator resource
1798 * @entry: Pointer to rotation entry
1799 *
1800 * This function blocks until the given entry is complete, error
1801 * is detected, or timeout.
1802 */
1803static int sde_hw_rotator_wait4done(struct sde_rot_hw_resource *hw,
1804 struct sde_rot_entry *entry)
1805{
1806 struct sde_hw_rotator *rot;
1807 struct sde_hw_rotator_resource_info *resinfo;
1808 struct sde_hw_rotator_context *ctx;
1809 int ret;
1810
1811 if (!hw || !entry) {
1812 SDEROT_ERR("null hw resource/entry\n");
1813 return -EINVAL;
1814 }
1815
1816 resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
1817 rot = resinfo->rot;
1818
1819 /* Lookup rotator context from session-id */
1820 ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id, hw->wb_id);
1821 if (!ctx) {
1822 SDEROT_ERR("Cannot locate rotator ctx from sesison id:%d\n",
1823 entry->item.session_id);
Benjamin Chan62b94ed2016-08-18 23:55:21 -04001824 return -EINVAL;
Alan Kwong9487de22016-01-16 22:06:36 -05001825 }
Alan Kwong9487de22016-01-16 22:06:36 -05001826
1827 ret = rot->ops.wait_rotator_done(ctx, ctx->q_id, 0);
1828
Alan Kwong9487de22016-01-16 22:06:36 -05001829 if (rot->dbgmem) {
1830 sde_hw_rotator_unmap_vaddr(&ctx->src_dbgbuf);
1831 sde_hw_rotator_unmap_vaddr(&ctx->dst_dbgbuf);
1832 }
1833
1834 /* Current rotator context job is finished, time to free up*/
1835 sde_hw_rotator_free_rotctx(rot, ctx);
1836
1837 return ret;
1838}
1839
1840/*
1841 * sde_rotator_hw_rev_init - setup feature and/or capability bitmask
1842 * @rot: Pointer to hw rotator
1843 *
1844 * This function initializes feature and/or capability bitmask based on
1845 * h/w version read from the device.
1846 */
static int sde_rotator_hw_rev_init(struct sde_hw_rotator *rot)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	u32 hw_version;

	if (!mdata) {
		SDEROT_ERR("null rotator data\n");
		return -EINVAL;
	}

	/* read h/w revision and derive feature/capability bitmasks from it */
	hw_version = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_HW_VERSION);
	SDEROT_DBG("hw version %8.8x\n", hw_version);

	/* QoS policy selection for this rotator generation */
	clear_bit(SDE_QOS_PER_PIPE_IB, mdata->sde_qos_map);
	set_bit(SDE_QOS_OVERHEAD_FACTOR, mdata->sde_qos_map);
	clear_bit(SDE_QOS_CDP, mdata->sde_qos_map);
	set_bit(SDE_QOS_OTLIM, mdata->sde_qos_map);
	set_bit(SDE_QOS_PER_PIPE_LUT, mdata->sde_qos_map);
	clear_bit(SDE_QOS_SIMPLIFIED_PREFILL, mdata->sde_qos_map);

	set_bit(SDE_CAPS_R3_WB, mdata->sde_caps_map);

	/* 1.5x downscale is available on every revision except v1.0 */
	if (hw_version != SDE_ROT_TYPE_V1_0) {
		SDEROT_DBG("Supporting 1.5 downscale for SDE Rotator\n");
		set_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map);
	}

	set_bit(SDE_CAPS_SEC_ATTACH_DETACH_SMMU, mdata->sde_caps_map);

	/* hook up debug-bus and register-dump descriptors for this h/w */
	mdata->nrt_vbif_dbg_bus = nrt_vbif_dbg_bus_r3;
	mdata->nrt_vbif_dbg_bus_size =
		ARRAY_SIZE(nrt_vbif_dbg_bus_r3);

	mdata->regdump = sde_rot_r3_regdump;
	mdata->regdump_size = ARRAY_SIZE(sde_rot_r3_regdump);
	/* reset regdma software timestamp at init */
	SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG, 0);
	return 0;
}
1885
1886/*
1887 * sde_hw_rotator_rotirq_handler - non-regdma interrupt handler
1888 * @irq: Interrupt number
1889 * @ptr: Pointer to private handle provided during registration
1890 *
1891 * This function services rotator interrupt and wakes up waiting client
1892 * with pending rotation requests already submitted to h/w.
1893 */
static irqreturn_t sde_hw_rotator_rotirq_handler(int irq, void *ptr)
{
	struct sde_hw_rotator *rot = ptr;
	struct sde_hw_rotator_context *ctx;
	irqreturn_t ret = IRQ_NONE;
	u32 isr;

	isr = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_INTR_STATUS);

	SDEROT_DBG("intr_status = %8.8x\n", isr);

	if (isr & ROT_DONE_MASK) {
		/* mask further rotator interrupts until next kickoff */
		if (rot->irq_num >= 0)
			sde_hw_rotator_disable_irq(rot);
		SDEROT_DBG("Notify rotator complete\n");

		/* Normal rotator only 1 session, no need to lookup */
		ctx = rot->rotCtx[0][0];
		WARN_ON(ctx == NULL);
		complete_all(&ctx->rot_comp);

		/* ack the done interrupt under the isr lock */
		spin_lock(&rot->rotisr_lock);
		SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
				ROT_DONE_CLEAR);
		spin_unlock(&rot->rotisr_lock);
		ret = IRQ_HANDLED;
	}

	return ret;
}
1924
1925/*
1926 * sde_hw_rotator_regdmairq_handler - regdma interrupt handler
1927 * @irq: Interrupt number
1928 * @ptr: Pointer to private handle provided during registration
1929 *
1930 * This function services rotator interrupt, decoding the source of
1931 * events (high/low priority queue), and wakes up all waiting clients
1932 * with pending rotation requests already submitted to h/w.
1933 */
static irqreturn_t sde_hw_rotator_regdmairq_handler(int irq, void *ptr)
{
	struct sde_hw_rotator *rot = ptr;
	struct sde_hw_rotator_context *ctx;
	irqreturn_t ret = IRQ_NONE;
	u32 isr;
	u32 ts;
	u32 q_id;

	isr = SDE_ROTREG_READ(rot->mdss_base, REGDMA_CSR_REGDMA_INT_STATUS);
	/* acknowledge interrupt before reading latest timestamp */
	SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_INT_CLEAR, isr);
	ts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);

	SDEROT_DBG("intr_status = %8.8x, sw_TS:%X\n", isr, ts);

	/* Any REGDMA status, including error and watchdog timer, should
	 * trigger and wake up waiting thread
	 */
	if (isr & (REGDMA_INT_HIGH_MASK | REGDMA_INT_LOW_MASK)) {
		spin_lock(&rot->rotisr_lock);

		/*
		 * Obtain rotator context based on timestamp from regdma
		 * and low/high interrupt status.  The 32-bit timestamp
		 * register packs one software timestamp per queue; the
		 * low-priority timestamp lives in the upper half.
		 */
		if (isr & REGDMA_INT_HIGH_MASK) {
			q_id = ROT_QUEUE_HIGH_PRIORITY;
			ts = ts & SDE_REGDMA_SWTS_MASK;
		} else if (isr & REGDMA_INT_LOW_MASK) {
			q_id = ROT_QUEUE_LOW_PRIORITY;
			ts = (ts >> SDE_REGDMA_SWTS_SHIFT) &
				SDE_REGDMA_SWTS_MASK;
		} else {
			/* defensive: outer mask check makes this unreachable */
			SDEROT_ERR("unknown ISR status: isr=0x%X\n", isr);
			goto done_isr_handle;
		}
		ctx = rot->rotCtx[q_id][ts & SDE_HW_ROT_REGDMA_SEG_MASK];

		/*
		 * Wake up all waiting context from the current and previous
		 * SW Timestamp.
		 */
		while (ctx &&
			sde_hw_rotator_elapsed_swts(ctx->timestamp, ts) >= 0) {
			/* record isr/timestamp for the waiter's inspection */
			ctx->last_regdma_isr_status = isr;
			ctx->last_regdma_timestamp = ts;
			SDEROT_DBG(
				"regdma complete: ctx:%p, ts:%X\n", ctx, ts);
			wake_up_all(&ctx->regdma_waitq);

			/* step back one timestamp (modulo wrap) and repeat */
			ts = (ts - 1) & SDE_REGDMA_SWTS_MASK;
			ctx = rot->rotCtx[q_id]
				[ts & SDE_HW_ROT_REGDMA_SEG_MASK];
		};

done_isr_handle:
		spin_unlock(&rot->rotisr_lock);
		ret = IRQ_HANDLED;
	} else if (isr & REGDMA_INT_ERR_MASK) {
		/*
		 * For REGDMA Err, we save the isr info and wake up
		 * all waiting contexts
		 */
		int i, j;

		SDEROT_ERR(
			"regdma err isr:%X, wake up all waiting contexts\n",
			isr);

		spin_lock(&rot->rotisr_lock);

		/* only contexts that have not yet seen a status are flagged */
		for (i = 0; i < ROT_QUEUE_MAX; i++) {
			for (j = 0; j < SDE_HW_ROT_REGDMA_TOTAL_CTX; j++) {
				ctx = rot->rotCtx[i][j];
				if (ctx && ctx->last_regdma_isr_status == 0) {
					ctx->last_regdma_isr_status = isr;
					ctx->last_regdma_timestamp = ts;
					wake_up_all(&ctx->regdma_waitq);
					SDEROT_DBG("Wakeup rotctx[%d][%d]:%p\n",
							i, j, ctx);
				}
			}
		}

		spin_unlock(&rot->rotisr_lock);
		ret = IRQ_HANDLED;
	}

	return ret;
}
2025
2026/*
2027 * sde_hw_rotator_validate_entry - validate rotation entry
2028 * @mgr: Pointer to rotator manager
2029 * @entry: Pointer to rotation entry
2030 *
2031 * This function validates the given rotation entry and provides possible
2032 * fixup (future improvement) if available. This function returns 0 if
2033 * the entry is valid, and returns error code otherwise.
2034 */
2035static int sde_hw_rotator_validate_entry(struct sde_rot_mgr *mgr,
2036 struct sde_rot_entry *entry)
2037{
Benjamin Chanfb6faa32016-08-16 17:21:01 -04002038 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
Alan Kwong9487de22016-01-16 22:06:36 -05002039 int ret = 0;
2040 u16 src_w, src_h, dst_w, dst_h;
2041 struct sde_rotation_item *item = &entry->item;
2042 struct sde_mdp_format_params *fmt;
2043
2044 src_w = item->src_rect.w;
2045 src_h = item->src_rect.h;
2046
2047 if (item->flags & SDE_ROTATION_90) {
2048 dst_w = item->dst_rect.h;
2049 dst_h = item->dst_rect.w;
2050 } else {
2051 dst_w = item->dst_rect.w;
2052 dst_h = item->dst_rect.h;
2053 }
2054
2055 entry->dnsc_factor_w = 0;
2056 entry->dnsc_factor_h = 0;
2057
2058 if ((src_w != dst_w) || (src_h != dst_h)) {
2059 if ((src_w % dst_w) || (src_h % dst_h)) {
2060 SDEROT_DBG("non integral scale not support\n");
2061 ret = -EINVAL;
Benjamin Chanfb6faa32016-08-16 17:21:01 -04002062 goto dnsc_1p5_check;
Alan Kwong9487de22016-01-16 22:06:36 -05002063 }
2064 entry->dnsc_factor_w = src_w / dst_w;
2065 if ((entry->dnsc_factor_w & (entry->dnsc_factor_w - 1)) ||
2066 (entry->dnsc_factor_w > 64)) {
2067 SDEROT_DBG("non power-of-2 w_scale not support\n");
2068 ret = -EINVAL;
2069 goto dnsc_err;
2070 }
2071 entry->dnsc_factor_h = src_h / dst_h;
2072 if ((entry->dnsc_factor_h & (entry->dnsc_factor_h - 1)) ||
2073 (entry->dnsc_factor_h > 64)) {
2074 SDEROT_DBG("non power-of-2 h_scale not support\n");
2075 ret = -EINVAL;
2076 goto dnsc_err;
2077 }
2078 }
2079
Benjamin Chan886ff672016-11-07 15:23:17 -05002080 fmt = sde_get_format_params(item->input.format);
2081 /*
2082 * Rotator downscale support max 4 times for UBWC format and
2083 * max 2 times for TP10/TP10_UBWC format
2084 */
2085 if (sde_mdp_is_ubwc_format(fmt) && (entry->dnsc_factor_h > 4)) {
2086 SDEROT_DBG("max downscale for UBWC format is 4\n");
Alan Kwong9487de22016-01-16 22:06:36 -05002087 ret = -EINVAL;
2088 goto dnsc_err;
2089 }
Benjamin Chan886ff672016-11-07 15:23:17 -05002090 if (sde_mdp_is_tp10_format(fmt) && (entry->dnsc_factor_h > 2)) {
2091 SDEROT_DBG("downscale with TP10 cannot be more than 2\n");
Alan Kwong9487de22016-01-16 22:06:36 -05002092 ret = -EINVAL;
2093 }
Benjamin Chanfb6faa32016-08-16 17:21:01 -04002094 goto dnsc_err;
2095
2096dnsc_1p5_check:
2097 /* Check for 1.5 downscale that only applies to V2 HW */
2098 if (test_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map)) {
2099 entry->dnsc_factor_w = src_w / dst_w;
2100 if ((entry->dnsc_factor_w != 1) ||
2101 ((dst_w * 3) != (src_w * 2))) {
2102 SDEROT_DBG(
2103 "No supporting non 1.5 downscale width ratio, src_w:%d, dst_w:%d\n",
2104 src_w, dst_w);
2105 ret = -EINVAL;
2106 goto dnsc_err;
2107 }
2108
2109 entry->dnsc_factor_h = src_h / dst_h;
2110 if ((entry->dnsc_factor_h != 1) ||
2111 ((dst_h * 3) != (src_h * 2))) {
2112 SDEROT_DBG(
2113 "Not supporting non 1.5 downscale height ratio, src_h:%d, dst_h:%d\n",
2114 src_h, dst_h);
2115 ret = -EINVAL;
2116 goto dnsc_err;
2117 }
2118 ret = 0;
2119 }
Alan Kwong9487de22016-01-16 22:06:36 -05002120
2121dnsc_err:
2122 /* Downscaler does not support asymmetrical dnsc */
2123 if (entry->dnsc_factor_w != entry->dnsc_factor_h) {
2124 SDEROT_DBG("asymmetric downscale not support\n");
2125 ret = -EINVAL;
2126 }
2127
2128 if (ret) {
2129 entry->dnsc_factor_w = 0;
2130 entry->dnsc_factor_h = 0;
2131 }
2132 return ret;
2133}
2134
2135/*
2136 * sde_hw_rotator_show_caps - output capability info to sysfs 'caps' file
2137 * @mgr: Pointer to rotator manager
2138 * @attr: Pointer to device attribute interface
2139 * @buf: Pointer to output buffer
2140 * @len: Length of output buffer
2141 */
2142static ssize_t sde_hw_rotator_show_caps(struct sde_rot_mgr *mgr,
2143 struct device_attribute *attr, char *buf, ssize_t len)
2144{
2145 struct sde_hw_rotator *hw_data;
Benjamin Chan886ff672016-11-07 15:23:17 -05002146 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
Alan Kwong9487de22016-01-16 22:06:36 -05002147 int cnt = 0;
2148
2149 if (!mgr || !buf)
2150 return 0;
2151
2152 hw_data = mgr->hw_data;
2153
2154#define SPRINT(fmt, ...) \
2155 (cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
2156
2157 /* insert capabilities here */
Benjamin Chan886ff672016-11-07 15:23:17 -05002158 if (test_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map))
2159 SPRINT("min_downscale=1.5\n");
2160 else
2161 SPRINT("min_downscale=2.0\n");
Alan Kwong9487de22016-01-16 22:06:36 -05002162
Benjamin Chan42db2c92016-11-22 22:50:01 -05002163 SPRINT("downscale_compression=1\n");
2164
Alan Kwong9487de22016-01-16 22:06:36 -05002165#undef SPRINT
2166 return cnt;
2167}
2168
2169/*
2170 * sde_hw_rotator_show_state - output state info to sysfs 'state' file
2171 * @mgr: Pointer to rotator manager
2172 * @attr: Pointer to device attribute interface
2173 * @buf: Pointer to output buffer
2174 * @len: Length of output buffer
2175 */
2176static ssize_t sde_hw_rotator_show_state(struct sde_rot_mgr *mgr,
2177 struct device_attribute *attr, char *buf, ssize_t len)
2178{
2179 struct sde_hw_rotator *rot;
2180 struct sde_hw_rotator_context *ctx;
2181 int cnt = 0;
2182 int num_active = 0;
2183 int i, j;
2184
2185 if (!mgr || !buf) {
2186 SDEROT_ERR("null parameters\n");
2187 return 0;
2188 }
2189
2190 rot = mgr->hw_data;
2191
2192#define SPRINT(fmt, ...) \
2193 (cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
2194
2195 if (rot) {
2196 SPRINT("rot_mode=%d\n", rot->mode);
2197 SPRINT("irq_num=%d\n", rot->irq_num);
2198
2199 if (rot->mode == ROT_REGDMA_OFF) {
2200 SPRINT("max_active=1\n");
2201 SPRINT("num_active=%d\n", rot->rotCtx[0][0] ? 1 : 0);
2202 } else {
2203 for (i = 0; i < ROT_QUEUE_MAX; i++) {
2204 for (j = 0; j < SDE_HW_ROT_REGDMA_TOTAL_CTX;
2205 j++) {
2206 ctx = rot->rotCtx[i][j];
2207
2208 if (ctx) {
2209 SPRINT(
2210 "rotCtx[%d][%d]:%p\n",
2211 i, j, ctx);
2212 ++num_active;
2213 }
2214 }
2215 }
2216
2217 SPRINT("max_active=%d\n", SDE_HW_ROT_REGDMA_TOTAL_CTX);
2218 SPRINT("num_active=%d\n", num_active);
2219 }
2220 }
2221
2222#undef SPRINT
2223 return cnt;
2224}
2225
2226/*
Alan Kwongda16e442016-08-14 20:47:18 -04002227 * sde_hw_rotator_get_pixfmt - get the indexed pixel format
2228 * @mgr: Pointer to rotator manager
2229 * @index: index of pixel format
2230 * @input: true for input port; false for output port
2231 */
2232static u32 sde_hw_rotator_get_pixfmt(struct sde_rot_mgr *mgr,
2233 int index, bool input)
2234{
2235 if (input) {
2236 if (index < ARRAY_SIZE(sde_hw_rotator_input_pixfmts))
2237 return sde_hw_rotator_input_pixfmts[index];
2238 else
2239 return 0;
2240 } else {
2241 if (index < ARRAY_SIZE(sde_hw_rotator_output_pixfmts))
2242 return sde_hw_rotator_output_pixfmts[index];
2243 else
2244 return 0;
2245 }
2246}
2247
2248/*
2249 * sde_hw_rotator_is_valid_pixfmt - verify if the given pixel format is valid
2250 * @mgr: Pointer to rotator manager
2251 * @pixfmt: pixel format to be verified
2252 * @input: true for input port; false for output port
2253 */
2254static int sde_hw_rotator_is_valid_pixfmt(struct sde_rot_mgr *mgr, u32 pixfmt,
2255 bool input)
2256{
2257 int i;
2258
2259 if (input) {
2260 for (i = 0; i < ARRAY_SIZE(sde_hw_rotator_input_pixfmts); i++)
2261 if (sde_hw_rotator_input_pixfmts[i] == pixfmt)
2262 return true;
2263 } else {
2264 for (i = 0; i < ARRAY_SIZE(sde_hw_rotator_output_pixfmts); i++)
2265 if (sde_hw_rotator_output_pixfmts[i] == pixfmt)
2266 return true;
2267 }
2268
2269 return false;
2270}
2271
2272/*
Alan Kwong9487de22016-01-16 22:06:36 -05002273 * sde_hw_rotator_parse_dt - parse r3 specific device tree settings
2274 * @hw_data: Pointer to rotator hw
2275 * @dev: Pointer to platform device
2276 */
2277static int sde_hw_rotator_parse_dt(struct sde_hw_rotator *hw_data,
2278 struct platform_device *dev)
2279{
2280 int ret = 0;
2281 u32 data;
2282
2283 if (!hw_data || !dev)
2284 return -EINVAL;
2285
2286 ret = of_property_read_u32(dev->dev.of_node, "qcom,mdss-rot-mode",
2287 &data);
2288 if (ret) {
2289 SDEROT_DBG("default to regdma off\n");
2290 ret = 0;
2291 hw_data->mode = ROT_REGDMA_OFF;
2292 } else if (data < ROT_REGDMA_MAX) {
2293 SDEROT_DBG("set to regdma mode %d\n", data);
2294 hw_data->mode = data;
2295 } else {
2296 SDEROT_ERR("regdma mode out of range. default to regdma off\n");
2297 hw_data->mode = ROT_REGDMA_OFF;
2298 }
2299
2300 ret = of_property_read_u32(dev->dev.of_node,
2301 "qcom,mdss-highest-bank-bit", &data);
2302 if (ret) {
2303 SDEROT_DBG("default to A5X bank\n");
2304 ret = 0;
2305 hw_data->highest_bank = 2;
2306 } else {
2307 SDEROT_DBG("set highest bank bit to %d\n", data);
2308 hw_data->highest_bank = data;
2309 }
2310
2311 return ret;
2312}
2313
2314/*
2315 * sde_rotator_r3_init - initialize the r3 module
2316 * @mgr: Pointer to rotator manager
2317 *
2318 * This function setup r3 callback functions, parses r3 specific
2319 * device tree settings, installs r3 specific interrupt handler,
2320 * as well as initializes r3 internal data structure.
2321 */
2322int sde_rotator_r3_init(struct sde_rot_mgr *mgr)
2323{
2324 struct sde_hw_rotator *rot;
2325 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
2326 int i;
2327 int ret;
2328
2329 rot = devm_kzalloc(&mgr->pdev->dev, sizeof(*rot), GFP_KERNEL);
2330 if (!rot)
2331 return -ENOMEM;
2332
2333 mgr->hw_data = rot;
2334 mgr->queue_count = ROT_QUEUE_MAX;
2335
2336 rot->mdss_base = mdata->sde_io.base;
2337 rot->pdev = mgr->pdev;
2338
2339 /* Assign ops */
2340 mgr->ops_hw_destroy = sde_hw_rotator_destroy;
2341 mgr->ops_hw_alloc = sde_hw_rotator_alloc_ext;
2342 mgr->ops_hw_free = sde_hw_rotator_free_ext;
2343 mgr->ops_config_hw = sde_hw_rotator_config;
2344 mgr->ops_kickoff_entry = sde_hw_rotator_kickoff;
2345 mgr->ops_wait_for_entry = sde_hw_rotator_wait4done;
2346 mgr->ops_hw_validate_entry = sde_hw_rotator_validate_entry;
2347 mgr->ops_hw_show_caps = sde_hw_rotator_show_caps;
2348 mgr->ops_hw_show_state = sde_hw_rotator_show_state;
2349 mgr->ops_hw_create_debugfs = sde_rotator_r3_create_debugfs;
Alan Kwongda16e442016-08-14 20:47:18 -04002350 mgr->ops_hw_get_pixfmt = sde_hw_rotator_get_pixfmt;
2351 mgr->ops_hw_is_valid_pixfmt = sde_hw_rotator_is_valid_pixfmt;
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04002352 mgr->ops_hw_pre_pmevent = sde_hw_rotator_pre_pmevent;
2353 mgr->ops_hw_post_pmevent = sde_hw_rotator_post_pmevent;
Alan Kwong9487de22016-01-16 22:06:36 -05002354
2355 ret = sde_hw_rotator_parse_dt(mgr->hw_data, mgr->pdev);
2356 if (ret)
2357 goto error_parse_dt;
2358
2359 rot->irq_num = platform_get_irq(mgr->pdev, 0);
2360 if (rot->irq_num < 0) {
2361 SDEROT_ERR("fail to get rotator irq\n");
2362 } else {
2363 if (rot->mode == ROT_REGDMA_OFF)
2364 ret = devm_request_threaded_irq(&mgr->pdev->dev,
2365 rot->irq_num,
2366 sde_hw_rotator_rotirq_handler,
2367 NULL, 0, "sde_rotator_r3", rot);
2368 else
2369 ret = devm_request_threaded_irq(&mgr->pdev->dev,
2370 rot->irq_num,
2371 sde_hw_rotator_regdmairq_handler,
2372 NULL, 0, "sde_rotator_r3", rot);
2373 if (ret) {
2374 SDEROT_ERR("fail to request irq r:%d\n", ret);
2375 rot->irq_num = -1;
2376 } else {
2377 disable_irq(rot->irq_num);
2378 }
2379 }
Alan Kwong818b7fc2016-07-24 22:07:41 -04002380 atomic_set(&rot->irq_enabled, 0);
Alan Kwong9487de22016-01-16 22:06:36 -05002381
2382 setup_rotator_ops(&rot->ops, rot->mode);
2383
2384 spin_lock_init(&rot->rotctx_lock);
2385 spin_lock_init(&rot->rotisr_lock);
2386
2387 /* REGDMA initialization */
2388 if (rot->mode == ROT_REGDMA_OFF) {
2389 for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
2390 rot->cmd_wr_ptr[0][i] = &rot->cmd_queue[
2391 SDE_HW_ROT_REGDMA_SEG_SIZE * i];
2392 } else {
2393 for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
2394 rot->cmd_wr_ptr[ROT_QUEUE_HIGH_PRIORITY][i] =
2395 (u32 *)(rot->mdss_base +
2396 REGDMA_RAM_REGDMA_CMD_RAM +
2397 SDE_HW_ROT_REGDMA_SEG_SIZE * 4 * i);
2398
2399 for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
2400 rot->cmd_wr_ptr[ROT_QUEUE_LOW_PRIORITY][i] =
2401 (u32 *)(rot->mdss_base +
2402 REGDMA_RAM_REGDMA_CMD_RAM +
2403 SDE_HW_ROT_REGDMA_SEG_SIZE * 4 *
2404 (i + SDE_HW_ROT_REGDMA_TOTAL_CTX));
2405 }
2406
2407 atomic_set(&rot->timestamp[0], 0);
2408 atomic_set(&rot->timestamp[1], 0);
Alan Kwong9487de22016-01-16 22:06:36 -05002409
2410 ret = sde_rotator_hw_rev_init(rot);
2411 if (ret)
2412 goto error_hw_rev_init;
2413
Alan Kwong315cd772016-08-03 22:29:42 -04002414 /* set rotator CBCR to shutoff memory/periphery on clock off.*/
Benjamin Chan77aed192016-10-17 17:49:41 -04002415 clk_set_flags(mgr->rot_clk[SDE_ROTATOR_CLK_ROT_CORE].clk,
Alan Kwong315cd772016-08-03 22:29:42 -04002416 CLKFLAG_NORETAIN_MEM);
Benjamin Chan77aed192016-10-17 17:49:41 -04002417 clk_set_flags(mgr->rot_clk[SDE_ROTATOR_CLK_ROT_CORE].clk,
Alan Kwong315cd772016-08-03 22:29:42 -04002418 CLKFLAG_NORETAIN_PERIPH);
2419
Benjamin Chan53e3bce2016-08-31 14:43:29 -04002420 mdata->sde_rot_hw = rot;
Alan Kwong9487de22016-01-16 22:06:36 -05002421 return 0;
2422error_hw_rev_init:
2423 if (rot->irq_num >= 0)
2424 devm_free_irq(&mgr->pdev->dev, rot->irq_num, mdata);
2425 devm_kfree(&mgr->pdev->dev, mgr->hw_data);
2426error_parse_dt:
2427 return ret;
2428}