blob: 1f42bc89ba7831f72bffb653056fbe3fcacbdccd [file] [log] [blame]
Benjamin Chan99eb63b2016-12-21 15:45:26 -05001/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
Alan Kwong9487de22016-01-16 22:06:36 -05002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#define pr_fmt(fmt) "%s: " fmt, __func__
15
16#include <linux/platform_device.h>
17#include <linux/module.h>
18#include <linux/fs.h>
19#include <linux/file.h>
20#include <linux/sync.h>
21#include <linux/delay.h>
22#include <linux/debugfs.h>
23#include <linux/interrupt.h>
24#include <linux/dma-mapping.h>
25#include <linux/dma-buf.h>
26#include <linux/msm_ion.h>
Alan Kwong315cd772016-08-03 22:29:42 -040027#include <linux/clk/msm-clk.h>
Alan Kwong9487de22016-01-16 22:06:36 -050028
29#include "sde_rotator_core.h"
30#include "sde_rotator_util.h"
31#include "sde_rotator_smmu.h"
32#include "sde_rotator_r3.h"
33#include "sde_rotator_r3_internal.h"
34#include "sde_rotator_r3_hwio.h"
35#include "sde_rotator_r3_debug.h"
36#include "sde_rotator_trace.h"
Benjamin Chan53e3bce2016-08-31 14:43:29 -040037#include "sde_rotator_debug.h"
Alan Kwong9487de22016-01-16 22:06:36 -050038
/* UHD resolution threshold (pixels); used to decide traffic shaping */
#define RES_UHD			(3840*2160)

/* traffic shaping clock ticks = finish_time x 19.2MHz */
#define TRAFFIC_SHAPE_CLKTICK_14MS	268800
#define TRAFFIC_SHAPE_CLKTICK_12MS	230400

/* XIN mapping */
#define XIN_SSPP		0
#define XIN_WRITEBACK		1

/* wait for at most 2 vsync for lowest refresh rate (24hz) */
/* NOTE(review): 42ms is ~1 frame at 24Hz, and the multiplier is 32, so the
 * timeout is ~1.34s, not 2 vsyncs as stated — confirm intended value.
 */
#define KOFF_TIMEOUT		msecs_to_jiffies(42 * 32)

/* Macro for constructing the REGDMA command */
/* Emit a single-register write opcode followed by its data word */
#define SDE_REGDMA_WRITE(p, off, data) \
	do { \
		*p++ = REGDMA_OP_REGWRITE | \
			((off) & REGDMA_ADDR_OFFSET_MASK); \
		*p++ = (data); \
	} while (0)

/* Emit a read-modify-write opcode: new = (reg & mask) | data */
#define SDE_REGDMA_MODIFY(p, off, mask, data) \
	do { \
		*p++ = REGDMA_OP_REGMODIFY | \
			((off) & REGDMA_ADDR_OFFSET_MASK); \
		*p++ = (mask); \
		*p++ = (data); \
	} while (0)

/* Emit a block-write header: 'len' data words follow, register address
 * auto-increments for each word.
 */
#define SDE_REGDMA_BLKWRITE_INC(p, off, len) \
	do { \
		*p++ = REGDMA_OP_BLKWRITE_INC | \
			((off) & REGDMA_ADDR_OFFSET_MASK); \
		*p++ = (len); \
	} while (0)

/* Emit one data word of a block write started by SDE_REGDMA_BLKWRITE_INC */
#define SDE_REGDMA_BLKWRITE_DATA(p, data) \
	do { \
		*(p) = (data); \
		(p)++; \
	} while (0)

/* Macro for directly accessing mapped registers */
#define SDE_ROTREG_WRITE(base, off, data) \
	writel_relaxed(data, (base + (off)))

#define SDE_ROTREG_READ(base, off) \
	readl_relaxed(base + (off))
/* Pixel formats supported by the rotator fetch (SSPP/input) engine */
static u32 sde_hw_rotator_input_pixfmts[] = {
	SDE_PIX_FMT_XRGB_8888,
	SDE_PIX_FMT_ARGB_8888,
	SDE_PIX_FMT_ABGR_8888,
	SDE_PIX_FMT_RGBA_8888,
	SDE_PIX_FMT_BGRA_8888,
	SDE_PIX_FMT_RGBX_8888,
	SDE_PIX_FMT_BGRX_8888,
	SDE_PIX_FMT_XBGR_8888,
	SDE_PIX_FMT_RGBA_5551,
	SDE_PIX_FMT_ARGB_1555,
	SDE_PIX_FMT_ABGR_1555,
	SDE_PIX_FMT_BGRA_5551,
	SDE_PIX_FMT_BGRX_5551,
	SDE_PIX_FMT_RGBX_5551,
	SDE_PIX_FMT_XBGR_1555,
	SDE_PIX_FMT_XRGB_1555,
	SDE_PIX_FMT_ARGB_4444,
	SDE_PIX_FMT_RGBA_4444,
	SDE_PIX_FMT_BGRA_4444,
	SDE_PIX_FMT_ABGR_4444,
	SDE_PIX_FMT_RGBX_4444,
	SDE_PIX_FMT_XRGB_4444,
	SDE_PIX_FMT_BGRX_4444,
	SDE_PIX_FMT_XBGR_4444,
	SDE_PIX_FMT_RGB_888,
	SDE_PIX_FMT_BGR_888,
	SDE_PIX_FMT_RGB_565,
	SDE_PIX_FMT_BGR_565,
	SDE_PIX_FMT_Y_CB_CR_H2V2,
	SDE_PIX_FMT_Y_CR_CB_H2V2,
	SDE_PIX_FMT_Y_CR_CB_GH2V2,
	SDE_PIX_FMT_Y_CBCR_H2V2,
	SDE_PIX_FMT_Y_CRCB_H2V2,
	SDE_PIX_FMT_Y_CBCR_H1V2,
	SDE_PIX_FMT_Y_CRCB_H1V2,
	SDE_PIX_FMT_Y_CBCR_H2V1,
	SDE_PIX_FMT_Y_CRCB_H2V1,
	SDE_PIX_FMT_YCBYCR_H2V1,
	SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
	SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
	SDE_PIX_FMT_RGBA_8888_UBWC,
	SDE_PIX_FMT_RGBX_8888_UBWC,
	SDE_PIX_FMT_RGB_565_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
	SDE_PIX_FMT_RGBA_1010102,
	SDE_PIX_FMT_RGBX_1010102,
	SDE_PIX_FMT_ARGB_2101010,
	SDE_PIX_FMT_XRGB_2101010,
	SDE_PIX_FMT_BGRA_1010102,
	SDE_PIX_FMT_BGRX_1010102,
	SDE_PIX_FMT_ABGR_2101010,
	SDE_PIX_FMT_XBGR_2101010,
	SDE_PIX_FMT_RGBA_1010102_UBWC,
	SDE_PIX_FMT_RGBX_1010102_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_P010,
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
};
147
/*
 * Pixel formats supported by the rotator writeback (output) engine.
 * Commented-out entries are fetch-only formats intentionally excluded
 * from the output list.
 */
static u32 sde_hw_rotator_output_pixfmts[] = {
	SDE_PIX_FMT_XRGB_8888,
	SDE_PIX_FMT_ARGB_8888,
	SDE_PIX_FMT_ABGR_8888,
	SDE_PIX_FMT_RGBA_8888,
	SDE_PIX_FMT_BGRA_8888,
	SDE_PIX_FMT_RGBX_8888,
	SDE_PIX_FMT_BGRX_8888,
	SDE_PIX_FMT_XBGR_8888,
	SDE_PIX_FMT_RGBA_5551,
	SDE_PIX_FMT_ARGB_1555,
	SDE_PIX_FMT_ABGR_1555,
	SDE_PIX_FMT_BGRA_5551,
	SDE_PIX_FMT_BGRX_5551,
	SDE_PIX_FMT_RGBX_5551,
	SDE_PIX_FMT_XBGR_1555,
	SDE_PIX_FMT_XRGB_1555,
	SDE_PIX_FMT_ARGB_4444,
	SDE_PIX_FMT_RGBA_4444,
	SDE_PIX_FMT_BGRA_4444,
	SDE_PIX_FMT_ABGR_4444,
	SDE_PIX_FMT_RGBX_4444,
	SDE_PIX_FMT_XRGB_4444,
	SDE_PIX_FMT_BGRX_4444,
	SDE_PIX_FMT_XBGR_4444,
	SDE_PIX_FMT_RGB_888,
	SDE_PIX_FMT_BGR_888,
	SDE_PIX_FMT_RGB_565,
	SDE_PIX_FMT_BGR_565,
	/* SDE_PIX_FMT_Y_CB_CR_H2V2 */
	/* SDE_PIX_FMT_Y_CR_CB_H2V2 */
	/* SDE_PIX_FMT_Y_CR_CB_GH2V2 */
	SDE_PIX_FMT_Y_CBCR_H2V2,
	SDE_PIX_FMT_Y_CRCB_H2V2,
	SDE_PIX_FMT_Y_CBCR_H1V2,
	SDE_PIX_FMT_Y_CRCB_H1V2,
	SDE_PIX_FMT_Y_CBCR_H2V1,
	SDE_PIX_FMT_Y_CRCB_H2V1,
	/* SDE_PIX_FMT_YCBYCR_H2V1 */
	SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
	SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
	SDE_PIX_FMT_RGBA_8888_UBWC,
	SDE_PIX_FMT_RGBX_8888_UBWC,
	SDE_PIX_FMT_RGB_565_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
	SDE_PIX_FMT_RGBA_1010102,
	SDE_PIX_FMT_RGBX_1010102,
	/* SDE_PIX_FMT_ARGB_2101010 */
	/* SDE_PIX_FMT_XRGB_2101010 */
	SDE_PIX_FMT_BGRA_1010102,
	SDE_PIX_FMT_BGRX_1010102,
	/* SDE_PIX_FMT_ABGR_2101010 */
	/* SDE_PIX_FMT_XBGR_2101010 */
	SDE_PIX_FMT_RGBA_1010102_UBWC,
	SDE_PIX_FMT_RGBX_1010102_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_P010,
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
};
207
/* Non-realtime VBIF debug bus test points for the R3 rotator */
static struct sde_rot_vbif_debug_bus nrt_vbif_dbg_bus_r3[] = {
	{0x214, 0x21c, 16, 1, 0x10}, /* arb clients */
	{0x214, 0x21c, 0, 12, 0x13}, /* xin blocks - axi side */
	{0x21c, 0x214, 0, 12, 0xc}, /* xin blocks - clock side */
};
213
/* Register ranges dumped on error for post-mortem debugging */
static struct sde_rot_regdump sde_rot_r3_regdump[] = {
	{ "SDEROT_ROTTOP", SDE_ROT_ROTTOP_OFFSET, 0x100, SDE_ROT_REGDUMP_READ },
	{ "SDEROT_SSPP", SDE_ROT_SSPP_OFFSET, 0x200, SDE_ROT_REGDUMP_READ },
	{ "SDEROT_WB", SDE_ROT_WB_OFFSET, 0x300, SDE_ROT_REGDUMP_READ },
	{ "SDEROT_REGDMA_CSR", SDE_ROT_REGDMA_OFFSET, 0x100,
		SDE_ROT_REGDUMP_READ },
	/*
	 * Need to perform a SW reset to REGDMA in order to access the
	 * REGDMA RAM especially if REGDMA is waiting for Rotator IDLE.
	 * REGDMA RAM should be dumped last.
	 */
	{ "SDEROT_REGDMA_RESET", ROTTOP_SW_RESET_OVERRIDE, 1,
		SDE_ROT_REGDUMP_WRITE },
	{ "SDEROT_REGDMA_RAM", SDE_ROT_REGDMA_RAM_OFFSET, 0x2000,
		SDE_ROT_REGDUMP_READ },
	{ "SDEROT_VBIF_NRT", SDE_ROT_VBIF_NRT_OFFSET, 0x590,
		SDE_ROT_REGDUMP_VBIF },
};
232
/* Invalid software timestamp value for initialization (all-ones sentinel) */
#define SDE_REGDMA_SWTS_INVALID	(~0)
235
236/**
237 * sde_hw_rotator_elapsed_swts - Find difference of 2 software timestamps
238 * @ts_curr: current software timestamp
239 * @ts_prev: previous software timestamp
240 * @return: the amount ts_curr is ahead of ts_prev
241 */
242static int sde_hw_rotator_elapsed_swts(u32 ts_curr, u32 ts_prev)
243{
244 u32 diff = (ts_curr - ts_prev) & SDE_REGDMA_SWTS_MASK;
245
246 return sign_extend32(diff, (SDE_REGDMA_SWTS_SHIFT - 1));
247}
248
/**
 * sde_hw_rotator_pending_swts - Check if the given context is still pending
 * @rot: Pointer to hw rotator
 * @ctx: Pointer to rotator context
 * @pswts: Pointer to returned reference software timestamp, optional
 * @return: true if context has pending requests
 *
 * Compares the context's submission timestamp against the hardware's last
 * retired software timestamp; the context is pending while its timestamp
 * is still ahead of the hardware's.
 */
static int sde_hw_rotator_pending_swts(struct sde_hw_rotator *rot,
		struct sde_hw_rotator_context *ctx, u32 *pswts)
{
	u32 swts;
	int ts_diff;
	bool pending;

	/*
	 * Prefer the timestamp cached from the last regdma completion;
	 * fall back to reading the live timestamp register when no
	 * completion has been recorded yet.
	 */
	if (ctx->last_regdma_timestamp == SDE_REGDMA_SWTS_INVALID)
		swts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);
	else
		swts = ctx->last_regdma_timestamp;

	/* low priority queue's timestamp occupies the upper bit-field */
	if (ctx->q_id == ROT_QUEUE_LOW_PRIORITY)
		swts >>= SDE_REGDMA_SWTS_SHIFT;

	swts &= SDE_REGDMA_SWTS_MASK;

	/* positive diff: hardware has not yet reached ctx->timestamp */
	ts_diff = sde_hw_rotator_elapsed_swts(ctx->timestamp, swts);

	if (pswts)
		*pswts = swts;

	pending = (ts_diff > 0) ? true : false;

	SDEROT_DBG("ts:0x%x, queue_id:%d, swts:0x%x, pending:%d\n",
			ctx->timestamp, ctx->q_id, swts, pending);
	SDEROT_EVTLOG(ctx->timestamp, swts, ctx->q_id, ts_diff);
	return pending;
}
285
/**
 * sde_hw_rotator_enable_irq - Enable hw rotator interrupt with ref. count
 * Also, clear rotator/regdma irq status.
 * @rot: Pointer to hw rotator
 *
 * Only the 0 -> 1 transition of the reference count clears pending status
 * and enables the interrupt line; nested calls merely bump the count.
 *
 * NOTE(review): the atomic_read/atomic_inc pair is not atomic as a unit;
 * presumably callers serialize enable/disable — confirm locking contract.
 */
static void sde_hw_rotator_enable_irq(struct sde_hw_rotator *rot)
{
	SDEROT_DBG("irq_num:%d enabled:%d\n", rot->irq_num,
			atomic_read(&rot->irq_enabled));

	if (!atomic_read(&rot->irq_enabled)) {
		/* clear any stale status before unmasking the interrupt */
		if (rot->mode == ROT_REGDMA_OFF)
			SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
					ROT_DONE_MASK);
		else
			SDE_ROTREG_WRITE(rot->mdss_base,
					REGDMA_CSR_REGDMA_INT_CLEAR, REGDMA_INT_MASK);

		enable_irq(rot->irq_num);
	}
	atomic_inc(&rot->irq_enabled);
}
308
/**
 * sde_hw_rotator_disable_irq - Disable hw rotator interrupt with ref. count
 * Also, clear rotator/regdma irq enable masks.
 * @rot: Pointer to hw rotator
 *
 * Counterpart of sde_hw_rotator_enable_irq(); only the final reference
 * drop (count reaching zero) masks the hardware and disables the line.
 * Unbalanced calls are reported and ignored.
 */
static void sde_hw_rotator_disable_irq(struct sde_hw_rotator *rot)
{
	SDEROT_DBG("irq_num:%d enabled:%d\n", rot->irq_num,
			atomic_read(&rot->irq_enabled));

	/* guard against unbalanced disable */
	if (!atomic_read(&rot->irq_enabled)) {
		SDEROT_ERR("irq %d is already disabled\n", rot->irq_num);
		return;
	}

	if (!atomic_dec_return(&rot->irq_enabled)) {
		if (rot->mode == ROT_REGDMA_OFF)
			SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_EN, 0);
		else
			SDE_ROTREG_WRITE(rot->mdss_base,
					REGDMA_CSR_REGDMA_INT_EN, 0);
		/* disable irq after last pending irq is handled, if any */
		synchronize_irq(rot->irq_num);
		disable_irq_nosync(rot->irq_num);
	}
}
335
/**
 * sde_hw_rotator_dump_status - Dump hw rotator status on error
 * @rot: Pointer to hw rotator
 *
 * Logs the REGDMA control/status registers, per-queue status, UBWC
 * encoder/decoder error status and VBIF halt status to aid debugging of
 * rotator hangs and faults. Read-only; does not alter hardware state.
 */
static void sde_hw_rotator_dump_status(struct sde_hw_rotator *rot)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();

	SDEROT_ERR(
		"op_mode = %x, int_en = %x, int_status = %x\n",
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_OP_MODE),
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_INT_EN),
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_INT_STATUS));

	SDEROT_ERR(
		"ts = %x, q0_status = %x, q1_status = %x, block_status = %x\n",
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_TIMESTAMP_REG),
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_QUEUE_0_STATUS),
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_QUEUE_1_STATUS),
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_BLOCK_STATUS));

	SDEROT_ERR(
		"invalid_cmd_offset = %x, fsm_state = %x\n",
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_INVALID_CMD_RAM_OFFSET),
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_FSM_STATE));

	SDEROT_ERR(
		"UBWC decode status = %x, UBWC encode status = %x\n",
		SDE_ROTREG_READ(rot->mdss_base, ROT_SSPP_UBWC_ERROR_STATUS),
		SDE_ROTREG_READ(rot->mdss_base, ROT_WB_UBWC_ERROR_STATUS));

	SDEROT_ERR("VBIF XIN HALT status = %x VBIF AXI HALT status = %x\n",
		SDE_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL1),
		SDE_VBIF_READ(mdata, MMSS_VBIF_AXI_HALT_CTRL1));
}
380
Alan Kwong9487de22016-01-16 22:06:36 -0500381/**
382 * sde_hw_rotator_get_ctx(): Retrieve rotator context from rotator HW based
383 * on provided session_id. Each rotator has a different session_id.
384 */
385static struct sde_hw_rotator_context *sde_hw_rotator_get_ctx(
386 struct sde_hw_rotator *rot, u32 session_id,
387 enum sde_rot_queue_prio q_id)
388{
389 int i;
390 struct sde_hw_rotator_context *ctx = NULL;
391
392 for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++) {
393 ctx = rot->rotCtx[q_id][i];
394
395 if (ctx && (ctx->session_id == session_id)) {
396 SDEROT_DBG(
397 "rotCtx sloti[%d][%d] ==> ctx:%p | session-id:%d\n",
398 q_id, i, ctx, ctx->session_id);
399 return ctx;
400 }
401 }
402
403 return NULL;
404}
405
/*
 * sde_hw_rotator_map_vaddr - map the debug buffer to kernel space
 * @dbgbuf: Pointer to debug buffer
 * @buf: Pointer to layer buffer structure
 * @data: Pointer to h/w mapped buffer structure
 *
 * Maps plane 0 of the buffer into the kernel virtual address space for
 * debug inspection. On any failure dbgbuf->vaddr remains NULL; callers
 * must check it before dereferencing.
 */
static void sde_hw_rotator_map_vaddr(struct sde_dbg_buf *dbgbuf,
		struct sde_layer_buffer *buf, struct sde_mdp_data *data)
{
	dbgbuf->dmabuf = data->p[0].srcp_dma_buf;
	dbgbuf->buflen = data->p[0].srcp_dma_buf->size;

	dbgbuf->vaddr = NULL;
	dbgbuf->width = buf->width;
	dbgbuf->height = buf->height;

	if (dbgbuf->dmabuf && (dbgbuf->buflen > 0)) {
		dma_buf_begin_cpu_access(dbgbuf->dmabuf, 0, dbgbuf->buflen,
				DMA_FROM_DEVICE);
		/* NOTE(review): dma_buf_kmap may return NULL; vaddr is only
		 * logged here — confirm all users NULL-check it.
		 */
		dbgbuf->vaddr = dma_buf_kmap(dbgbuf->dmabuf, 0);
		SDEROT_DBG("vaddr mapping: 0x%p/%ld w:%d/h:%d\n",
				dbgbuf->vaddr, dbgbuf->buflen,
				dbgbuf->width, dbgbuf->height);
	}
}
431
/*
 * sde_hw_rotator_unmap_vaddr - unmap the debug buffer from kernel space
 * @dbgbuf: Pointer to debug buffer
 *
 * Reverses sde_hw_rotator_map_vaddr() and resets all fields, so the
 * function is safe to call on an already-unmapped or never-mapped buffer.
 */
static void sde_hw_rotator_unmap_vaddr(struct sde_dbg_buf *dbgbuf)
{
	if (dbgbuf->vaddr) {
		/* kunmap must precede end_cpu_access (reverse of map order) */
		dma_buf_kunmap(dbgbuf->dmabuf, 0, dbgbuf->vaddr);
		dma_buf_end_cpu_access(dbgbuf->dmabuf, 0, dbgbuf->buflen,
				DMA_FROM_DEVICE);
	}

	dbgbuf->vaddr = NULL;
	dbgbuf->dmabuf = NULL;
	dbgbuf->buflen = 0;
	dbgbuf->width = 0;
	dbgbuf->height = 0;
}
450
/*
 * sde_hw_rotator_setup_timestamp_packet - setup timestamp writeback command
 * @ctx: Pointer to rotator context
 * @mask: Bit mask location of the timestamp
 * @swts: Software timestamp
 *
 * Appends a dummy 1x1 rotation job to the context's regdma command
 * segment whose only purpose is to update the software timestamp
 * register once the preceding rotation has finished.
 */
static void sde_hw_rotator_setup_timestamp_packet(
		struct sde_hw_rotator_context *ctx, u32 mask, u32 swts)
{
	u32 *wrptr;

	wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	/*
	 * Create a dummy packet write out to 1 location for timestamp
	 * generation.
	 */
	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_SIZE, 6);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x00010001);    /* 1x1 source */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x00010001);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, ctx->ts_addr);
	SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_YSTRIDE0, 4);
	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_FORMAT, 4);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x004037FF);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x03020100);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x80000000);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, ctx->timestamp);
	/*
	 * Must clear secure buffer setting for SW timestamp because
	 * SW timestamp buffer allocation is always non-secure region.
	 */
	if (ctx->is_secure) {
		SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_ADDR_SW_STATUS, 0);
		SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ADDR_SW_STATUS, 0);
	}
	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_WB_DST_FORMAT, 4);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x000037FF);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x03020100);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, ctx->ts_addr);
	SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_YSTRIDE0, 4);
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_SIZE, 0x00010001);
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_IMG_SIZE, 0x00010001);
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_XY, 0);
	SDE_REGDMA_WRITE(wrptr, ROTTOP_DNSC, 0);
	SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, 1);
	/* publish the timestamp only in the caller-selected bit-field */
	SDE_REGDMA_MODIFY(wrptr, REGDMA_TIMESTAMP_REG, mask, swts);
	SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, 1);

	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
}
505
/*
 * sde_hw_rotator_setup_fetchengine - setup fetch engine
 * @ctx: Pointer to rotator context
 * @queue_id: Priority queue identifier
 * @cfg: Fetch configuration
 * @danger_lut: real-time QoS LUT for danger setting (not used)
 * @safe_lut: real-time QoS LUT for safe setting (not used)
 * @dnsc_factor_w: downscale factor for width
 * @dnsc_factor_h: downscale factor for height
 * @flags: Control flag
 *
 * Builds the regdma command sequence that programs the SSPP (source)
 * side of the rotator: source geometry, plane addresses/strides, pixel
 * format/unpack pattern, flip/rotate op mode, fetch block size and
 * secure-session status. Commands are appended to ctx's segment; the
 * hardware is not started here.
 */
static void sde_hw_rotator_setup_fetchengine(struct sde_hw_rotator_context *ctx,
		enum sde_rot_queue_prio queue_id,
		struct sde_hw_rot_sspp_cfg *cfg, u32 danger_lut, u32 safe_lut,
		u32 dnsc_factor_w, u32 dnsc_factor_h, u32 flags)
{
	struct sde_hw_rotator *rot = ctx->rot;
	struct sde_mdp_format_params *fmt;
	struct sde_mdp_data *data;
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	u32 *wrptr;
	u32 opmode = 0;
	u32 chroma_samp = 0;
	u32 src_format = 0;
	u32 unpack = 0;
	u32 width = cfg->img_width;
	u32 height = cfg->img_height;
	u32 fetch_blocksize = 0;
	int i;

	/* in regdma mode, enable regdma engine and its interrupts up front */
	if (ctx->rot->mode == ROT_REGDMA_ON) {
		SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_INT_EN,
				REGDMA_INT_MASK);
		SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_OP_MODE,
				REGDMA_EN);
	}

	wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	/* source image setup */
	if ((flags & SDE_ROT_FLAG_DEINTERLACE)
			&& !(flags & SDE_ROT_FLAG_SOURCE_ROTATED_90)) {
		/* fetch every other line: double stride, halve height */
		for (i = 0; i < cfg->src_plane.num_planes; i++)
			cfg->src_plane.ystride[i] *= 2;
		width *= 2;
		height /= 2;
		/* NOTE(review): width/height are adjusted but not referenced
		 * later in this function — confirm this is intentional.
		 */
	}

	/*
	 * REGDMA BLK write from SRC_SIZE to OP_MODE, total 15 registers
	 */
	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_SIZE, 15);

	/* SRC_SIZE, SRC_IMG_SIZE, SRC_XY, OUT_SIZE, OUT_XY */
	SDE_REGDMA_BLKWRITE_DATA(wrptr,
			cfg->src_rect->w | (cfg->src_rect->h << 16));
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0); /* SRC_IMG_SIZE unused */
	SDE_REGDMA_BLKWRITE_DATA(wrptr,
			cfg->src_rect->x | (cfg->src_rect->y << 16));
	SDE_REGDMA_BLKWRITE_DATA(wrptr,
			cfg->src_rect->w | (cfg->src_rect->h << 16));
	SDE_REGDMA_BLKWRITE_DATA(wrptr,
			cfg->src_rect->x | (cfg->src_rect->y << 16));

	/* SRC_ADDR [0-3], SRC_YSTRIDE [0-1] */
	data = cfg->data;
	for (i = 0; i < SDE_ROT_MAX_PLANES; i++)
		SDE_REGDMA_BLKWRITE_DATA(wrptr, data->p[i].addr);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->src_plane.ystride[0] |
			(cfg->src_plane.ystride[1] << 16));
	SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->src_plane.ystride[2] |
			(cfg->src_plane.ystride[3] << 16));

	/* UNUSED, write 0 */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);

	/* setup source format */
	fmt = cfg->fmt;

	/* swap H2V1/H1V2 chroma siting when the source was pre-rotated 90 */
	chroma_samp = fmt->chroma_sample;
	if (flags & SDE_ROT_FLAG_SOURCE_ROTATED_90) {
		if (chroma_samp == SDE_MDP_CHROMA_H2V1)
			chroma_samp = SDE_MDP_CHROMA_H1V2;
		else if (chroma_samp == SDE_MDP_CHROMA_H1V2)
			chroma_samp = SDE_MDP_CHROMA_H2V1;
	}

	/* pack per-component bit depths and plane layout into SRC_FORMAT */
	src_format = (chroma_samp << 23) |
		(fmt->fetch_planes << 19) |
		(fmt->bits[C3_ALPHA] << 6) |
		(fmt->bits[C2_R_Cr] << 4) |
		(fmt->bits[C1_B_Cb] << 2) |
		(fmt->bits[C0_G_Y] << 0);

	if (fmt->alpha_enable &&
			(fmt->fetch_planes == SDE_MDP_PLANE_INTERLEAVED))
		src_format |= BIT(8); /* SRCC3_EN */

	src_format |= ((fmt->unpack_count - 1) << 12) |
			(fmt->unpack_tight << 17) |
			(fmt->unpack_align_msb << 18) |
			((fmt->bpp - 1) << 9) |
			((fmt->frame_format & 3) << 30);

	if (flags & SDE_ROT_FLAG_ROT_90)
		src_format |= BIT(11); /* ROT90 */

	if (sde_mdp_is_ubwc_format(fmt))
		opmode |= BIT(0); /* BWC_DEC_EN */

	/* if this is YUV pixel format, enable CSC */
	if (sde_mdp_is_yuv_format(fmt))
		src_format |= BIT(15); /* SRC_COLOR_SPACE */

	if (fmt->pixel_mode == SDE_MDP_PIXEL_10BIT)
		src_format |= BIT(14); /* UNPACK_DX_FORMAT */

	/* SRC_FORMAT */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, src_format);

	/* setup source unpack pattern */
	unpack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
			(fmt->element[1] << 8) | (fmt->element[0] << 0);

	/* SRC_UNPACK_PATTERN */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, unpack);

	/* setup source op mode */
	if (flags & SDE_ROT_FLAG_FLIP_LR)
		opmode |= BIT(13); /* FLIP_MODE L/R horizontal flip */
	if (flags & SDE_ROT_FLAG_FLIP_UD)
		opmode |= BIT(14); /* FLIP_MODE U/D vertical flip */
	opmode |= BIT(31); /* MDSS_MDP_OP_PE_OVERRIDE */

	/* SRC_OP_MODE */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, opmode);

	/* setup source fetch config, TP10 uses different block size */
	if (test_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map) &&
			(dnsc_factor_w == 1) && (dnsc_factor_h == 1)) {
		/* extended block sizes only valid without downscaling */
		if (sde_mdp_is_tp10_format(fmt))
			fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_144_EXT;
		else
			fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_192_EXT;
	} else {
		if (sde_mdp_is_tp10_format(fmt))
			fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_96;
		else
			fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_128;
	}

	SDE_REGDMA_WRITE(wrptr, ROT_SSPP_FETCH_CONFIG,
			fetch_blocksize |
			SDE_ROT_SSPP_FETCH_CONFIG_RESET_VALUE |
			((rot->highest_bank & 0x3) << 18));

	/* setup source buffer plane security status */
	if (flags & (SDE_ROT_FLAG_SECURE_OVERLAY_SESSION |
			SDE_ROT_FLAG_SECURE_CAMERA_SESSION)) {
		SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_ADDR_SW_STATUS, 0xF);
		ctx->is_secure = true;
	} else {
		SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_ADDR_SW_STATUS, 0);
		ctx->is_secure = false;
	}

	/*
	 * Determine if traffic shaping is required. Only enable traffic
	 * shaping when content is 4k@30fps. The actual traffic shaping
	 * bandwidth calculation is done in output setup.
	 */
	if (((cfg->src_rect->w * cfg->src_rect->h) >= RES_UHD) &&
			(cfg->fps <= 30)) {
		SDEROT_DBG("Enable Traffic Shaper\n");
		ctx->is_traffic_shaping = true;
	} else {
		SDEROT_DBG("Disable Traffic Shaper\n");
		ctx->is_traffic_shaping = false;
	}

	/* Update command queue write ptr */
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
}
689
/*
 * sde_hw_rotator_setup_wbengine - setup writeback engine
 * @ctx: Pointer to rotator context
 * @queue_id: Priority queue identifier
 * @cfg: Writeback configuration
 * @flags: Control flag
 *
 * Builds the regdma command sequence that programs the writeback
 * (destination) side: output format/pack pattern, plane addresses and
 * strides, output ROI, downscale factors, rotate-90 op mode and the
 * optional write-client traffic shaper. Commands are appended to ctx's
 * segment; the hardware is not started here.
 */
static void sde_hw_rotator_setup_wbengine(struct sde_hw_rotator_context *ctx,
		enum sde_rot_queue_prio queue_id,
		struct sde_hw_rot_wb_cfg *cfg,
		u32 flags)
{
	struct sde_mdp_format_params *fmt;
	u32 *wrptr;
	u32 pack = 0;
	u32 dst_format = 0;
	int i;

	wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	fmt = cfg->fmt;

	/* setup WB DST format */
	dst_format |= (fmt->chroma_sample << 23) |
			(fmt->fetch_planes << 19) |
			(fmt->bits[C3_ALPHA] << 6) |
			(fmt->bits[C2_R_Cr] << 4) |
			(fmt->bits[C1_B_Cb] << 2) |
			(fmt->bits[C0_G_Y] << 0);

	/* alpha control */
	if (fmt->bits[C3_ALPHA] || fmt->alpha_enable) {
		dst_format |= BIT(8);
		if (!fmt->alpha_enable) {
			/* X-channel format: force constant alpha of 0 */
			dst_format |= BIT(14);
			SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ALPHA_X_VALUE, 0);
		}
	}

	dst_format |= ((fmt->unpack_count - 1) << 12) |
			(fmt->unpack_tight << 17) |
			(fmt->unpack_align_msb << 18) |
			((fmt->bpp - 1) << 9) |
			((fmt->frame_format & 3) << 30);

	if (sde_mdp_is_yuv_format(fmt))
		dst_format |= BIT(15);

	if (fmt->pixel_mode == SDE_MDP_PIXEL_10BIT)
		dst_format |= BIT(21); /* PACK_DX_FORMAT */

	/*
	 * REGDMA BLK write, from DST_FORMAT to DST_YSTRIDE 1, total 9 regs
	 */
	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_WB_DST_FORMAT, 9);

	/* DST_FORMAT */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, dst_format);

	/* DST_OP_MODE */
	if (sde_mdp_is_ubwc_format(fmt))
		SDE_REGDMA_BLKWRITE_DATA(wrptr, BIT(0));
	else
		SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);

	/* DST_PACK_PATTERN */
	pack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
			(fmt->element[1] << 8) | (fmt->element[0] << 0);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, pack);

	/* DST_ADDR [0-3], DST_YSTRIDE [0-1] */
	for (i = 0; i < SDE_ROT_MAX_PLANES; i++)
		SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->data->p[i].addr);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->dst_plane.ystride[0] |
			(cfg->dst_plane.ystride[1] << 16));
	SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->dst_plane.ystride[2] |
			(cfg->dst_plane.ystride[3] << 16));

	/* setup WB out image size and ROI */
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_IMG_SIZE,
			cfg->img_width | (cfg->img_height << 16));
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_SIZE,
			cfg->dst_rect->w | (cfg->dst_rect->h << 16));
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_XY,
			cfg->dst_rect->x | (cfg->dst_rect->y << 16));

	/* mark destination as secure for secure sessions */
	if (flags & (SDE_ROT_FLAG_SECURE_OVERLAY_SESSION |
			SDE_ROT_FLAG_SECURE_CAMERA_SESSION))
		SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ADDR_SW_STATUS, 0x1);
	else
		SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ADDR_SW_STATUS, 0);

	/*
	 * setup Downscale factor
	 */
	SDE_REGDMA_WRITE(wrptr, ROTTOP_DNSC,
			cfg->v_downscale_factor |
			(cfg->h_downscale_factor << 16));

	/* write config setup for bank configuration */
	SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_WRITE_CONFIG,
			(ctx->rot->highest_bank & 0x3) << 8);

	/* op mode 0x3 enables the rotate-90 path, 0x1 is pass-through */
	if (flags & SDE_ROT_FLAG_ROT_90)
		SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, 0x3);
	else
		SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, 0x1);

	/* setup traffic shaper for 4k 30fps content */
	if (ctx->is_traffic_shaping) {
		u32 bw;

		/*
		 * Target to finish in 12ms, and we need to set number of bytes
		 * per clock tick for traffic shaping.
		 * Each clock tick runs @ 19.2MHz, so the total number of
		 * clock ticks in 12ms is 12ms x 19.2MHz ==> 230400
		 * (TRAFFIC_SHAPE_CLKTICK_12MS). Finally, calculate the byte
		 * count per clock tick based on resolution, bpp and
		 * compression ratio.
		 */
		bw = cfg->dst_rect->w * cfg->dst_rect->h;

		if (fmt->chroma_sample == SDE_MDP_CHROMA_420)
			bw = (bw * 3) / 2;
		else
			bw *= fmt->bpp;

		bw /= TRAFFIC_SHAPE_CLKTICK_12MS;
		if (bw > 0xFF)
			bw = 0xFF;
		SDE_REGDMA_WRITE(wrptr, ROT_WB_TRAFFIC_SHAPER_WR_CLIENT,
				BIT(31) | bw);
		SDEROT_DBG("Enable ROT_WB Traffic Shaper:%d\n", bw);
	} else {
		SDE_REGDMA_WRITE(wrptr, ROT_WB_TRAFFIC_SHAPER_WR_CLIENT, 0);
		SDEROT_DBG("Disable ROT_WB Traffic Shaper\n");
	}

	/* Update command queue write ptr */
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
}
831
/*
 * sde_hw_rotator_start_no_regdma - start non-regdma operation
 * @ctx: Pointer to rotator context
 * @queue_id: Priority queue identifier
 * @return: the context's timestamp
 *
 * Software fallback for when the REGDMA engine is off: walks the command
 * segment built by the setup functions and replays each encoded opcode
 * (write / modify / block-write) directly to the memory-mapped rotator
 * registers via the CPU. The final ROTTOP_START_CTRL write kicks off the
 * rotation as soon as the stream finishes.
 */
static u32 sde_hw_rotator_start_no_regdma(struct sde_hw_rotator_context *ctx,
		enum sde_rot_queue_prio queue_id)
{
	struct sde_hw_rotator *rot = ctx->rot;
	u32 *wrptr;
	u32 *rdptr;
	u8 *addr;
	u32 mask;
	u32 blksize;

	rdptr = sde_hw_rotator_get_regdma_segment_base(ctx);
	wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	/* arm done-interrupt only when an irq line is available */
	if (rot->irq_num >= 0) {
		SDE_REGDMA_WRITE(wrptr, ROTTOP_INTR_EN, 1);
		SDE_REGDMA_WRITE(wrptr, ROTTOP_INTR_CLEAR, 1);
		reinit_completion(&ctx->rot_comp);
		sde_hw_rotator_enable_irq(rot);
	}

	SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, 1);

	/* Update command queue write ptr */
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);

	SDEROT_DBG("BEGIN %d\n", ctx->timestamp);
	/* Write all command stream to Rotator blocks */
	/* Rotator will start right away after command stream finish writing */
	while (rdptr < wrptr) {
		u32 op = REGDMA_OP_MASK & *rdptr;

		switch (op) {
		case REGDMA_OP_NOP:
			SDEROT_DBG("NOP\n");
			rdptr++;
			break;
		case REGDMA_OP_REGWRITE:
			/* [opcode|offset][data] */
			SDEROT_DBG("REGW %6.6x %8.8x\n",
					rdptr[0] & REGDMA_ADDR_OFFSET_MASK,
					rdptr[1]);
			addr = rot->mdss_base +
					(*rdptr++ & REGDMA_ADDR_OFFSET_MASK);
			writel_relaxed(*rdptr++, addr);
			break;
		case REGDMA_OP_REGMODIFY:
			/* [opcode|offset][mask][data]: reg = (reg & mask) | data */
			SDEROT_DBG("REGM %6.6x %8.8x %8.8x\n",
					rdptr[0] & REGDMA_ADDR_OFFSET_MASK,
					rdptr[1], rdptr[2]);
			addr = rot->mdss_base +
					(*rdptr++ & REGDMA_ADDR_OFFSET_MASK);
			mask = *rdptr++;
			writel_relaxed((readl_relaxed(addr) & mask) | *rdptr++,
					addr);
			break;
		case REGDMA_OP_BLKWRITE_SINGLE:
			/* [opcode|offset][len][data...] to one register */
			SDEROT_DBG("BLKWS %6.6x %6.6x\n",
					rdptr[0] & REGDMA_ADDR_OFFSET_MASK,
					rdptr[1]);
			addr = rot->mdss_base +
					(*rdptr++ & REGDMA_ADDR_OFFSET_MASK);
			blksize = *rdptr++;
			while (blksize--) {
				SDEROT_DBG("DATA %8.8x\n", rdptr[0]);
				writel_relaxed(*rdptr++, addr);
			}
			break;
		case REGDMA_OP_BLKWRITE_INC:
			/* [opcode|offset][len][data...] auto-incrementing */
			SDEROT_DBG("BLKWI %6.6x %6.6x\n",
					rdptr[0] & REGDMA_ADDR_OFFSET_MASK,
					rdptr[1]);
			addr = rot->mdss_base +
					(*rdptr++ & REGDMA_ADDR_OFFSET_MASK);
			blksize = *rdptr++;
			while (blksize--) {
				SDEROT_DBG("DATA %8.8x\n", rdptr[0]);
				writel_relaxed(*rdptr++, addr);
				addr += 4;
			}
			break;
		default:
			/* Other not supported OP mode
			 * Skip data for now for unrecognized OP mode
			 */
			SDEROT_DBG("UNDEFINED\n");
			rdptr++;
			break;
		}
	}
	SDEROT_DBG("END %d\n", ctx->timestamp);

	return ctx->timestamp;
}
929
/*
 * sde_hw_rotator_start_regdma - start regdma operation
 * @ctx: Pointer to rotator context
 * @queue_id: Priority queue identifier
 *
 * Terminates the pending command packet with a ROT_START, submits it to
 * the REGDMA queue matching @queue_id, then appends and submits a second
 * small packet that updates the software timestamp once the job is done.
 * Return: timestamp assigned to this context's job.
 */
static u32 sde_hw_rotator_start_regdma(struct sde_hw_rotator_context *ctx,
		enum sde_rot_queue_prio queue_id)
{
	struct sde_hw_rotator *rot = ctx->rot;
	u32 *wrptr;
	u32 regdmaSlot;
	u32 offset;
	long length;
	long ts_length;
	u32 enableInt;
	u32 swts = 0;
	u32 mask = 0;

	wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	/*
	 * Last ROT command must be ROT_START before REGDMA start
	 */
	SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, 1);
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);

	/*
	 * Start REGDMA with command offset and size
	 */
	regdmaSlot = sde_hw_rotator_get_regdma_ctxidx(ctx);
	/* length in 32-bit words; offset is relative to REGDMA command RAM */
	length = ((long)wrptr - (long)ctx->regdma_base) / 4;
	offset = (u32)(ctx->regdma_base - (u32 *)(rot->mdss_base +
				REGDMA_RAM_REGDMA_CMD_RAM));
	/*
	 * Interrupt select (bits 31:30) alternates with the timestamp parity
	 * so back-to-back jobs use distinct REGDMA interrupt lines.
	 */
	enableInt = ((ctx->timestamp & 1) + 1) << 30;

	SDEROT_DBG(
		"regdma(%d)[%d] <== INT:0x%X|length:%ld|offset:0x%X, ts:%X\n",
		queue_id, regdmaSlot, enableInt, length, offset,
		ctx->timestamp);

	/* ensure the command packet is issued before the submit command */
	wmb();

	/* REGDMA submission for current context */
	if (queue_id == ROT_QUEUE_HIGH_PRIORITY) {
		SDE_ROTREG_WRITE(rot->mdss_base,
				REGDMA_CSR_REGDMA_QUEUE_0_SUBMIT,
				(length << 14) | offset);
		/* high-priority queue owns the low half of the SW timestamp */
		swts = ctx->timestamp;
		mask = ~SDE_REGDMA_SWTS_MASK;
	} else {
		SDE_ROTREG_WRITE(rot->mdss_base,
				REGDMA_CSR_REGDMA_QUEUE_1_SUBMIT,
				(length << 14) | offset);
		/* low-priority queue owns the high half of the SW timestamp */
		swts = ctx->timestamp << SDE_REGDMA_SWTS_SHIFT;
		mask = ~(SDE_REGDMA_SWTS_MASK << SDE_REGDMA_SWTS_SHIFT);
	}

	/* Write timestamp after previous rotator job finished */
	sde_hw_rotator_setup_timestamp_packet(ctx, mask, swts);
	offset += length;
	ts_length = sde_hw_rotator_get_regdma_segment(ctx) - wrptr;
	WARN_ON((length + ts_length) > SDE_HW_ROT_REGDMA_SEG_SIZE);

	/* ensure command packet is issue before the submit command */
	wmb();

	/* submit the timestamp packet with completion interrupt enabled */
	if (queue_id == ROT_QUEUE_HIGH_PRIORITY) {
		SDE_ROTREG_WRITE(rot->mdss_base,
				REGDMA_CSR_REGDMA_QUEUE_0_SUBMIT,
				enableInt | (ts_length << 14) | offset);
	} else {
		SDE_ROTREG_WRITE(rot->mdss_base,
				REGDMA_CSR_REGDMA_QUEUE_1_SUBMIT,
				enableInt | (ts_length << 14) | offset);
	}

	/* Update command queue write ptr */
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);

	return ctx->timestamp;
}
1012
/*
 * sde_hw_rotator_wait_done_no_regdma - wait for non-regdma completion
 * @ctx: Pointer to rotator context
 * @queue_id: Priority queue identifier
 * @flag: Option flag
 *
 * Blocks until the rotator signals done (via completion when an IRQ line
 * is available, otherwise by polling ROTTOP_STATUS), then reports success
 * or error based on the final status register value.
 * Return: 0 on success, -ENODEV if the HW reported an error.
 */
static u32 sde_hw_rotator_wait_done_no_regdma(
		struct sde_hw_rotator_context *ctx,
		enum sde_rot_queue_prio queue_id, u32 flag)
{
	struct sde_hw_rotator *rot = ctx->rot;
	int rc = 0;
	u32 sts = 0;
	u32 status;
	unsigned long flags;

	if (rot->irq_num >= 0) {
		/* IRQ path: ISR signals rot_comp when the job finishes */
		SDEROT_DBG("Wait for Rotator completion\n");
		rc = wait_for_completion_timeout(&ctx->rot_comp,
					KOFF_TIMEOUT);

		/* read status under rotisr_lock so it pairs with the ISR */
		spin_lock_irqsave(&rot->rotisr_lock, flags);
		status = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
		if (rc == 0) {
			/*
			 * Timeout, there might be error,
			 * or rotator still busy
			 */
			if (status & ROT_BUSY_BIT)
				SDEROT_ERR(
					"Timeout waiting for rotator done\n");
			else if (status & ROT_ERROR_BIT)
				SDEROT_ERR(
					"Rotator report error status\n");
			else
				SDEROT_WARN(
					"Timeout waiting, but rotator job is done!!\n");

			/* no further IRQ expected for this job */
			sde_hw_rotator_disable_irq(rot);
		}
		spin_unlock_irqrestore(&rot->rotisr_lock, flags);
	} else {
		/* polling path: up to 200 x 500us = 100ms busy-wait */
		int cnt = 200;

		do {
			udelay(500);
			status = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
			cnt--;
		} while ((cnt > 0) && (status & ROT_BUSY_BIT)
				&& ((status & ROT_ERROR_BIT) == 0));

		if (status & ROT_ERROR_BIT)
			SDEROT_ERR("Rotator error\n");
		else if (status & ROT_BUSY_BIT)
			SDEROT_ERR("Rotator busy\n");

		/* ack the done interrupt manually since no ISR ran */
		SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
				ROT_DONE_CLEAR);
	}

	sts = (status & ROT_ERROR_BIT) ? -ENODEV : 0;

	return sts;
}
1077
/*
 * sde_hw_rotator_wait_done_regdma - wait for regdma completion
 * @ctx: Pointer to rotator context
 * @queue_id: Priority queue identifier
 * @flag: Option flag
 *
 * Blocks until this context's software timestamp is no longer pending
 * (IRQ path via regdma_waitq, otherwise polling), decodes the last ISR
 * status, and dumps HW state on timeout or REGDMA error.
 * Return: 0 on success, -ENODEV on REGDMA error/timeout.
 */
static u32 sde_hw_rotator_wait_done_regdma(
		struct sde_hw_rotator_context *ctx,
		enum sde_rot_queue_prio queue_id, u32 flag)
{
	struct sde_hw_rotator *rot = ctx->rot;
	int rc = 0;
	u32 status;
	u32 last_isr;
	u32 last_ts;
	u32 int_id;
	u32 swts;
	u32 sts = 0;
	unsigned long flags;

	if (rot->irq_num >= 0) {
		/* IRQ path: woken by the regdma ISR once swts catches up */
		SDEROT_DBG("Wait for REGDMA completion, ctx:%p, ts:%X\n",
				ctx, ctx->timestamp);
		rc = wait_event_timeout(ctx->regdma_waitq,
				!sde_hw_rotator_pending_swts(rot, ctx, &swts),
				KOFF_TIMEOUT);

		ATRACE_INT("sde_rot_done", 0);
		spin_lock_irqsave(&rot->rotisr_lock, flags);

		/* snapshot of what the ISR recorded for this context */
		last_isr = ctx->last_regdma_isr_status;
		last_ts = ctx->last_regdma_timestamp;
		status = last_isr & REGDMA_INT_MASK;
		int_id = last_ts & 1;
		SDEROT_DBG("INT status:0x%X, INT id:%d, timestamp:0x%X\n",
				status, int_id, last_ts);

		if (rc == 0 || (status & REGDMA_INT_ERR_MASK)) {
			bool pending;

			/* timeout or HW-reported error: collect diagnostics */
			pending = sde_hw_rotator_pending_swts(rot, ctx, &swts);
			SDEROT_ERR(
				"Timeout wait for regdma interrupt status, ts:0x%X/0x%X pending:%d\n",
				ctx->timestamp, swts, pending);

			if (status & REGDMA_WATCHDOG_INT)
				SDEROT_ERR("REGDMA watchdog interrupt\n");
			else if (status & REGDMA_INVALID_DESCRIPTOR)
				SDEROT_ERR("REGDMA invalid descriptor\n");
			else if (status & REGDMA_INCOMPLETE_CMD)
				SDEROT_ERR("REGDMA incomplete command\n");
			else if (status & REGDMA_INVALID_CMD)
				SDEROT_ERR("REGDMA invalid command\n");

			sde_hw_rotator_dump_status(rot);
			status = ROT_ERROR_BIT;
		} else {
			/* rc == 1: condition became true only at timeout check */
			if (rc == 1)
				SDEROT_WARN(
					"REGDMA done but no irq, ts:0x%X/0x%X\n",
					ctx->timestamp, swts);
			status = 0;
		}

		spin_unlock_irqrestore(&rot->rotisr_lock, flags);
	} else {
		/* polling path: up to 200 x 500us = 100ms busy-wait */
		int cnt = 200;
		bool pending;

		do {
			udelay(500);
			last_isr = SDE_ROTREG_READ(rot->mdss_base,
					REGDMA_CSR_REGDMA_INT_STATUS);
			pending = sde_hw_rotator_pending_swts(rot, ctx, &swts);
			cnt--;
		} while ((cnt > 0) && pending &&
				((last_isr & REGDMA_INT_ERR_MASK) == 0));

		if (last_isr & REGDMA_INT_ERR_MASK) {
			SDEROT_ERR("Rotator error, ts:0x%X/0x%X status:%x\n",
				ctx->timestamp, swts, last_isr);
			sde_hw_rotator_dump_status(rot);
			status = ROT_ERROR_BIT;
		} else if (pending) {
			SDEROT_ERR("Rotator timeout, ts:0x%X/0x%X status:%x\n",
				ctx->timestamp, swts, last_isr);
			sde_hw_rotator_dump_status(rot);
			status = ROT_ERROR_BIT;
		} else {
			status = 0;
		}

		/* ack whatever interrupt bits we observed */
		SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_INT_CLEAR,
				last_isr);
	}

	sts = (status & ROT_ERROR_BIT) ? -ENODEV : 0;

	/* on fatal error, trigger full debug-bus capture / panic handler */
	if (status & ROT_ERROR_BIT)
		SDEROT_EVTLOG_TOUT_HANDLER("rot", "vbif_dbg_bus", "panic");

	return sts;
}
1181
1182/*
1183 * setup_rotator_ops - setup callback functions for the low-level HAL
1184 * @ops: Pointer to low-level ops callback
1185 * @mode: Operation mode (non-regdma or regdma)
1186 */
1187static void setup_rotator_ops(struct sde_hw_rotator_ops *ops,
1188 enum sde_rotator_regdma_mode mode)
1189{
1190 ops->setup_rotator_fetchengine = sde_hw_rotator_setup_fetchengine;
1191 ops->setup_rotator_wbengine = sde_hw_rotator_setup_wbengine;
1192 if (mode == ROT_REGDMA_ON) {
1193 ops->start_rotator = sde_hw_rotator_start_regdma;
1194 ops->wait_rotator_done = sde_hw_rotator_wait_done_regdma;
1195 } else {
1196 ops->start_rotator = sde_hw_rotator_start_no_regdma;
1197 ops->wait_rotator_done = sde_hw_rotator_wait_done_no_regdma;
1198 }
1199}
1200
1201/*
1202 * sde_hw_rotator_swts_create - create software timestamp buffer
1203 * @rot: Pointer to rotator hw
1204 *
1205 * This buffer is used by regdma to keep track of last completed command.
1206 */
1207static int sde_hw_rotator_swts_create(struct sde_hw_rotator *rot)
1208{
1209 int rc = 0;
1210 struct ion_handle *handle;
1211 struct sde_mdp_img_data *data;
Abhijit Kulkarni298c8232016-09-26 22:32:10 -07001212 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
Alan Kwong9487de22016-01-16 22:06:36 -05001213 u32 bufsize = sizeof(int) * SDE_HW_ROT_REGDMA_TOTAL_CTX * 2;
1214
Abhijit Kulkarni298c8232016-09-26 22:32:10 -07001215 rot->iclient = mdata->iclient;
Alan Kwong9487de22016-01-16 22:06:36 -05001216
1217 handle = ion_alloc(rot->iclient, bufsize, SZ_4K,
1218 ION_HEAP(ION_SYSTEM_HEAP_ID), 0);
1219 if (IS_ERR_OR_NULL(handle)) {
1220 SDEROT_ERR("ion memory allocation failed\n");
1221 return -ENOMEM;
1222 }
1223
1224 data = &rot->swts_buf;
1225 data->len = bufsize;
1226 data->srcp_dma_buf = ion_share_dma_buf(rot->iclient, handle);
1227 if (IS_ERR(data->srcp_dma_buf)) {
1228 SDEROT_ERR("ion_dma_buf setup failed\n");
1229 rc = -ENOMEM;
1230 goto imap_err;
1231 }
1232
1233 sde_smmu_ctrl(1);
1234
1235 data->srcp_attachment = sde_smmu_dma_buf_attach(data->srcp_dma_buf,
1236 &rot->pdev->dev, SDE_IOMMU_DOMAIN_ROT_UNSECURE);
1237 if (IS_ERR_OR_NULL(data->srcp_attachment)) {
1238 SDEROT_ERR("sde_smmu_dma_buf_attach error\n");
1239 rc = -ENOMEM;
1240 goto err_put;
1241 }
1242
1243 data->srcp_table = dma_buf_map_attachment(data->srcp_attachment,
1244 DMA_BIDIRECTIONAL);
1245 if (IS_ERR_OR_NULL(data->srcp_table)) {
1246 SDEROT_ERR("dma_buf_map_attachment error\n");
1247 rc = -ENOMEM;
1248 goto err_detach;
1249 }
1250
1251 rc = sde_smmu_map_dma_buf(data->srcp_dma_buf, data->srcp_table,
1252 SDE_IOMMU_DOMAIN_ROT_UNSECURE, &data->addr,
1253 &data->len, DMA_BIDIRECTIONAL);
1254 if (IS_ERR_VALUE(rc)) {
1255 SDEROT_ERR("smmu_map_dma_buf failed: (%d)\n", rc);
1256 goto err_unmap;
1257 }
1258
1259 dma_buf_begin_cpu_access(data->srcp_dma_buf, 0, data->len,
1260 DMA_FROM_DEVICE);
1261 rot->swts_buffer = dma_buf_kmap(data->srcp_dma_buf, 0);
1262 if (IS_ERR_OR_NULL(rot->swts_buffer)) {
1263 SDEROT_ERR("ion kernel memory mapping failed\n");
1264 rc = IS_ERR(rot->swts_buffer);
1265 goto kmap_err;
1266 }
1267
1268 data->mapped = true;
1269 SDEROT_DBG("swts buffer mapped: %pad/%lx va:%p\n", &data->addr,
1270 data->len, rot->swts_buffer);
1271
1272 ion_free(rot->iclient, handle);
1273
1274 sde_smmu_ctrl(0);
1275
1276 return rc;
1277kmap_err:
1278 sde_smmu_unmap_dma_buf(data->srcp_table, SDE_IOMMU_DOMAIN_ROT_UNSECURE,
1279 DMA_FROM_DEVICE, data->srcp_dma_buf);
1280err_unmap:
1281 dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table,
1282 DMA_FROM_DEVICE);
1283err_detach:
1284 dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment);
1285err_put:
1286 dma_buf_put(data->srcp_dma_buf);
1287 data->srcp_dma_buf = NULL;
1288imap_err:
1289 ion_free(rot->iclient, handle);
1290
1291 return rc;
1292}
1293
1294/*
1295 * sde_hw_rotator_swtc_destroy - destroy software timestamp buffer
1296 * @rot: Pointer to rotator hw
1297 */
1298static void sde_hw_rotator_swtc_destroy(struct sde_hw_rotator *rot)
1299{
1300 struct sde_mdp_img_data *data;
1301
1302 data = &rot->swts_buf;
1303
1304 dma_buf_end_cpu_access(data->srcp_dma_buf, 0, data->len,
1305 DMA_FROM_DEVICE);
1306 dma_buf_kunmap(data->srcp_dma_buf, 0, rot->swts_buffer);
1307
1308 sde_smmu_unmap_dma_buf(data->srcp_table, SDE_IOMMU_DOMAIN_ROT_UNSECURE,
1309 DMA_FROM_DEVICE, data->srcp_dma_buf);
1310 dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table,
1311 DMA_FROM_DEVICE);
1312 dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment);
1313 dma_buf_put(data->srcp_dma_buf);
1314 data->srcp_dma_buf = NULL;
1315}
1316
1317/*
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04001318 * sde_hw_rotator_pre_pmevent - SDE rotator core will call this before a
1319 * PM event occurs
1320 * @mgr: Pointer to rotator manager
1321 * @pmon: Boolean indicate an on/off power event
1322 */
1323void sde_hw_rotator_pre_pmevent(struct sde_rot_mgr *mgr, bool pmon)
1324{
1325 struct sde_hw_rotator *rot;
1326 u32 l_ts, h_ts, swts, hwts;
1327 u32 rotsts, regdmasts;
1328
1329 /*
1330 * Check last HW timestamp with SW timestamp before power off event.
1331 * If there is a mismatch, that will be quite possible the rotator HW
1332 * is either hang or not finishing last submitted job. In that case,
1333 * it is best to do a timeout eventlog to capture some good events
1334 * log data for analysis.
1335 */
1336 if (!pmon && mgr && mgr->hw_data) {
1337 rot = mgr->hw_data;
1338 h_ts = atomic_read(&rot->timestamp[ROT_QUEUE_HIGH_PRIORITY]);
1339 l_ts = atomic_read(&rot->timestamp[ROT_QUEUE_LOW_PRIORITY]);
1340
1341 /* contruct the combined timstamp */
1342 swts = (h_ts & SDE_REGDMA_SWTS_MASK) |
1343 ((l_ts & SDE_REGDMA_SWTS_MASK) <<
1344 SDE_REGDMA_SWTS_SHIFT);
1345
1346 /* Need to turn on clock to access rotator register */
1347 sde_rotator_clk_ctrl(mgr, true);
1348 hwts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);
1349 regdmasts = SDE_ROTREG_READ(rot->mdss_base,
1350 REGDMA_CSR_REGDMA_BLOCK_STATUS);
1351 rotsts = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
1352
1353 SDEROT_DBG(
1354 "swts:0x%x, hwts:0x%x, regdma-sts:0x%x, rottop-sts:0x%x\n",
1355 swts, hwts, regdmasts, rotsts);
1356 SDEROT_EVTLOG(swts, hwts, regdmasts, rotsts);
1357
1358 if ((swts != hwts) && ((regdmasts & REGDMA_BUSY) ||
1359 (rotsts & ROT_STATUS_MASK))) {
1360 SDEROT_ERR(
1361 "Mismatch SWTS with HWTS: swts:0x%x, hwts:0x%x, regdma-sts:0x%x, rottop-sts:0x%x\n",
1362 swts, hwts, regdmasts, rotsts);
1363 SDEROT_EVTLOG_TOUT_HANDLER("rot", "vbif_dbg_bus",
1364 "panic");
1365 }
1366
1367 /* Turn off rotator clock after checking rotator registers */
1368 sde_rotator_clk_ctrl(mgr, false);
1369 }
1370}
1371
1372/*
1373 * sde_hw_rotator_post_pmevent - SDE rotator core will call this after a
1374 * PM event occurs
1375 * @mgr: Pointer to rotator manager
1376 * @pmon: Boolean indicate an on/off power event
1377 */
1378void sde_hw_rotator_post_pmevent(struct sde_rot_mgr *mgr, bool pmon)
1379{
1380 struct sde_hw_rotator *rot;
1381 u32 l_ts, h_ts, swts;
1382
1383 /*
1384 * After a power on event, the rotator HW is reset to default setting.
1385 * It is necessary to synchronize the SW timestamp with the HW.
1386 */
1387 if (pmon && mgr && mgr->hw_data) {
1388 rot = mgr->hw_data;
1389 h_ts = atomic_read(&rot->timestamp[ROT_QUEUE_HIGH_PRIORITY]);
1390 l_ts = atomic_read(&rot->timestamp[ROT_QUEUE_LOW_PRIORITY]);
1391
1392 /* contruct the combined timstamp */
1393 swts = (h_ts & SDE_REGDMA_SWTS_MASK) |
1394 ((l_ts & SDE_REGDMA_SWTS_MASK) <<
1395 SDE_REGDMA_SWTS_SHIFT);
1396
1397 SDEROT_DBG("swts:0x%x, h_ts:0x%x, l_ts;0x%x\n",
1398 swts, h_ts, l_ts);
1399 SDEROT_EVTLOG(swts, h_ts, l_ts);
1400 rot->reset_hw_ts = true;
1401 rot->last_hw_ts = swts;
1402 }
1403}
1404
1405/*
Alan Kwong9487de22016-01-16 22:06:36 -05001406 * sde_hw_rotator_destroy - Destroy hw rotator and free allocated resources
1407 * @mgr: Pointer to rotator manager
1408 */
1409static void sde_hw_rotator_destroy(struct sde_rot_mgr *mgr)
1410{
1411 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
1412 struct sde_hw_rotator *rot;
1413
1414 if (!mgr || !mgr->pdev || !mgr->hw_data) {
1415 SDEROT_ERR("null parameters\n");
1416 return;
1417 }
1418
1419 rot = mgr->hw_data;
1420 if (rot->irq_num >= 0)
1421 devm_free_irq(&mgr->pdev->dev, rot->irq_num, mdata);
1422
1423 if (rot->mode == ROT_REGDMA_ON)
1424 sde_hw_rotator_swtc_destroy(rot);
1425
1426 devm_kfree(&mgr->pdev->dev, mgr->hw_data);
1427 mgr->hw_data = NULL;
1428}
1429
1430/*
1431 * sde_hw_rotator_alloc_ext - allocate rotator resource from rotator hw
1432 * @mgr: Pointer to rotator manager
1433 * @pipe_id: pipe identifier (not used)
1434 * @wb_id: writeback identifier/priority queue identifier
1435 *
1436 * This function allocates a new hw rotator resource for the given priority.
1437 */
1438static struct sde_rot_hw_resource *sde_hw_rotator_alloc_ext(
1439 struct sde_rot_mgr *mgr, u32 pipe_id, u32 wb_id)
1440{
1441 struct sde_hw_rotator_resource_info *resinfo;
1442
1443 if (!mgr || !mgr->hw_data) {
1444 SDEROT_ERR("null parameters\n");
1445 return NULL;
1446 }
1447
1448 /*
1449 * Allocate rotator resource info. Each allocation is per
1450 * HW priority queue
1451 */
1452 resinfo = devm_kzalloc(&mgr->pdev->dev, sizeof(*resinfo), GFP_KERNEL);
1453 if (!resinfo) {
1454 SDEROT_ERR("Failed allocation HW rotator resource info\n");
1455 return NULL;
1456 }
1457
1458 resinfo->rot = mgr->hw_data;
1459 resinfo->hw.wb_id = wb_id;
1460 atomic_set(&resinfo->hw.num_active, 0);
1461 init_waitqueue_head(&resinfo->hw.wait_queue);
1462
1463 /* For non-regdma, only support one active session */
1464 if (resinfo->rot->mode == ROT_REGDMA_OFF)
1465 resinfo->hw.max_active = 1;
1466 else {
1467 resinfo->hw.max_active = SDE_HW_ROT_REGDMA_TOTAL_CTX - 1;
1468
1469 if (resinfo->rot->iclient == NULL)
1470 sde_hw_rotator_swts_create(resinfo->rot);
1471 }
1472
Alan Kwongf987ea32016-07-06 12:11:44 -04001473 if (resinfo->rot->irq_num >= 0)
Alan Kwong818b7fc2016-07-24 22:07:41 -04001474 sde_hw_rotator_enable_irq(resinfo->rot);
Alan Kwongf987ea32016-07-06 12:11:44 -04001475
Alan Kwong9487de22016-01-16 22:06:36 -05001476 SDEROT_DBG("New rotator resource:%p, priority:%d\n",
1477 resinfo, wb_id);
1478
1479 return &resinfo->hw;
1480}
1481
1482/*
1483 * sde_hw_rotator_free_ext - free the given rotator resource
1484 * @mgr: Pointer to rotator manager
1485 * @hw: Pointer to rotator resource
1486 */
1487static void sde_hw_rotator_free_ext(struct sde_rot_mgr *mgr,
1488 struct sde_rot_hw_resource *hw)
1489{
1490 struct sde_hw_rotator_resource_info *resinfo;
1491
1492 if (!mgr || !mgr->hw_data)
1493 return;
1494
1495 resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
1496
1497 SDEROT_DBG(
1498 "Free rotator resource:%p, priority:%d, active:%d, pending:%d\n",
1499 resinfo, hw->wb_id, atomic_read(&hw->num_active),
1500 hw->pending_count);
1501
Alan Kwongf987ea32016-07-06 12:11:44 -04001502 if (resinfo->rot->irq_num >= 0)
Alan Kwong818b7fc2016-07-24 22:07:41 -04001503 sde_hw_rotator_disable_irq(resinfo->rot);
Alan Kwongf987ea32016-07-06 12:11:44 -04001504
Alan Kwong9487de22016-01-16 22:06:36 -05001505 devm_kfree(&mgr->pdev->dev, resinfo);
1506}
1507
/*
 * sde_hw_rotator_alloc_rotctx - allocate rotator context
 * @rot: Pointer to rotator hw
 * @hw: Pointer to rotator resource
 * @session_id: Session identifier of this context
 *
 * This function allocates a new rotator context for the given session id.
 * The context is assigned the next per-queue SW timestamp, bound to a
 * regdma command segment and a timestamp slot in the swts buffer, and
 * registered in the lookup table for later kickoff/wait calls.
 * Return: new context, or NULL on allocation failure.
 */
static struct sde_hw_rotator_context *sde_hw_rotator_alloc_rotctx(
		struct sde_hw_rotator *rot,
		struct sde_rot_hw_resource *hw,
		u32 session_id)
{
	struct sde_hw_rotator_context *ctx;

	/* Allocate rotator context */
	ctx = devm_kzalloc(&rot->pdev->dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		SDEROT_ERR("Failed allocation HW rotator context\n");
		return NULL;
	}

	/* queue id comes from the resource's writeback/priority id */
	ctx->rot = rot;
	ctx->q_id = hw->wb_id;
	ctx->session_id = session_id;
	ctx->hwres = hw;
	/* monotonically increasing per-queue timestamp, masked to swts width */
	ctx->timestamp = atomic_add_return(1, &rot->timestamp[ctx->q_id]);
	ctx->timestamp &= SDE_REGDMA_SWTS_MASK;
	ctx->is_secure = false;

	/* command segment slot is derived from the timestamp (ctxidx) */
	ctx->regdma_base = rot->cmd_wr_ptr[ctx->q_id]
			[sde_hw_rotator_get_regdma_ctxidx(ctx)];
	ctx->regdma_wrptr = ctx->regdma_base;
	/* per-context slot in the swts buffer regdma writes on completion */
	ctx->ts_addr = (dma_addr_t)((u32 *)rot->swts_buf.addr +
			ctx->q_id * SDE_HW_ROT_REGDMA_TOTAL_CTX +
			sde_hw_rotator_get_regdma_ctxidx(ctx));

	ctx->last_regdma_timestamp = SDE_REGDMA_SWTS_INVALID;

	init_completion(&ctx->rot_comp);
	init_waitqueue_head(&ctx->regdma_waitq);

	/* Store rotator context for lookup purpose */
	sde_hw_rotator_put_ctx(ctx);

	SDEROT_DBG(
		"New rot CTX:%p, ctxidx:%d, session-id:%d, prio:%d, timestamp:%X, active:%d\n",
		ctx, sde_hw_rotator_get_regdma_ctxidx(ctx), ctx->session_id,
		ctx->q_id, ctx->timestamp,
		atomic_read(&ctx->hwres->num_active));

	return ctx;
}
1561
1562/*
1563 * sde_hw_rotator_free_rotctx - free the given rotator context
1564 * @rot: Pointer to rotator hw
1565 * @ctx: Pointer to rotator context
1566 */
1567static void sde_hw_rotator_free_rotctx(struct sde_hw_rotator *rot,
1568 struct sde_hw_rotator_context *ctx)
1569{
1570 if (!rot || !ctx)
1571 return;
1572
1573 SDEROT_DBG(
1574 "Free rot CTX:%p, ctxidx:%d, session-id:%d, prio:%d, timestamp:%X, active:%d\n",
1575 ctx, sde_hw_rotator_get_regdma_ctxidx(ctx), ctx->session_id,
1576 ctx->q_id, ctx->timestamp,
1577 atomic_read(&ctx->hwres->num_active));
1578
Benjamin Chanc3e185f2016-11-08 21:48:21 -05001579 /* Clear rotator context from lookup purpose */
1580 sde_hw_rotator_clr_ctx(ctx);
Alan Kwong9487de22016-01-16 22:06:36 -05001581
1582 devm_kfree(&rot->pdev->dev, ctx);
1583}
1584
1585/*
1586 * sde_hw_rotator_config - configure hw for the given rotation entry
1587 * @hw: Pointer to rotator resource
1588 * @entry: Pointer to rotation entry
1589 *
1590 * This function setup the fetch/writeback/rotator blocks, as well as VBIF
1591 * based on the given rotation entry.
1592 */
1593static int sde_hw_rotator_config(struct sde_rot_hw_resource *hw,
1594 struct sde_rot_entry *entry)
1595{
1596 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
1597 struct sde_hw_rotator *rot;
1598 struct sde_hw_rotator_resource_info *resinfo;
1599 struct sde_hw_rotator_context *ctx;
1600 struct sde_hw_rot_sspp_cfg sspp_cfg;
1601 struct sde_hw_rot_wb_cfg wb_cfg;
1602 u32 danger_lut = 0; /* applicable for realtime client only */
1603 u32 safe_lut = 0; /* applicable for realtime client only */
1604 u32 flags = 0;
1605 struct sde_rotation_item *item;
1606
1607 if (!hw || !entry) {
1608 SDEROT_ERR("null hw resource/entry\n");
1609 return -EINVAL;
1610 }
1611
1612 resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
1613 rot = resinfo->rot;
1614 item = &entry->item;
1615
1616 ctx = sde_hw_rotator_alloc_rotctx(rot, hw, item->session_id);
1617 if (!ctx) {
1618 SDEROT_ERR("Failed allocating rotator context!!\n");
1619 return -EINVAL;
1620 }
1621
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04001622 if (rot->reset_hw_ts) {
1623 SDEROT_EVTLOG(rot->last_hw_ts);
1624 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG,
1625 rot->last_hw_ts);
1626 /* ensure write is issued to the rotator HW */
1627 wmb();
1628 rot->reset_hw_ts = false;
1629 }
1630
Alan Kwong9487de22016-01-16 22:06:36 -05001631 flags = (item->flags & SDE_ROTATION_FLIP_LR) ?
1632 SDE_ROT_FLAG_FLIP_LR : 0;
1633 flags |= (item->flags & SDE_ROTATION_FLIP_UD) ?
1634 SDE_ROT_FLAG_FLIP_UD : 0;
1635 flags |= (item->flags & SDE_ROTATION_90) ?
1636 SDE_ROT_FLAG_ROT_90 : 0;
1637 flags |= (item->flags & SDE_ROTATION_DEINTERLACE) ?
1638 SDE_ROT_FLAG_DEINTERLACE : 0;
1639 flags |= (item->flags & SDE_ROTATION_SECURE) ?
1640 SDE_ROT_FLAG_SECURE_OVERLAY_SESSION : 0;
Abhijit Kulkarni298c8232016-09-26 22:32:10 -07001641 flags |= (item->flags & SDE_ROTATION_SECURE_CAMERA) ?
1642 SDE_ROT_FLAG_SECURE_CAMERA_SESSION : 0;
1643
Alan Kwong9487de22016-01-16 22:06:36 -05001644
1645 sspp_cfg.img_width = item->input.width;
1646 sspp_cfg.img_height = item->input.height;
Benjamin Chan99eb63b2016-12-21 15:45:26 -05001647 sspp_cfg.fps = entry->perf->config.frame_rate;
1648 sspp_cfg.bw = entry->perf->bw;
Alan Kwong9487de22016-01-16 22:06:36 -05001649 sspp_cfg.fmt = sde_get_format_params(item->input.format);
1650 if (!sspp_cfg.fmt) {
1651 SDEROT_ERR("null format\n");
1652 return -EINVAL;
1653 }
1654 sspp_cfg.src_rect = &item->src_rect;
1655 sspp_cfg.data = &entry->src_buf;
1656 sde_mdp_get_plane_sizes(sspp_cfg.fmt, item->input.width,
1657 item->input.height, &sspp_cfg.src_plane,
1658 0, /* No bwc_mode */
1659 (flags & SDE_ROT_FLAG_SOURCE_ROTATED_90) ?
1660 true : false);
1661
1662 rot->ops.setup_rotator_fetchengine(ctx, ctx->q_id,
Benjamin Chanfb6faa32016-08-16 17:21:01 -04001663 &sspp_cfg, danger_lut, safe_lut,
1664 entry->dnsc_factor_w, entry->dnsc_factor_h, flags);
Alan Kwong9487de22016-01-16 22:06:36 -05001665
1666 wb_cfg.img_width = item->output.width;
1667 wb_cfg.img_height = item->output.height;
Benjamin Chan99eb63b2016-12-21 15:45:26 -05001668 wb_cfg.fps = entry->perf->config.frame_rate;
1669 wb_cfg.bw = entry->perf->bw;
Alan Kwong9487de22016-01-16 22:06:36 -05001670 wb_cfg.fmt = sde_get_format_params(item->output.format);
1671 wb_cfg.dst_rect = &item->dst_rect;
1672 wb_cfg.data = &entry->dst_buf;
1673 sde_mdp_get_plane_sizes(wb_cfg.fmt, item->output.width,
1674 item->output.height, &wb_cfg.dst_plane,
1675 0, /* No bwc_mode */
1676 (flags & SDE_ROT_FLAG_ROT_90) ? true : false);
1677
1678 wb_cfg.v_downscale_factor = entry->dnsc_factor_h;
1679 wb_cfg.h_downscale_factor = entry->dnsc_factor_w;
1680
1681 rot->ops.setup_rotator_wbengine(ctx, ctx->q_id, &wb_cfg, flags);
1682
1683 /* setup VA mapping for debugfs */
1684 if (rot->dbgmem) {
1685 sde_hw_rotator_map_vaddr(&ctx->src_dbgbuf,
1686 &item->input,
1687 &entry->src_buf);
1688
1689 sde_hw_rotator_map_vaddr(&ctx->dst_dbgbuf,
1690 &item->output,
1691 &entry->dst_buf);
1692 }
1693
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04001694 SDEROT_EVTLOG(ctx->timestamp, flags,
1695 item->input.width, item->input.height,
Benjamin Chan53e3bce2016-08-31 14:43:29 -04001696 item->output.width, item->output.height,
Benjamin Chan59a06052017-01-12 18:06:03 -05001697 entry->src_buf.p[0].addr, entry->dst_buf.p[0].addr,
Benjamin Chan1b94f952017-01-23 17:42:30 -05001698 item->input.format, item->output.format,
1699 entry->perf->config.frame_rate);
Benjamin Chan53e3bce2016-08-31 14:43:29 -04001700
Alan Kwong9487de22016-01-16 22:06:36 -05001701 if (mdata->default_ot_rd_limit) {
1702 struct sde_mdp_set_ot_params ot_params;
1703
1704 memset(&ot_params, 0, sizeof(struct sde_mdp_set_ot_params));
1705 ot_params.xin_id = XIN_SSPP;
1706 ot_params.num = 0; /* not used */
Alan Kwongeffb5ee2016-03-12 19:47:45 -05001707 ot_params.width = entry->perf->config.input.width;
1708 ot_params.height = entry->perf->config.input.height;
1709 ot_params.fps = entry->perf->config.frame_rate;
Alan Kwong9487de22016-01-16 22:06:36 -05001710 ot_params.reg_off_vbif_lim_conf = MMSS_VBIF_RD_LIM_CONF;
1711 ot_params.reg_off_mdp_clk_ctrl =
1712 MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0;
1713 ot_params.bit_off_mdp_clk_ctrl =
1714 MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN0;
Benjamin Chan99eb63b2016-12-21 15:45:26 -05001715 ot_params.fmt = ctx->is_traffic_shaping ?
1716 SDE_PIX_FMT_ABGR_8888 :
1717 entry->perf->config.input.format;
Benjamin Chan1b94f952017-01-23 17:42:30 -05001718 ot_params.rotsts_base = rot->mdss_base + ROTTOP_STATUS;
1719 ot_params.rotsts_busy_mask = ROT_BUSY_BIT;
Alan Kwong9487de22016-01-16 22:06:36 -05001720 sde_mdp_set_ot_limit(&ot_params);
1721 }
1722
1723 if (mdata->default_ot_wr_limit) {
1724 struct sde_mdp_set_ot_params ot_params;
1725
1726 memset(&ot_params, 0, sizeof(struct sde_mdp_set_ot_params));
1727 ot_params.xin_id = XIN_WRITEBACK;
1728 ot_params.num = 0; /* not used */
Alan Kwongeffb5ee2016-03-12 19:47:45 -05001729 ot_params.width = entry->perf->config.input.width;
1730 ot_params.height = entry->perf->config.input.height;
1731 ot_params.fps = entry->perf->config.frame_rate;
Alan Kwong9487de22016-01-16 22:06:36 -05001732 ot_params.reg_off_vbif_lim_conf = MMSS_VBIF_WR_LIM_CONF;
1733 ot_params.reg_off_mdp_clk_ctrl =
1734 MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0;
1735 ot_params.bit_off_mdp_clk_ctrl =
1736 MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN1;
Benjamin Chan99eb63b2016-12-21 15:45:26 -05001737 ot_params.fmt = ctx->is_traffic_shaping ?
1738 SDE_PIX_FMT_ABGR_8888 :
1739 entry->perf->config.input.format;
Benjamin Chan1b94f952017-01-23 17:42:30 -05001740 ot_params.rotsts_base = rot->mdss_base + ROTTOP_STATUS;
1741 ot_params.rotsts_busy_mask = ROT_BUSY_BIT;
Alan Kwong9487de22016-01-16 22:06:36 -05001742 sde_mdp_set_ot_limit(&ot_params);
1743 }
1744
1745 if (test_bit(SDE_QOS_PER_PIPE_LUT, mdata->sde_qos_map)) {
1746 u32 qos_lut = 0; /* low priority for nrt read client */
1747
1748 trace_rot_perf_set_qos_luts(XIN_SSPP, sspp_cfg.fmt->format,
1749 qos_lut, sde_mdp_is_linear_format(sspp_cfg.fmt));
1750
1751 SDE_ROTREG_WRITE(rot->mdss_base, ROT_SSPP_CREQ_LUT, qos_lut);
1752 }
1753
Jayant Shekhardee61a02017-02-08 11:59:00 +05301754 /* Set CDP control registers to 0 if CDP is disabled */
1755 if (!test_bit(SDE_QOS_CDP, mdata->sde_qos_map)) {
1756 SDE_ROTREG_WRITE(rot->mdss_base, ROT_SSPP_CDP_CNTL, 0x0);
1757 SDE_ROTREG_WRITE(rot->mdss_base, ROT_WB_CDP_CNTL, 0x0);
1758 }
1759
Alan Kwong9487de22016-01-16 22:06:36 -05001760 if (mdata->npriority_lvl > 0) {
1761 u32 mask, reg_val, i, vbif_qos;
1762
1763 for (i = 0; i < mdata->npriority_lvl; i++) {
1764 reg_val = SDE_VBIF_READ(mdata,
1765 MMSS_VBIF_NRT_VBIF_QOS_REMAP_00 + i*4);
1766 mask = 0x3 << (XIN_SSPP * 2);
1767 reg_val &= ~(mask);
1768 vbif_qos = mdata->vbif_nrt_qos[i];
1769 reg_val |= vbif_qos << (XIN_SSPP * 2);
1770 /* ensure write is issued after the read operation */
1771 mb();
1772 SDE_VBIF_WRITE(mdata,
1773 MMSS_VBIF_NRT_VBIF_QOS_REMAP_00 + i*4,
1774 reg_val);
1775 }
1776 }
1777
1778 /* Enable write gather for writeback to remove write gaps, which
1779 * may hang AXI/BIMC/SDE.
1780 */
1781 SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_WRITE_GATHTER_EN,
1782 BIT(XIN_WRITEBACK));
1783
1784 return 0;
1785}
1786
1787/*
1788 * sde_hw_rotator_kickoff - kickoff processing on the given entry
1789 * @hw: Pointer to rotator resource
1790 * @entry: Pointer to rotation entry
1791 */
1792static int sde_hw_rotator_kickoff(struct sde_rot_hw_resource *hw,
1793 struct sde_rot_entry *entry)
1794{
1795 struct sde_hw_rotator *rot;
1796 struct sde_hw_rotator_resource_info *resinfo;
1797 struct sde_hw_rotator_context *ctx;
Alan Kwong9487de22016-01-16 22:06:36 -05001798
1799 if (!hw || !entry) {
1800 SDEROT_ERR("null hw resource/entry\n");
1801 return -EINVAL;
1802 }
1803
1804 resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
1805 rot = resinfo->rot;
1806
1807 /* Lookup rotator context from session-id */
1808 ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id, hw->wb_id);
1809 if (!ctx) {
1810 SDEROT_ERR("Cannot locate rotator ctx from sesison id:%d\n",
1811 entry->item.session_id);
Benjamin Chan62b94ed2016-08-18 23:55:21 -04001812 return -EINVAL;
Alan Kwong9487de22016-01-16 22:06:36 -05001813 }
Alan Kwong9487de22016-01-16 22:06:36 -05001814
Alan Kwong9487de22016-01-16 22:06:36 -05001815 rot->ops.start_rotator(ctx, ctx->q_id);
1816
1817 return 0;
1818}
1819
1820/*
1821 * sde_hw_rotator_wait4done - wait for completion notification
1822 * @hw: Pointer to rotator resource
1823 * @entry: Pointer to rotation entry
1824 *
1825 * This function blocks until the given entry is complete, error
1826 * is detected, or timeout.
1827 */
1828static int sde_hw_rotator_wait4done(struct sde_rot_hw_resource *hw,
1829 struct sde_rot_entry *entry)
1830{
1831 struct sde_hw_rotator *rot;
1832 struct sde_hw_rotator_resource_info *resinfo;
1833 struct sde_hw_rotator_context *ctx;
1834 int ret;
1835
1836 if (!hw || !entry) {
1837 SDEROT_ERR("null hw resource/entry\n");
1838 return -EINVAL;
1839 }
1840
1841 resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
1842 rot = resinfo->rot;
1843
1844 /* Lookup rotator context from session-id */
1845 ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id, hw->wb_id);
1846 if (!ctx) {
1847 SDEROT_ERR("Cannot locate rotator ctx from sesison id:%d\n",
1848 entry->item.session_id);
Benjamin Chan62b94ed2016-08-18 23:55:21 -04001849 return -EINVAL;
Alan Kwong9487de22016-01-16 22:06:36 -05001850 }
Alan Kwong9487de22016-01-16 22:06:36 -05001851
1852 ret = rot->ops.wait_rotator_done(ctx, ctx->q_id, 0);
1853
Alan Kwong9487de22016-01-16 22:06:36 -05001854 if (rot->dbgmem) {
1855 sde_hw_rotator_unmap_vaddr(&ctx->src_dbgbuf);
1856 sde_hw_rotator_unmap_vaddr(&ctx->dst_dbgbuf);
1857 }
1858
1859 /* Current rotator context job is finished, time to free up*/
1860 sde_hw_rotator_free_rotctx(rot, ctx);
1861
1862 return ret;
1863}
1864
1865/*
1866 * sde_rotator_hw_rev_init - setup feature and/or capability bitmask
1867 * @rot: Pointer to hw rotator
1868 *
1869 * This function initializes feature and/or capability bitmask based on
1870 * h/w version read from the device.
1871 */
1872static int sde_rotator_hw_rev_init(struct sde_hw_rotator *rot)
1873{
1874 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
1875 u32 hw_version;
1876
1877 if (!mdata) {
1878 SDEROT_ERR("null rotator data\n");
1879 return -EINVAL;
1880 }
1881
1882 hw_version = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_HW_VERSION);
1883 SDEROT_DBG("hw version %8.8x\n", hw_version);
1884
1885 clear_bit(SDE_QOS_PER_PIPE_IB, mdata->sde_qos_map);
1886 set_bit(SDE_QOS_OVERHEAD_FACTOR, mdata->sde_qos_map);
1887 clear_bit(SDE_QOS_CDP, mdata->sde_qos_map);
1888 set_bit(SDE_QOS_OTLIM, mdata->sde_qos_map);
1889 set_bit(SDE_QOS_PER_PIPE_LUT, mdata->sde_qos_map);
1890 clear_bit(SDE_QOS_SIMPLIFIED_PREFILL, mdata->sde_qos_map);
1891
1892 set_bit(SDE_CAPS_R3_WB, mdata->sde_caps_map);
1893
Benjamin Chanfb6faa32016-08-16 17:21:01 -04001894 if (hw_version != SDE_ROT_TYPE_V1_0) {
1895 SDEROT_DBG("Supporting 1.5 downscale for SDE Rotator\n");
1896 set_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map);
1897 }
1898
Abhijit Kulkarni298c8232016-09-26 22:32:10 -07001899 set_bit(SDE_CAPS_SEC_ATTACH_DETACH_SMMU, mdata->sde_caps_map);
1900
Benjamin Chan53e3bce2016-08-31 14:43:29 -04001901 mdata->nrt_vbif_dbg_bus = nrt_vbif_dbg_bus_r3;
1902 mdata->nrt_vbif_dbg_bus_size =
1903 ARRAY_SIZE(nrt_vbif_dbg_bus_r3);
1904
1905 mdata->regdump = sde_rot_r3_regdump;
1906 mdata->regdump_size = ARRAY_SIZE(sde_rot_r3_regdump);
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04001907 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG, 0);
Alan Kwong9487de22016-01-16 22:06:36 -05001908 return 0;
1909}
1910
1911/*
1912 * sde_hw_rotator_rotirq_handler - non-regdma interrupt handler
1913 * @irq: Interrupt number
1914 * @ptr: Pointer to private handle provided during registration
1915 *
1916 * This function services rotator interrupt and wakes up waiting client
1917 * with pending rotation requests already submitted to h/w.
1918 */
1919static irqreturn_t sde_hw_rotator_rotirq_handler(int irq, void *ptr)
1920{
1921 struct sde_hw_rotator *rot = ptr;
1922 struct sde_hw_rotator_context *ctx;
1923 irqreturn_t ret = IRQ_NONE;
1924 u32 isr;
1925
1926 isr = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_INTR_STATUS);
1927
1928 SDEROT_DBG("intr_status = %8.8x\n", isr);
1929
1930 if (isr & ROT_DONE_MASK) {
1931 if (rot->irq_num >= 0)
Alan Kwong818b7fc2016-07-24 22:07:41 -04001932 sde_hw_rotator_disable_irq(rot);
Alan Kwong9487de22016-01-16 22:06:36 -05001933 SDEROT_DBG("Notify rotator complete\n");
1934
1935 /* Normal rotator only 1 session, no need to lookup */
1936 ctx = rot->rotCtx[0][0];
1937 WARN_ON(ctx == NULL);
1938 complete_all(&ctx->rot_comp);
1939
1940 spin_lock(&rot->rotisr_lock);
1941 SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
1942 ROT_DONE_CLEAR);
1943 spin_unlock(&rot->rotisr_lock);
1944 ret = IRQ_HANDLED;
1945 }
1946
1947 return ret;
1948}
1949
1950/*
1951 * sde_hw_rotator_regdmairq_handler - regdma interrupt handler
1952 * @irq: Interrupt number
1953 * @ptr: Pointer to private handle provided during registration
1954 *
1955 * This function services rotator interrupt, decoding the source of
1956 * events (high/low priority queue), and wakes up all waiting clients
1957 * with pending rotation requests already submitted to h/w.
1958 */
static irqreturn_t sde_hw_rotator_regdmairq_handler(int irq, void *ptr)
{
	struct sde_hw_rotator *rot = ptr;
	struct sde_hw_rotator_context *ctx;
	irqreturn_t ret = IRQ_NONE;
	u32 isr;
	u32 ts;
	u32 q_id;

	isr = SDE_ROTREG_READ(rot->mdss_base, REGDMA_CSR_REGDMA_INT_STATUS);
	/* acknowledge interrupt before reading latest timestamp */
	SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_INT_CLEAR, isr);
	/*
	 * Timestamp register packs the SW timestamps of both queues:
	 * the low SDE_REGDMA_SWTS_MASK bits belong to the high-priority
	 * queue and the bits above SDE_REGDMA_SWTS_SHIFT to the
	 * low-priority queue (see the extraction below).
	 */
	ts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);

	SDEROT_DBG("intr_status = %8.8x, sw_TS:%X\n", isr, ts);

	/* Any REGDMA status, including error and watchdog timer, should
	 * trigger and wake up waiting thread
	 */
	if (isr & (REGDMA_INT_HIGH_MASK | REGDMA_INT_LOW_MASK)) {
		spin_lock(&rot->rotisr_lock);

		/*
		 * Obtain rotator context based on timestamp from regdma
		 * and low/high interrupt status
		 */
		if (isr & REGDMA_INT_HIGH_MASK) {
			q_id = ROT_QUEUE_HIGH_PRIORITY;
			ts = ts & SDE_REGDMA_SWTS_MASK;
		} else if (isr & REGDMA_INT_LOW_MASK) {
			q_id = ROT_QUEUE_LOW_PRIORITY;
			ts = (ts >> SDE_REGDMA_SWTS_SHIFT) &
				SDE_REGDMA_SWTS_MASK;
		} else {
			/* unreachable given the enclosing mask check above */
			SDEROT_ERR("unknown ISR status: isr=0x%X\n", isr);
			goto done_isr_handle;
		}
		/* context table is indexed by the low bits of the SW ts */
		ctx = rot->rotCtx[q_id][ts & SDE_HW_ROT_REGDMA_SEG_MASK];

		/*
		 * Wake up all waiting context from the current and previous
		 * SW Timestamp.
		 */
		while (ctx &&
			sde_hw_rotator_elapsed_swts(ctx->timestamp, ts) >= 0) {
			/* record status/ts so waiters can inspect them */
			ctx->last_regdma_isr_status = isr;
			ctx->last_regdma_timestamp = ts;
			SDEROT_DBG(
				"regdma complete: ctx:%p, ts:%X\n", ctx, ts);
			wake_up_all(&ctx->regdma_waitq);

			/* walk backwards to the previous timestamp slot */
			ts = (ts - 1) & SDE_REGDMA_SWTS_MASK;
			ctx = rot->rotCtx[q_id]
				[ts & SDE_HW_ROT_REGDMA_SEG_MASK];
		};

done_isr_handle:
		spin_unlock(&rot->rotisr_lock);
		ret = IRQ_HANDLED;
	} else if (isr & REGDMA_INT_ERR_MASK) {
		/*
		 * For REGDMA Err, we save the isr info and wake up
		 * all waiting contexts
		 */
		int i, j;

		SDEROT_ERR(
			"regdma err isr:%X, wake up all waiting contexts\n",
			isr);

		spin_lock(&rot->rotisr_lock);

		for (i = 0; i < ROT_QUEUE_MAX; i++) {
			for (j = 0; j < SDE_HW_ROT_REGDMA_TOTAL_CTX; j++) {
				ctx = rot->rotCtx[i][j];
				/* only notify contexts still pending (no
				 * isr status recorded yet)
				 */
				if (ctx && ctx->last_regdma_isr_status == 0) {
					ctx->last_regdma_isr_status = isr;
					ctx->last_regdma_timestamp = ts;
					wake_up_all(&ctx->regdma_waitq);
					SDEROT_DBG("Wakeup rotctx[%d][%d]:%p\n",
							i, j, ctx);
				}
			}
		}

		spin_unlock(&rot->rotisr_lock);
		ret = IRQ_HANDLED;
	}

	return ret;
}
2050
2051/*
2052 * sde_hw_rotator_validate_entry - validate rotation entry
2053 * @mgr: Pointer to rotator manager
2054 * @entry: Pointer to rotation entry
2055 *
2056 * This function validates the given rotation entry and provides possible
2057 * fixup (future improvement) if available. This function returns 0 if
2058 * the entry is valid, and returns error code otherwise.
2059 */
static int sde_hw_rotator_validate_entry(struct sde_rot_mgr *mgr,
		struct sde_rot_entry *entry)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	int ret = 0;
	u16 src_w, src_h, dst_w, dst_h;
	struct sde_rotation_item *item = &entry->item;
	struct sde_mdp_format_params *fmt;

	src_w = item->src_rect.w;
	src_h = item->src_rect.h;

	/* for 90-degree rotation, compare src against the swapped dst */
	if (item->flags & SDE_ROTATION_90) {
		dst_w = item->dst_rect.h;
		dst_h = item->dst_rect.w;
	} else {
		dst_w = item->dst_rect.w;
		dst_h = item->dst_rect.h;
	}

	entry->dnsc_factor_w = 0;
	entry->dnsc_factor_h = 0;

	/*
	 * NOTE(review): divisions below assume dst_w/dst_h are non-zero;
	 * presumably guaranteed by earlier request validation - confirm.
	 */
	if ((src_w != dst_w) || (src_h != dst_h)) {
		if ((src_w % dst_w) || (src_h % dst_h)) {
			/* non-integer ratio may still be a valid 1.5x case */
			SDEROT_DBG("non integral scale not support\n");
			ret = -EINVAL;
			goto dnsc_1p5_check;
		}
		entry->dnsc_factor_w = src_w / dst_w;
		/* factor must be a power of two, no larger than 64 */
		if ((entry->dnsc_factor_w & (entry->dnsc_factor_w - 1)) ||
				(entry->dnsc_factor_w > 64)) {
			SDEROT_DBG("non power-of-2 w_scale not support\n");
			ret = -EINVAL;
			goto dnsc_err;
		}
		entry->dnsc_factor_h = src_h / dst_h;
		if ((entry->dnsc_factor_h & (entry->dnsc_factor_h - 1)) ||
				(entry->dnsc_factor_h > 64)) {
			SDEROT_DBG("non power-of-2 h_scale not support\n");
			ret = -EINVAL;
			goto dnsc_err;
		}
	}

	fmt = sde_get_format_params(item->output.format);
	/*
	 * Rotator downscale support max 4 times for UBWC format and
	 * max 2 times for TP10/TP10_UBWC format
	 */
	if (sde_mdp_is_ubwc_format(fmt) && (entry->dnsc_factor_h > 4)) {
		SDEROT_DBG("max downscale for UBWC format is 4\n");
		ret = -EINVAL;
		goto dnsc_err;
	}
	if (sde_mdp_is_tp10_format(fmt) && (entry->dnsc_factor_h > 2)) {
		SDEROT_DBG("downscale with TP10 cannot be more than 2\n");
		ret = -EINVAL;
	}
	/* skip the 1.5x path; fall into the common asymmetry check */
	goto dnsc_err;

dnsc_1p5_check:
	/* Check for 1.5 downscale that only applies to V2 HW */
	if (test_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map)) {
		/* integer quotient is 1 for a true 3:2 ratio */
		entry->dnsc_factor_w = src_w / dst_w;
		if ((entry->dnsc_factor_w != 1) ||
				((dst_w * 3) != (src_w * 2))) {
			SDEROT_DBG(
				"No supporting non 1.5 downscale width ratio, src_w:%d, dst_w:%d\n",
				src_w, dst_w);
			ret = -EINVAL;
			goto dnsc_err;
		}

		entry->dnsc_factor_h = src_h / dst_h;
		if ((entry->dnsc_factor_h != 1) ||
				((dst_h * 3) != (src_h * 2))) {
			SDEROT_DBG(
				"Not supporting non 1.5 downscale height ratio, src_h:%d, dst_h:%d\n",
				src_h, dst_h);
			ret = -EINVAL;
			goto dnsc_err;
		}
		/* both axes are exactly 1.5x - clear the earlier -EINVAL */
		ret = 0;
	}

dnsc_err:
	/* Downscaler does not support asymmetrical dnsc */
	if (entry->dnsc_factor_w != entry->dnsc_factor_h) {
		SDEROT_DBG("asymmetric downscale not support\n");
		ret = -EINVAL;
	}

	/* on any failure, report no downscale factors to the caller */
	if (ret) {
		entry->dnsc_factor_w = 0;
		entry->dnsc_factor_h = 0;
	}
	return ret;
}
2159
2160/*
2161 * sde_hw_rotator_show_caps - output capability info to sysfs 'caps' file
2162 * @mgr: Pointer to rotator manager
2163 * @attr: Pointer to device attribute interface
2164 * @buf: Pointer to output buffer
2165 * @len: Length of output buffer
2166 */
2167static ssize_t sde_hw_rotator_show_caps(struct sde_rot_mgr *mgr,
2168 struct device_attribute *attr, char *buf, ssize_t len)
2169{
2170 struct sde_hw_rotator *hw_data;
Benjamin Chan886ff672016-11-07 15:23:17 -05002171 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
Alan Kwong9487de22016-01-16 22:06:36 -05002172 int cnt = 0;
2173
2174 if (!mgr || !buf)
2175 return 0;
2176
2177 hw_data = mgr->hw_data;
2178
2179#define SPRINT(fmt, ...) \
2180 (cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
2181
2182 /* insert capabilities here */
Benjamin Chan886ff672016-11-07 15:23:17 -05002183 if (test_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map))
2184 SPRINT("min_downscale=1.5\n");
2185 else
2186 SPRINT("min_downscale=2.0\n");
Alan Kwong9487de22016-01-16 22:06:36 -05002187
Benjamin Chan42db2c92016-11-22 22:50:01 -05002188 SPRINT("downscale_compression=1\n");
2189
Alan Kwong9487de22016-01-16 22:06:36 -05002190#undef SPRINT
2191 return cnt;
2192}
2193
2194/*
2195 * sde_hw_rotator_show_state - output state info to sysfs 'state' file
2196 * @mgr: Pointer to rotator manager
2197 * @attr: Pointer to device attribute interface
2198 * @buf: Pointer to output buffer
2199 * @len: Length of output buffer
2200 */
2201static ssize_t sde_hw_rotator_show_state(struct sde_rot_mgr *mgr,
2202 struct device_attribute *attr, char *buf, ssize_t len)
2203{
2204 struct sde_hw_rotator *rot;
2205 struct sde_hw_rotator_context *ctx;
2206 int cnt = 0;
2207 int num_active = 0;
2208 int i, j;
2209
2210 if (!mgr || !buf) {
2211 SDEROT_ERR("null parameters\n");
2212 return 0;
2213 }
2214
2215 rot = mgr->hw_data;
2216
2217#define SPRINT(fmt, ...) \
2218 (cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
2219
2220 if (rot) {
2221 SPRINT("rot_mode=%d\n", rot->mode);
2222 SPRINT("irq_num=%d\n", rot->irq_num);
2223
2224 if (rot->mode == ROT_REGDMA_OFF) {
2225 SPRINT("max_active=1\n");
2226 SPRINT("num_active=%d\n", rot->rotCtx[0][0] ? 1 : 0);
2227 } else {
2228 for (i = 0; i < ROT_QUEUE_MAX; i++) {
2229 for (j = 0; j < SDE_HW_ROT_REGDMA_TOTAL_CTX;
2230 j++) {
2231 ctx = rot->rotCtx[i][j];
2232
2233 if (ctx) {
2234 SPRINT(
2235 "rotCtx[%d][%d]:%p\n",
2236 i, j, ctx);
2237 ++num_active;
2238 }
2239 }
2240 }
2241
2242 SPRINT("max_active=%d\n", SDE_HW_ROT_REGDMA_TOTAL_CTX);
2243 SPRINT("num_active=%d\n", num_active);
2244 }
2245 }
2246
2247#undef SPRINT
2248 return cnt;
2249}
2250
2251/*
Alan Kwongda16e442016-08-14 20:47:18 -04002252 * sde_hw_rotator_get_pixfmt - get the indexed pixel format
2253 * @mgr: Pointer to rotator manager
2254 * @index: index of pixel format
2255 * @input: true for input port; false for output port
2256 */
2257static u32 sde_hw_rotator_get_pixfmt(struct sde_rot_mgr *mgr,
2258 int index, bool input)
2259{
2260 if (input) {
2261 if (index < ARRAY_SIZE(sde_hw_rotator_input_pixfmts))
2262 return sde_hw_rotator_input_pixfmts[index];
2263 else
2264 return 0;
2265 } else {
2266 if (index < ARRAY_SIZE(sde_hw_rotator_output_pixfmts))
2267 return sde_hw_rotator_output_pixfmts[index];
2268 else
2269 return 0;
2270 }
2271}
2272
2273/*
2274 * sde_hw_rotator_is_valid_pixfmt - verify if the given pixel format is valid
2275 * @mgr: Pointer to rotator manager
2276 * @pixfmt: pixel format to be verified
2277 * @input: true for input port; false for output port
2278 */
2279static int sde_hw_rotator_is_valid_pixfmt(struct sde_rot_mgr *mgr, u32 pixfmt,
2280 bool input)
2281{
2282 int i;
2283
2284 if (input) {
2285 for (i = 0; i < ARRAY_SIZE(sde_hw_rotator_input_pixfmts); i++)
2286 if (sde_hw_rotator_input_pixfmts[i] == pixfmt)
2287 return true;
2288 } else {
2289 for (i = 0; i < ARRAY_SIZE(sde_hw_rotator_output_pixfmts); i++)
2290 if (sde_hw_rotator_output_pixfmts[i] == pixfmt)
2291 return true;
2292 }
2293
2294 return false;
2295}
2296
2297/*
Alan Kwong9487de22016-01-16 22:06:36 -05002298 * sde_hw_rotator_parse_dt - parse r3 specific device tree settings
2299 * @hw_data: Pointer to rotator hw
2300 * @dev: Pointer to platform device
2301 */
2302static int sde_hw_rotator_parse_dt(struct sde_hw_rotator *hw_data,
2303 struct platform_device *dev)
2304{
2305 int ret = 0;
2306 u32 data;
2307
2308 if (!hw_data || !dev)
2309 return -EINVAL;
2310
2311 ret = of_property_read_u32(dev->dev.of_node, "qcom,mdss-rot-mode",
2312 &data);
2313 if (ret) {
2314 SDEROT_DBG("default to regdma off\n");
2315 ret = 0;
2316 hw_data->mode = ROT_REGDMA_OFF;
2317 } else if (data < ROT_REGDMA_MAX) {
2318 SDEROT_DBG("set to regdma mode %d\n", data);
2319 hw_data->mode = data;
2320 } else {
2321 SDEROT_ERR("regdma mode out of range. default to regdma off\n");
2322 hw_data->mode = ROT_REGDMA_OFF;
2323 }
2324
2325 ret = of_property_read_u32(dev->dev.of_node,
2326 "qcom,mdss-highest-bank-bit", &data);
2327 if (ret) {
2328 SDEROT_DBG("default to A5X bank\n");
2329 ret = 0;
2330 hw_data->highest_bank = 2;
2331 } else {
2332 SDEROT_DBG("set highest bank bit to %d\n", data);
2333 hw_data->highest_bank = data;
2334 }
2335
2336 return ret;
2337}
2338
2339/*
2340 * sde_rotator_r3_init - initialize the r3 module
2341 * @mgr: Pointer to rotator manager
2342 *
2343 * This function setup r3 callback functions, parses r3 specific
2344 * device tree settings, installs r3 specific interrupt handler,
2345 * as well as initializes r3 internal data structure.
2346 */
2347int sde_rotator_r3_init(struct sde_rot_mgr *mgr)
2348{
2349 struct sde_hw_rotator *rot;
2350 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
2351 int i;
2352 int ret;
2353
2354 rot = devm_kzalloc(&mgr->pdev->dev, sizeof(*rot), GFP_KERNEL);
2355 if (!rot)
2356 return -ENOMEM;
2357
2358 mgr->hw_data = rot;
2359 mgr->queue_count = ROT_QUEUE_MAX;
2360
2361 rot->mdss_base = mdata->sde_io.base;
2362 rot->pdev = mgr->pdev;
2363
2364 /* Assign ops */
2365 mgr->ops_hw_destroy = sde_hw_rotator_destroy;
2366 mgr->ops_hw_alloc = sde_hw_rotator_alloc_ext;
2367 mgr->ops_hw_free = sde_hw_rotator_free_ext;
2368 mgr->ops_config_hw = sde_hw_rotator_config;
2369 mgr->ops_kickoff_entry = sde_hw_rotator_kickoff;
2370 mgr->ops_wait_for_entry = sde_hw_rotator_wait4done;
2371 mgr->ops_hw_validate_entry = sde_hw_rotator_validate_entry;
2372 mgr->ops_hw_show_caps = sde_hw_rotator_show_caps;
2373 mgr->ops_hw_show_state = sde_hw_rotator_show_state;
2374 mgr->ops_hw_create_debugfs = sde_rotator_r3_create_debugfs;
Alan Kwongda16e442016-08-14 20:47:18 -04002375 mgr->ops_hw_get_pixfmt = sde_hw_rotator_get_pixfmt;
2376 mgr->ops_hw_is_valid_pixfmt = sde_hw_rotator_is_valid_pixfmt;
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04002377 mgr->ops_hw_pre_pmevent = sde_hw_rotator_pre_pmevent;
2378 mgr->ops_hw_post_pmevent = sde_hw_rotator_post_pmevent;
Alan Kwong9487de22016-01-16 22:06:36 -05002379
2380 ret = sde_hw_rotator_parse_dt(mgr->hw_data, mgr->pdev);
2381 if (ret)
2382 goto error_parse_dt;
2383
2384 rot->irq_num = platform_get_irq(mgr->pdev, 0);
2385 if (rot->irq_num < 0) {
2386 SDEROT_ERR("fail to get rotator irq\n");
2387 } else {
2388 if (rot->mode == ROT_REGDMA_OFF)
2389 ret = devm_request_threaded_irq(&mgr->pdev->dev,
2390 rot->irq_num,
2391 sde_hw_rotator_rotirq_handler,
2392 NULL, 0, "sde_rotator_r3", rot);
2393 else
2394 ret = devm_request_threaded_irq(&mgr->pdev->dev,
2395 rot->irq_num,
2396 sde_hw_rotator_regdmairq_handler,
2397 NULL, 0, "sde_rotator_r3", rot);
2398 if (ret) {
2399 SDEROT_ERR("fail to request irq r:%d\n", ret);
2400 rot->irq_num = -1;
2401 } else {
2402 disable_irq(rot->irq_num);
2403 }
2404 }
Alan Kwong818b7fc2016-07-24 22:07:41 -04002405 atomic_set(&rot->irq_enabled, 0);
Alan Kwong9487de22016-01-16 22:06:36 -05002406
2407 setup_rotator_ops(&rot->ops, rot->mode);
2408
2409 spin_lock_init(&rot->rotctx_lock);
2410 spin_lock_init(&rot->rotisr_lock);
2411
2412 /* REGDMA initialization */
2413 if (rot->mode == ROT_REGDMA_OFF) {
2414 for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
2415 rot->cmd_wr_ptr[0][i] = &rot->cmd_queue[
2416 SDE_HW_ROT_REGDMA_SEG_SIZE * i];
2417 } else {
2418 for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
2419 rot->cmd_wr_ptr[ROT_QUEUE_HIGH_PRIORITY][i] =
2420 (u32 *)(rot->mdss_base +
2421 REGDMA_RAM_REGDMA_CMD_RAM +
2422 SDE_HW_ROT_REGDMA_SEG_SIZE * 4 * i);
2423
2424 for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
2425 rot->cmd_wr_ptr[ROT_QUEUE_LOW_PRIORITY][i] =
2426 (u32 *)(rot->mdss_base +
2427 REGDMA_RAM_REGDMA_CMD_RAM +
2428 SDE_HW_ROT_REGDMA_SEG_SIZE * 4 *
2429 (i + SDE_HW_ROT_REGDMA_TOTAL_CTX));
2430 }
2431
2432 atomic_set(&rot->timestamp[0], 0);
2433 atomic_set(&rot->timestamp[1], 0);
Alan Kwong9487de22016-01-16 22:06:36 -05002434
2435 ret = sde_rotator_hw_rev_init(rot);
2436 if (ret)
2437 goto error_hw_rev_init;
2438
Alan Kwong315cd772016-08-03 22:29:42 -04002439 /* set rotator CBCR to shutoff memory/periphery on clock off.*/
Benjamin Chan77aed192016-10-17 17:49:41 -04002440 clk_set_flags(mgr->rot_clk[SDE_ROTATOR_CLK_ROT_CORE].clk,
Alan Kwong315cd772016-08-03 22:29:42 -04002441 CLKFLAG_NORETAIN_MEM);
Benjamin Chan77aed192016-10-17 17:49:41 -04002442 clk_set_flags(mgr->rot_clk[SDE_ROTATOR_CLK_ROT_CORE].clk,
Alan Kwong315cd772016-08-03 22:29:42 -04002443 CLKFLAG_NORETAIN_PERIPH);
2444
Benjamin Chan53e3bce2016-08-31 14:43:29 -04002445 mdata->sde_rot_hw = rot;
Alan Kwong9487de22016-01-16 22:06:36 -05002446 return 0;
2447error_hw_rev_init:
2448 if (rot->irq_num >= 0)
2449 devm_free_irq(&mgr->pdev->dev, rot->irq_num, mdata);
2450 devm_kfree(&mgr->pdev->dev, mgr->hw_data);
2451error_parse_dt:
2452 return ret;
2453}