/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt)	"%s: " fmt, __func__

#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/sync.h>
#include <linux/delay.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dma-buf.h>
#include <linux/msm_ion.h>
#include <linux/clk/msm-clk.h>

#include "sde_rotator_core.h"
#include "sde_rotator_util.h"
#include "sde_rotator_smmu.h"
#include "sde_rotator_r3.h"
#include "sde_rotator_r3_internal.h"
#include "sde_rotator_r3_hwio.h"
#include "sde_rotator_r3_debug.h"
#include "sde_rotator_trace.h"
#include "sde_rotator_debug.h"

/* XIN mapping */
#define XIN_SSPP		0
#define XIN_WRITEBACK		1

/* wait for at most 2 vsync for lowest refresh rate (24hz) */
#define KOFF_TIMEOUT		msecs_to_jiffies(42 * 32)

/* Macro for constructing the REGDMA command */
#define SDE_REGDMA_WRITE(p, off, data) \
	do { \
		*p++ = REGDMA_OP_REGWRITE | \
			((off) & REGDMA_ADDR_OFFSET_MASK); \
		*p++ = (data); \
	} while (0)

#define SDE_REGDMA_MODIFY(p, off, mask, data) \
	do { \
		*p++ = REGDMA_OP_REGMODIFY | \
			((off) & REGDMA_ADDR_OFFSET_MASK); \
		*p++ = (mask); \
		*p++ = (data); \
	} while (0)

#define SDE_REGDMA_BLKWRITE_INC(p, off, len) \
	do { \
		*p++ = REGDMA_OP_BLKWRITE_INC | \
			((off) & REGDMA_ADDR_OFFSET_MASK); \
		*p++ = (len); \
	} while (0)

#define SDE_REGDMA_BLKWRITE_DATA(p, data) \
	do { \
		*(p) = (data); \
		(p)++; \
	} while (0)

/* Macro for directly accessing mapped registers */
#define SDE_ROTREG_WRITE(base, off, data) \
	writel_relaxed(data, (base + (off)))

#define SDE_ROTREG_READ(base, off) \
	readl_relaxed(base + (off))
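
/*
 * Each helper above appends one command to the in-memory REGDMA command
 * stream: the first word packs the opcode with the target register offset,
 * and the following word(s) carry the payload (data, or mask plus data, or
 * a block length followed by the block data). The same stream is either
 * replayed by the CPU (non-regdma mode) or fetched by the REGDMA engine.
 */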

static u32 sde_hw_rotator_input_pixfmts[] = {
	SDE_PIX_FMT_XRGB_8888,
	SDE_PIX_FMT_ARGB_8888,
	SDE_PIX_FMT_ABGR_8888,
	SDE_PIX_FMT_RGBA_8888,
	SDE_PIX_FMT_BGRA_8888,
	SDE_PIX_FMT_RGBX_8888,
	SDE_PIX_FMT_BGRX_8888,
	SDE_PIX_FMT_XBGR_8888,
	SDE_PIX_FMT_RGBA_5551,
	SDE_PIX_FMT_ARGB_1555,
	SDE_PIX_FMT_ABGR_1555,
	SDE_PIX_FMT_BGRA_5551,
	SDE_PIX_FMT_BGRX_5551,
	SDE_PIX_FMT_RGBX_5551,
	SDE_PIX_FMT_XBGR_1555,
	SDE_PIX_FMT_XRGB_1555,
	SDE_PIX_FMT_ARGB_4444,
	SDE_PIX_FMT_RGBA_4444,
	SDE_PIX_FMT_BGRA_4444,
	SDE_PIX_FMT_ABGR_4444,
	SDE_PIX_FMT_RGBX_4444,
	SDE_PIX_FMT_XRGB_4444,
	SDE_PIX_FMT_BGRX_4444,
	SDE_PIX_FMT_XBGR_4444,
	SDE_PIX_FMT_RGB_888,
	SDE_PIX_FMT_BGR_888,
	SDE_PIX_FMT_RGB_565,
	SDE_PIX_FMT_BGR_565,
	SDE_PIX_FMT_Y_CB_CR_H2V2,
	SDE_PIX_FMT_Y_CR_CB_H2V2,
	SDE_PIX_FMT_Y_CR_CB_GH2V2,
	SDE_PIX_FMT_Y_CBCR_H2V2,
	SDE_PIX_FMT_Y_CRCB_H2V2,
	SDE_PIX_FMT_Y_CBCR_H1V2,
	SDE_PIX_FMT_Y_CRCB_H1V2,
	SDE_PIX_FMT_Y_CBCR_H2V1,
	SDE_PIX_FMT_Y_CRCB_H2V1,
	SDE_PIX_FMT_YCBYCR_H2V1,
	SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
	SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
	SDE_PIX_FMT_RGBA_8888_UBWC,
	SDE_PIX_FMT_RGBX_8888_UBWC,
	SDE_PIX_FMT_RGB_565_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
	SDE_PIX_FMT_RGBA_1010102,
	SDE_PIX_FMT_RGBX_1010102,
	SDE_PIX_FMT_ARGB_2101010,
	SDE_PIX_FMT_XRGB_2101010,
	SDE_PIX_FMT_BGRA_1010102,
	SDE_PIX_FMT_BGRX_1010102,
	SDE_PIX_FMT_ABGR_2101010,
	SDE_PIX_FMT_XBGR_2101010,
	SDE_PIX_FMT_RGBA_1010102_UBWC,
	SDE_PIX_FMT_RGBX_1010102_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_P010,
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
};

static u32 sde_hw_rotator_output_pixfmts[] = {
	SDE_PIX_FMT_XRGB_8888,
	SDE_PIX_FMT_ARGB_8888,
	SDE_PIX_FMT_ABGR_8888,
	SDE_PIX_FMT_RGBA_8888,
	SDE_PIX_FMT_BGRA_8888,
	SDE_PIX_FMT_RGBX_8888,
	SDE_PIX_FMT_BGRX_8888,
	SDE_PIX_FMT_XBGR_8888,
	SDE_PIX_FMT_RGBA_5551,
	SDE_PIX_FMT_ARGB_1555,
	SDE_PIX_FMT_ABGR_1555,
	SDE_PIX_FMT_BGRA_5551,
	SDE_PIX_FMT_BGRX_5551,
	SDE_PIX_FMT_RGBX_5551,
	SDE_PIX_FMT_XBGR_1555,
	SDE_PIX_FMT_XRGB_1555,
	SDE_PIX_FMT_ARGB_4444,
	SDE_PIX_FMT_RGBA_4444,
	SDE_PIX_FMT_BGRA_4444,
	SDE_PIX_FMT_ABGR_4444,
	SDE_PIX_FMT_RGBX_4444,
	SDE_PIX_FMT_XRGB_4444,
	SDE_PIX_FMT_BGRX_4444,
	SDE_PIX_FMT_XBGR_4444,
	SDE_PIX_FMT_RGB_888,
	SDE_PIX_FMT_BGR_888,
	SDE_PIX_FMT_RGB_565,
	SDE_PIX_FMT_BGR_565,
	/* SDE_PIX_FMT_Y_CB_CR_H2V2 */
	/* SDE_PIX_FMT_Y_CR_CB_H2V2 */
	/* SDE_PIX_FMT_Y_CR_CB_GH2V2 */
	SDE_PIX_FMT_Y_CBCR_H2V2,
	SDE_PIX_FMT_Y_CRCB_H2V2,
	SDE_PIX_FMT_Y_CBCR_H1V2,
	SDE_PIX_FMT_Y_CRCB_H1V2,
	SDE_PIX_FMT_Y_CBCR_H2V1,
	SDE_PIX_FMT_Y_CRCB_H2V1,
	/* SDE_PIX_FMT_YCBYCR_H2V1 */
	SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
	SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
	SDE_PIX_FMT_RGBA_8888_UBWC,
	SDE_PIX_FMT_RGBX_8888_UBWC,
	SDE_PIX_FMT_RGB_565_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
	SDE_PIX_FMT_RGBA_1010102,
	SDE_PIX_FMT_RGBX_1010102,
	/* SDE_PIX_FMT_ARGB_2101010 */
	/* SDE_PIX_FMT_XRGB_2101010 */
	SDE_PIX_FMT_BGRA_1010102,
	SDE_PIX_FMT_BGRX_1010102,
	/* SDE_PIX_FMT_ABGR_2101010 */
	/* SDE_PIX_FMT_XBGR_2101010 */
	SDE_PIX_FMT_RGBA_1010102_UBWC,
	SDE_PIX_FMT_RGBX_1010102_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_P010,
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
};

static struct sde_rot_vbif_debug_bus nrt_vbif_dbg_bus_r3[] = {
	{0x214, 0x21c, 16, 1, 0x10}, /* arb clients */
	{0x214, 0x21c, 0, 12, 0x13}, /* xin blocks - axi side */
	{0x21c, 0x214, 0, 12, 0xc}, /* xin blocks - clock side */
};

static struct sde_rot_regdump sde_rot_r3_regdump[] = {
	{ "SDEROT_ROTTOP", SDE_ROT_ROTTOP_OFFSET, 0x100, SDE_ROT_REGDUMP_READ },
	{ "SDEROT_SSPP", SDE_ROT_SSPP_OFFSET, 0x200, SDE_ROT_REGDUMP_READ },
	{ "SDEROT_WB", SDE_ROT_WB_OFFSET, 0x300, SDE_ROT_REGDUMP_READ },
	{ "SDEROT_REGDMA_CSR", SDE_ROT_REGDMA_OFFSET, 0x100,
		SDE_ROT_REGDUMP_READ },
	/*
	 * Need to perform a SW reset to REGDMA in order to access the
	 * REGDMA RAM, especially if REGDMA is waiting for Rotator IDLE.
	 * REGDMA RAM should be dumped last.
	 */
	{ "SDEROT_REGDMA_RESET", ROTTOP_SW_RESET_OVERRIDE, 1,
		SDE_ROT_REGDUMP_WRITE },
	{ "SDEROT_REGDMA_RAM", SDE_ROT_REGDMA_RAM_OFFSET, 0x2000,
		SDE_ROT_REGDUMP_READ },
};

/* Invalid software timestamp value for initialization */
#define SDE_REGDMA_SWTS_INVALID	(~0)

/**
 * sde_hw_rotator_elapsed_swts - Find difference of 2 software timestamps
 * @ts_curr: current software timestamp
 * @ts_prev: previous software timestamp
 * @return: the amount ts_curr is ahead of ts_prev
 */
static int sde_hw_rotator_elapsed_swts(u32 ts_curr, u32 ts_prev)
{
	u32 diff = (ts_curr - ts_prev) & SDE_REGDMA_SWTS_MASK;

	return sign_extend32(diff, (SDE_REGDMA_SWTS_SHIFT - 1));
}
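
/*
 * The software timestamps are small modular counters (bounded by
 * SDE_REGDMA_SWTS_MASK), so the masked difference above is sign-extended:
 * a positive return value means ts_curr is ahead of ts_prev, a negative
 * value means it is behind, regardless of counter wrap-around.
 */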

/**
 * sde_hw_rotator_pending_swts - Check if the given context is still pending
 * @rot: Pointer to hw rotator
 * @ctx: Pointer to rotator context
 * @pswts: Pointer to returned reference software timestamp, optional
 * @return: true if context has pending requests
 */
static int sde_hw_rotator_pending_swts(struct sde_hw_rotator *rot,
		struct sde_hw_rotator_context *ctx, u32 *pswts)
{
	u32 swts;
	int ts_diff;
	bool pending;

	if (ctx->last_regdma_timestamp == SDE_REGDMA_SWTS_INVALID)
		swts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);
	else
		swts = ctx->last_regdma_timestamp;

	if (ctx->q_id == ROT_QUEUE_LOW_PRIORITY)
		swts >>= SDE_REGDMA_SWTS_SHIFT;

	swts &= SDE_REGDMA_SWTS_MASK;

	ts_diff = sde_hw_rotator_elapsed_swts(ctx->timestamp, swts);

	if (pswts)
		*pswts = swts;

	pending = (ts_diff > 0) ? true : false;

	SDEROT_DBG("ts:0x%x, queue_id:%d, swts:0x%x, pending:%d\n",
		ctx->timestamp, ctx->q_id, swts, pending);
	SDEROT_EVTLOG(ctx->timestamp, swts, ctx->q_id, ts_diff);
	return pending;
}
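
/*
 * Both priority queues share the single REGDMA_TIMESTAMP_REG: the
 * high-priority queue uses the low field and the low-priority queue uses
 * the field shifted up by SDE_REGDMA_SWTS_SHIFT, which is why the value is
 * shifted down above before being compared with the context timestamp.
 */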

/**
 * sde_hw_rotator_enable_irq - Enable hw rotator interrupt with ref. count
 *	Also, clear rotator/regdma irq status.
 * @rot: Pointer to hw rotator
 */
static void sde_hw_rotator_enable_irq(struct sde_hw_rotator *rot)
{
	SDEROT_DBG("irq_num:%d enabled:%d\n", rot->irq_num,
		atomic_read(&rot->irq_enabled));

	if (!atomic_read(&rot->irq_enabled)) {
		if (rot->mode == ROT_REGDMA_OFF)
			SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
				ROT_DONE_MASK);
		else
			SDE_ROTREG_WRITE(rot->mdss_base,
				REGDMA_CSR_REGDMA_INT_CLEAR, REGDMA_INT_MASK);

		enable_irq(rot->irq_num);
	}
	atomic_inc(&rot->irq_enabled);
}

/**
 * sde_hw_rotator_disable_irq - Disable hw rotator interrupt with ref. count
 *	Also, clear rotator/regdma irq enable masks.
 * @rot: Pointer to hw rotator
 */
static void sde_hw_rotator_disable_irq(struct sde_hw_rotator *rot)
{
	SDEROT_DBG("irq_num:%d enabled:%d\n", rot->irq_num,
		atomic_read(&rot->irq_enabled));

	if (!atomic_read(&rot->irq_enabled)) {
		SDEROT_ERR("irq %d is already disabled\n", rot->irq_num);
		return;
	}

	if (!atomic_dec_return(&rot->irq_enabled)) {
		if (rot->mode == ROT_REGDMA_OFF)
			SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_EN, 0);
		else
			SDE_ROTREG_WRITE(rot->mdss_base,
				REGDMA_CSR_REGDMA_INT_EN, 0);
		/* disable irq after last pending irq is handled, if any */
		synchronize_irq(rot->irq_num);
		disable_irq_nosync(rot->irq_num);
	}
}

/**
 * sde_hw_rotator_dump_status - Dump hw rotator status on error
 * @rot: Pointer to hw rotator
 */
static void sde_hw_rotator_dump_status(struct sde_hw_rotator *rot)
{
	SDEROT_ERR(
		"op_mode = %x, int_en = %x, int_status = %x\n",
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_OP_MODE),
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_INT_EN),
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_INT_STATUS));

	SDEROT_ERR(
		"ts = %x, q0_status = %x, q1_status = %x, block_status = %x\n",
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_TIMESTAMP_REG),
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_QUEUE_0_STATUS),
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_QUEUE_1_STATUS),
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_BLOCK_STATUS));

	SDEROT_ERR(
		"invalid_cmd_offset = %x, fsm_state = %x\n",
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_INVALID_CMD_RAM_OFFSET),
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_FSM_STATE));
}

/**
 * sde_hw_rotator_get_ctx(): Retrieve rotator context from rotator HW based
 * on provided session_id. Each rotator context has a unique session_id.
 */
static struct sde_hw_rotator_context *sde_hw_rotator_get_ctx(
		struct sde_hw_rotator *rot, u32 session_id,
		enum sde_rot_queue_prio q_id)
{
	int i;
	struct sde_hw_rotator_context *ctx = NULL;

	for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++) {
		ctx = rot->rotCtx[q_id][i];

		if (ctx && (ctx->session_id == session_id)) {
			SDEROT_DBG(
				"rotCtx slot[%d][%d] ==> ctx:%p | session-id:%d\n",
				q_id, i, ctx, ctx->session_id);
			return ctx;
		}
	}

	return NULL;
}

/*
 * sde_hw_rotator_map_vaddr - map the debug buffer to kernel space
 * @dbgbuf: Pointer to debug buffer
 * @buf: Pointer to layer buffer structure
 * @data: Pointer to h/w mapped buffer structure
 */
static void sde_hw_rotator_map_vaddr(struct sde_dbg_buf *dbgbuf,
		struct sde_layer_buffer *buf, struct sde_mdp_data *data)
{
	dbgbuf->dmabuf = data->p[0].srcp_dma_buf;
	dbgbuf->buflen = data->p[0].srcp_dma_buf->size;

	dbgbuf->vaddr = NULL;
	dbgbuf->width = buf->width;
	dbgbuf->height = buf->height;

	if (dbgbuf->dmabuf && (dbgbuf->buflen > 0)) {
		dma_buf_begin_cpu_access(dbgbuf->dmabuf, 0, dbgbuf->buflen,
				DMA_FROM_DEVICE);
		dbgbuf->vaddr = dma_buf_kmap(dbgbuf->dmabuf, 0);
		SDEROT_DBG("vaddr mapping: 0x%p/%ld w:%d/h:%d\n",
				dbgbuf->vaddr, dbgbuf->buflen,
				dbgbuf->width, dbgbuf->height);
	}
}

/*
 * sde_hw_rotator_unmap_vaddr - unmap the debug buffer from kernel space
 * @dbgbuf: Pointer to debug buffer
 */
static void sde_hw_rotator_unmap_vaddr(struct sde_dbg_buf *dbgbuf)
{
	if (dbgbuf->vaddr) {
		dma_buf_kunmap(dbgbuf->dmabuf, 0, dbgbuf->vaddr);
		dma_buf_end_cpu_access(dbgbuf->dmabuf, 0, dbgbuf->buflen,
				DMA_FROM_DEVICE);
	}

	dbgbuf->vaddr = NULL;
	dbgbuf->dmabuf = NULL;
	dbgbuf->buflen = 0;
	dbgbuf->width = 0;
	dbgbuf->height = 0;
}

/*
 * sde_hw_rotator_setup_timestamp_packet - setup timestamp writeback command
 * @ctx: Pointer to rotator context
 * @mask: Bit mask location of the timestamp
 * @swts: Software timestamp
 */
static void sde_hw_rotator_setup_timestamp_packet(
		struct sde_hw_rotator_context *ctx, u32 mask, u32 swts)
{
	u32 *wrptr;

	wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	/*
	 * Create a dummy packet write out to 1 location for timestamp
	 * generation.
	 */
	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_SIZE, 6);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x00010001);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x00010001);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, ctx->ts_addr);
	SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_YSTRIDE0, 4);
	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_FORMAT, 4);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x004037FF);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x03020100);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x80000000);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, ctx->timestamp);
	/*
	 * Must clear secure buffer setting for SW timestamp because the
	 * SW timestamp buffer is always allocated in a non-secure region.
	 */
	if (ctx->is_secure) {
		SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_ADDR_SW_STATUS, 0);
		SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ADDR_SW_STATUS, 0);
	}
	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_WB_DST_FORMAT, 4);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x000037FF);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x03020100);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, ctx->ts_addr);
	SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_YSTRIDE0, 4);
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_SIZE, 0x00010001);
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_IMG_SIZE, 0x00010001);
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_XY, 0);
	SDE_REGDMA_WRITE(wrptr, ROTTOP_DNSC, 0);
	SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, 1);
	SDE_REGDMA_MODIFY(wrptr, REGDMA_TIMESTAMP_REG, mask, swts);
	SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, 1);

	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
}
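
/*
 * The packet built above is a minimal 1x1 rotation whose only purpose is to
 * act as a completion marker: it writes ctx->timestamp into the per-context
 * slot of the software timestamp buffer (ctx->ts_addr) and updates the
 * matching field of REGDMA_TIMESTAMP_REG, which is what
 * sde_hw_rotator_pending_swts() later compares against.
 */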

/*
 * sde_hw_rotator_setup_fetchengine - setup fetch engine
 * @ctx: Pointer to rotator context
 * @queue_id: Priority queue identifier
 * @cfg: Fetch configuration
 * @danger_lut: real-time QoS LUT for danger setting (not used)
 * @safe_lut: real-time QoS LUT for safe setting (not used)
 * @dnsc_factor_w: downscale factor for width
 * @dnsc_factor_h: downscale factor for height
 * @flags: Control flag
 */
static void sde_hw_rotator_setup_fetchengine(struct sde_hw_rotator_context *ctx,
		enum sde_rot_queue_prio queue_id,
		struct sde_hw_rot_sspp_cfg *cfg, u32 danger_lut, u32 safe_lut,
		u32 dnsc_factor_w, u32 dnsc_factor_h, u32 flags)
{
	struct sde_hw_rotator *rot = ctx->rot;
	struct sde_mdp_format_params *fmt;
	struct sde_mdp_data *data;
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	u32 *wrptr;
	u32 opmode = 0;
	u32 chroma_samp = 0;
	u32 src_format = 0;
	u32 unpack = 0;
	u32 width = cfg->img_width;
	u32 height = cfg->img_height;
	u32 fetch_blocksize = 0;
	int i;

	if (ctx->rot->mode == ROT_REGDMA_ON) {
		SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_INT_EN,
				REGDMA_INT_MASK);
		SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_OP_MODE,
				REGDMA_EN);
	}

	wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	/* source image setup */
	if ((flags & SDE_ROT_FLAG_DEINTERLACE)
			&& !(flags & SDE_ROT_FLAG_SOURCE_ROTATED_90)) {
		for (i = 0; i < cfg->src_plane.num_planes; i++)
			cfg->src_plane.ystride[i] *= 2;
		width *= 2;
		height /= 2;
	}

	/*
	 * REGDMA BLK write from SRC_SIZE to OP_MODE, total 15 registers
	 */
	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_SIZE, 15);

	/* SRC_SIZE, SRC_IMG_SIZE, SRC_XY, OUT_SIZE, OUT_XY */
	SDE_REGDMA_BLKWRITE_DATA(wrptr,
			cfg->src_rect->w | (cfg->src_rect->h << 16));
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0); /* SRC_IMG_SIZE unused */
	SDE_REGDMA_BLKWRITE_DATA(wrptr,
			cfg->src_rect->x | (cfg->src_rect->y << 16));
	SDE_REGDMA_BLKWRITE_DATA(wrptr,
			cfg->src_rect->w | (cfg->src_rect->h << 16));
	SDE_REGDMA_BLKWRITE_DATA(wrptr,
			cfg->src_rect->x | (cfg->src_rect->y << 16));

	/* SRC_ADDR [0-3], SRC_YSTRIDE [0-1] */
	data = cfg->data;
	for (i = 0; i < SDE_ROT_MAX_PLANES; i++)
		SDE_REGDMA_BLKWRITE_DATA(wrptr, data->p[i].addr);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->src_plane.ystride[0] |
			(cfg->src_plane.ystride[1] << 16));
	SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->src_plane.ystride[2] |
			(cfg->src_plane.ystride[3] << 16));

	/* UNUSED, write 0 */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);

	/* setup source format */
	fmt = cfg->fmt;

	chroma_samp = fmt->chroma_sample;
	if (flags & SDE_ROT_FLAG_SOURCE_ROTATED_90) {
		if (chroma_samp == SDE_MDP_CHROMA_H2V1)
			chroma_samp = SDE_MDP_CHROMA_H1V2;
		else if (chroma_samp == SDE_MDP_CHROMA_H1V2)
			chroma_samp = SDE_MDP_CHROMA_H2V1;
	}

	src_format = (chroma_samp << 23) |
		(fmt->fetch_planes << 19) |
		(fmt->bits[C3_ALPHA] << 6) |
		(fmt->bits[C2_R_Cr] << 4) |
		(fmt->bits[C1_B_Cb] << 2) |
		(fmt->bits[C0_G_Y] << 0);

	if (fmt->alpha_enable &&
			(fmt->fetch_planes == SDE_MDP_PLANE_INTERLEAVED))
		src_format |= BIT(8); /* SRCC3_EN */

	src_format |= ((fmt->unpack_count - 1) << 12) |
			(fmt->unpack_tight << 17) |
			(fmt->unpack_align_msb << 18) |
			((fmt->bpp - 1) << 9) |
			((fmt->frame_format & 3) << 30);

	if (flags & SDE_ROT_FLAG_ROT_90)
		src_format |= BIT(11); /* ROT90 */

	if (sde_mdp_is_ubwc_format(fmt))
		opmode |= BIT(0); /* BWC_DEC_EN */

	/* if this is YUV pixel format, enable CSC */
	if (sde_mdp_is_yuv_format(fmt))
		src_format |= BIT(15); /* SRC_COLOR_SPACE */

	if (fmt->pixel_mode == SDE_MDP_PIXEL_10BIT)
		src_format |= BIT(14); /* UNPACK_DX_FORMAT */

	/* SRC_FORMAT */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, src_format);

	/* setup source unpack pattern */
	unpack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
			(fmt->element[1] << 8) | (fmt->element[0] << 0);

	/* SRC_UNPACK_PATTERN */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, unpack);

	/* setup source op mode */
	if (flags & SDE_ROT_FLAG_FLIP_LR)
		opmode |= BIT(13); /* FLIP_MODE L/R horizontal flip */
	if (flags & SDE_ROT_FLAG_FLIP_UD)
		opmode |= BIT(14); /* FLIP_MODE U/D vertical flip */
	opmode |= BIT(31); /* MDSS_MDP_OP_PE_OVERRIDE */

	/* SRC_OP_MODE */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, opmode);

	/* setup source fetch config, TP10 uses different block size */
	if (test_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map) &&
			(dnsc_factor_w == 1) && (dnsc_factor_h == 1)) {
		if (sde_mdp_is_tp10_format(fmt))
			fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_144_EXT;
		else
			fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_192_EXT;
	} else {
		if (sde_mdp_is_tp10_format(fmt))
			fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_96;
		else
			fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_128;
	}

	SDE_REGDMA_WRITE(wrptr, ROT_SSPP_FETCH_CONFIG,
			fetch_blocksize |
			SDE_ROT_SSPP_FETCH_CONFIG_RESET_VALUE |
			((rot->highest_bank & 0x3) << 18));

	/* setup source buffer plane security status */
	if (flags & (SDE_ROT_FLAG_SECURE_OVERLAY_SESSION |
			SDE_ROT_FLAG_SECURE_CAMERA_SESSION)) {
		SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_ADDR_SW_STATUS, 0xF);
		ctx->is_secure = true;
	} else {
		SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_ADDR_SW_STATUS, 0);
		ctx->is_secure = false;
	}

	/* Update command queue write ptr */
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
}

/*
 * sde_hw_rotator_setup_wbengine - setup writeback engine
 * @ctx: Pointer to rotator context
 * @queue_id: Priority queue identifier
 * @cfg: Writeback configuration
 * @flags: Control flag
 */
static void sde_hw_rotator_setup_wbengine(struct sde_hw_rotator_context *ctx,
		enum sde_rot_queue_prio queue_id,
		struct sde_hw_rot_wb_cfg *cfg,
		u32 flags)
{
	struct sde_mdp_format_params *fmt;
	u32 *wrptr;
	u32 pack = 0;
	u32 dst_format = 0;
	int i;

	wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	fmt = cfg->fmt;

	/* setup WB DST format */
	dst_format |= (fmt->chroma_sample << 23) |
			(fmt->fetch_planes << 19) |
			(fmt->bits[C3_ALPHA] << 6) |
			(fmt->bits[C2_R_Cr] << 4) |
			(fmt->bits[C1_B_Cb] << 2) |
			(fmt->bits[C0_G_Y] << 0);

	/* alpha control */
	if (fmt->bits[C3_ALPHA] || fmt->alpha_enable) {
		dst_format |= BIT(8);
		if (!fmt->alpha_enable) {
			dst_format |= BIT(14);
			SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ALPHA_X_VALUE, 0);
		}
	}

	dst_format |= ((fmt->unpack_count - 1) << 12) |
			(fmt->unpack_tight << 17) |
			(fmt->unpack_align_msb << 18) |
			((fmt->bpp - 1) << 9) |
			((fmt->frame_format & 3) << 30);

	if (sde_mdp_is_yuv_format(fmt))
		dst_format |= BIT(15);

	if (fmt->pixel_mode == SDE_MDP_PIXEL_10BIT)
		dst_format |= BIT(21); /* PACK_DX_FORMAT */

	/*
	 * REGDMA BLK write, from DST_FORMAT to DST_YSTRIDE 1, total 9 regs
	 */
	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_WB_DST_FORMAT, 9);

	/* DST_FORMAT */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, dst_format);

	/* DST_OP_MODE */
	if (sde_mdp_is_ubwc_format(fmt))
		SDE_REGDMA_BLKWRITE_DATA(wrptr, BIT(0));
	else
		SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);

	/* DST_PACK_PATTERN */
	pack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
			(fmt->element[1] << 8) | (fmt->element[0] << 0);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, pack);

	/* DST_ADDR [0-3], DST_YSTRIDE [0-1] */
	for (i = 0; i < SDE_ROT_MAX_PLANES; i++)
		SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->data->p[i].addr);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->dst_plane.ystride[0] |
			(cfg->dst_plane.ystride[1] << 16));
	SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->dst_plane.ystride[2] |
			(cfg->dst_plane.ystride[3] << 16));

	/* setup WB out image size and ROI */
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_IMG_SIZE,
			cfg->img_width | (cfg->img_height << 16));
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_SIZE,
			cfg->dst_rect->w | (cfg->dst_rect->h << 16));
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_XY,
			cfg->dst_rect->x | (cfg->dst_rect->y << 16));

	if (flags & (SDE_ROT_FLAG_SECURE_OVERLAY_SESSION |
			SDE_ROT_FLAG_SECURE_CAMERA_SESSION))
		SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ADDR_SW_STATUS, 0x1);
	else
		SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ADDR_SW_STATUS, 0);

	/*
	 * setup Downscale factor
	 */
	SDE_REGDMA_WRITE(wrptr, ROTTOP_DNSC,
			cfg->v_downscale_factor |
			(cfg->h_downscale_factor << 16));

	/* write config setup for bank configuration */
	SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_WRITE_CONFIG,
			(ctx->rot->highest_bank & 0x3) << 8);

	if (flags & SDE_ROT_FLAG_ROT_90)
		SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, 0x3);
	else
		SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, 0x1);

	/* Update command queue write ptr */
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
}

/*
 * sde_hw_rotator_start_no_regdma - start non-regdma operation
 * @ctx: Pointer to rotator context
 * @queue_id: Priority queue identifier
 */
static u32 sde_hw_rotator_start_no_regdma(struct sde_hw_rotator_context *ctx,
		enum sde_rot_queue_prio queue_id)
{
	struct sde_hw_rotator *rot = ctx->rot;
	u32 *wrptr;
	u32 *rdptr;
	u8 *addr;
	u32 mask;
	u32 blksize;

	rdptr = sde_hw_rotator_get_regdma_segment_base(ctx);
	wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	if (rot->irq_num >= 0) {
		SDE_REGDMA_WRITE(wrptr, ROTTOP_INTR_EN, 1);
		SDE_REGDMA_WRITE(wrptr, ROTTOP_INTR_CLEAR, 1);
		reinit_completion(&ctx->rot_comp);
		sde_hw_rotator_enable_irq(rot);
	}

	SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, 1);

	/* Update command queue write ptr */
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);

	SDEROT_DBG("BEGIN %d\n", ctx->timestamp);
	/* Write all command stream to Rotator blocks */
	/* Rotator will start right away after command stream finish writing */
	while (rdptr < wrptr) {
		u32 op = REGDMA_OP_MASK & *rdptr;

		switch (op) {
		case REGDMA_OP_NOP:
			SDEROT_DBG("NOP\n");
			rdptr++;
			break;
		case REGDMA_OP_REGWRITE:
			SDEROT_DBG("REGW %6.6x %8.8x\n",
					rdptr[0] & REGDMA_ADDR_OFFSET_MASK,
					rdptr[1]);
			addr = rot->mdss_base +
				(*rdptr++ & REGDMA_ADDR_OFFSET_MASK);
			writel_relaxed(*rdptr++, addr);
			break;
		case REGDMA_OP_REGMODIFY:
			SDEROT_DBG("REGM %6.6x %8.8x %8.8x\n",
					rdptr[0] & REGDMA_ADDR_OFFSET_MASK,
					rdptr[1], rdptr[2]);
			addr = rot->mdss_base +
				(*rdptr++ & REGDMA_ADDR_OFFSET_MASK);
			mask = *rdptr++;
			writel_relaxed((readl_relaxed(addr) & mask) | *rdptr++,
					addr);
			break;
		case REGDMA_OP_BLKWRITE_SINGLE:
			SDEROT_DBG("BLKWS %6.6x %6.6x\n",
					rdptr[0] & REGDMA_ADDR_OFFSET_MASK,
					rdptr[1]);
			addr = rot->mdss_base +
				(*rdptr++ & REGDMA_ADDR_OFFSET_MASK);
			blksize = *rdptr++;
			while (blksize--) {
				SDEROT_DBG("DATA %8.8x\n", rdptr[0]);
				writel_relaxed(*rdptr++, addr);
			}
			break;
		case REGDMA_OP_BLKWRITE_INC:
			SDEROT_DBG("BLKWI %6.6x %6.6x\n",
					rdptr[0] & REGDMA_ADDR_OFFSET_MASK,
					rdptr[1]);
			addr = rot->mdss_base +
				(*rdptr++ & REGDMA_ADDR_OFFSET_MASK);
			blksize = *rdptr++;
			while (blksize--) {
				SDEROT_DBG("DATA %8.8x\n", rdptr[0]);
				writel_relaxed(*rdptr++, addr);
				addr += 4;
			}
			break;
		default:
			/* Unsupported OP mode;
			 * skip the data word for an unrecognized OP code.
			 */
			SDEROT_DBG("UNDEFINED\n");
			rdptr++;
			break;
		}
	}
	SDEROT_DBG("END %d\n", ctx->timestamp);

	return ctx->timestamp;
}

/*
 * sde_hw_rotator_start_regdma - start regdma operation
 * @ctx: Pointer to rotator context
 * @queue_id: Priority queue identifier
 */
static u32 sde_hw_rotator_start_regdma(struct sde_hw_rotator_context *ctx,
		enum sde_rot_queue_prio queue_id)
{
	struct sde_hw_rotator *rot = ctx->rot;
	u32 *wrptr;
	u32 regdmaSlot;
	u32 offset;
	long length;
	long ts_length;
	u32 enableInt;
	u32 swts = 0;
	u32 mask = 0;

	wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	/*
	 * Last ROT command must be ROT_START before REGDMA start
	 */
	SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, 1);
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);

	/*
	 * Start REGDMA with command offset and size
	 */
	regdmaSlot = sde_hw_rotator_get_regdma_ctxidx(ctx);
	length = ((long)wrptr - (long)ctx->regdma_base) / 4;
	offset = (u32)(ctx->regdma_base - (u32 *)(rot->mdss_base +
				REGDMA_RAM_REGDMA_CMD_RAM));
	enableInt = ((ctx->timestamp & 1) + 1) << 30;

	SDEROT_DBG(
		"regdma(%d)[%d] <== INT:0x%X|length:%ld|offset:0x%X, ts:%X\n",
		queue_id, regdmaSlot, enableInt, length, offset,
		ctx->timestamp);

	/* ensure the command packet is issued before the submit command */
	wmb();

	/* REGDMA submission for current context */
	if (queue_id == ROT_QUEUE_HIGH_PRIORITY) {
		SDE_ROTREG_WRITE(rot->mdss_base,
				REGDMA_CSR_REGDMA_QUEUE_0_SUBMIT,
				(length << 14) | offset);
		swts = ctx->timestamp;
		mask = ~SDE_REGDMA_SWTS_MASK;
	} else {
		SDE_ROTREG_WRITE(rot->mdss_base,
				REGDMA_CSR_REGDMA_QUEUE_1_SUBMIT,
				(length << 14) | offset);
		swts = ctx->timestamp << SDE_REGDMA_SWTS_SHIFT;
		mask = ~(SDE_REGDMA_SWTS_MASK << SDE_REGDMA_SWTS_SHIFT);
	}

	/* Write timestamp after previous rotator job finished */
	sde_hw_rotator_setup_timestamp_packet(ctx, mask, swts);
	offset += length;
	ts_length = sde_hw_rotator_get_regdma_segment(ctx) - wrptr;
	WARN_ON((length + ts_length) > SDE_HW_ROT_REGDMA_SEG_SIZE);

	/* ensure the command packet is issued before the submit command */
	wmb();

	if (queue_id == ROT_QUEUE_HIGH_PRIORITY) {
		SDE_ROTREG_WRITE(rot->mdss_base,
				REGDMA_CSR_REGDMA_QUEUE_0_SUBMIT,
				enableInt | (ts_length << 14) | offset);
	} else {
		SDE_ROTREG_WRITE(rot->mdss_base,
				REGDMA_CSR_REGDMA_QUEUE_1_SUBMIT,
				enableInt | (ts_length << 14) | offset);
	}

	/* Update command queue write ptr */
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);

	return ctx->timestamp;
}
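
/*
 * Note the two-stage submission above: the job's own command segment is
 * queued first without interrupt bits, then the small timestamp packet is
 * queued behind it with the interrupt enable bits set, so the REGDMA
 * interrupt only fires after both the rotation and the timestamp update
 * have completed.
 */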

/*
 * sde_hw_rotator_wait_done_no_regdma - wait for non-regdma completion
 * @ctx: Pointer to rotator context
 * @queue_id: Priority queue identifier
 * @flag: Option flag
 */
static u32 sde_hw_rotator_wait_done_no_regdma(
		struct sde_hw_rotator_context *ctx,
		enum sde_rot_queue_prio queue_id, u32 flag)
{
	struct sde_hw_rotator *rot = ctx->rot;
	int rc = 0;
	u32 sts = 0;
	u32 status;
	unsigned long flags;

	if (rot->irq_num >= 0) {
		SDEROT_DBG("Wait for Rotator completion\n");
		rc = wait_for_completion_timeout(&ctx->rot_comp,
					KOFF_TIMEOUT);

		spin_lock_irqsave(&rot->rotisr_lock, flags);
		status = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
		if (rc == 0) {
			/*
			 * Timeout, there might be error,
			 * or rotator still busy
			 */
			if (status & ROT_BUSY_BIT)
				SDEROT_ERR(
					"Timeout waiting for rotator done\n");
			else if (status & ROT_ERROR_BIT)
				SDEROT_ERR(
					"Rotator reported error status\n");
			else
				SDEROT_WARN(
					"Timeout waiting, but rotator job is done!!\n");

			sde_hw_rotator_disable_irq(rot);
		}
		spin_unlock_irqrestore(&rot->rotisr_lock, flags);
	} else {
		int cnt = 200;

		do {
			udelay(500);
			status = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
			cnt--;
		} while ((cnt > 0) && (status & ROT_BUSY_BIT)
				&& ((status & ROT_ERROR_BIT) == 0));

		if (status & ROT_ERROR_BIT)
			SDEROT_ERR("Rotator error\n");
		else if (status & ROT_BUSY_BIT)
			SDEROT_ERR("Rotator busy\n");

		SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
				ROT_DONE_CLEAR);
	}

	sts = (status & ROT_ERROR_BIT) ? -ENODEV : 0;

	return sts;
}

/*
 * sde_hw_rotator_wait_done_regdma - wait for regdma completion
 * @ctx: Pointer to rotator context
 * @queue_id: Priority queue identifier
 * @flag: Option flag
 */
static u32 sde_hw_rotator_wait_done_regdma(
		struct sde_hw_rotator_context *ctx,
		enum sde_rot_queue_prio queue_id, u32 flag)
{
	struct sde_hw_rotator *rot = ctx->rot;
	int rc = 0;
	u32 status;
	u32 last_isr;
	u32 last_ts;
	u32 int_id;
	u32 swts;
	u32 sts = 0;
	unsigned long flags;

	if (rot->irq_num >= 0) {
		SDEROT_DBG("Wait for REGDMA completion, ctx:%p, ts:%X\n",
				ctx, ctx->timestamp);
		rc = wait_event_timeout(ctx->regdma_waitq,
				!sde_hw_rotator_pending_swts(rot, ctx, &swts),
				KOFF_TIMEOUT);

		spin_lock_irqsave(&rot->rotisr_lock, flags);

		last_isr = ctx->last_regdma_isr_status;
		last_ts = ctx->last_regdma_timestamp;
		status = last_isr & REGDMA_INT_MASK;
		int_id = last_ts & 1;
		SDEROT_DBG("INT status:0x%X, INT id:%d, timestamp:0x%X\n",
				status, int_id, last_ts);

		if (rc == 0 || (status & REGDMA_INT_ERR_MASK)) {
			bool pending;

			pending = sde_hw_rotator_pending_swts(rot, ctx, &swts);
			SDEROT_ERR(
				"Timeout wait for regdma interrupt status, ts:0x%X/0x%X pending:%d\n",
				ctx->timestamp, swts, pending);

			if (status & REGDMA_WATCHDOG_INT)
				SDEROT_ERR("REGDMA watchdog interrupt\n");
			else if (status & REGDMA_INVALID_DESCRIPTOR)
				SDEROT_ERR("REGDMA invalid descriptor\n");
			else if (status & REGDMA_INCOMPLETE_CMD)
				SDEROT_ERR("REGDMA incomplete command\n");
			else if (status & REGDMA_INVALID_CMD)
				SDEROT_ERR("REGDMA invalid command\n");

			sde_hw_rotator_dump_status(rot);
			status = ROT_ERROR_BIT;
		} else {
			if (rc == 1)
				SDEROT_WARN(
					"REGDMA done but no irq, ts:0x%X/0x%X\n",
					ctx->timestamp, swts);
			status = 0;
		}

		spin_unlock_irqrestore(&rot->rotisr_lock, flags);
	} else {
		int cnt = 200;

		do {
			udelay(500);
			status = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
			cnt--;
		} while ((cnt > 0) && (status & ROT_BUSY_BIT)
				&& ((status & ROT_ERROR_BIT) == 0));

		if (status & ROT_ERROR_BIT)
			SDEROT_ERR("Rotator error\n");
		else if (status & ROT_BUSY_BIT)
			SDEROT_ERR("Rotator busy\n");

		SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_INT_CLEAR,
				0xFFFF);
	}

	sts = (status & ROT_ERROR_BIT) ? -ENODEV : 0;

	if (status & ROT_ERROR_BIT)
		SDEROT_EVTLOG_TOUT_HANDLER("rot", "vbif_dbg_bus", "panic");

	return sts;
}

/*
 * setup_rotator_ops - setup callback functions for the low-level HAL
 * @ops: Pointer to low-level ops callback
 * @mode: Operation mode (non-regdma or regdma)
 */
static void setup_rotator_ops(struct sde_hw_rotator_ops *ops,
		enum sde_rotator_regdma_mode mode)
{
	ops->setup_rotator_fetchengine = sde_hw_rotator_setup_fetchengine;
	ops->setup_rotator_wbengine = sde_hw_rotator_setup_wbengine;
	if (mode == ROT_REGDMA_ON) {
		ops->start_rotator = sde_hw_rotator_start_regdma;
		ops->wait_rotator_done = sde_hw_rotator_wait_done_regdma;
	} else {
		ops->start_rotator = sde_hw_rotator_start_no_regdma;
		ops->wait_rotator_done = sde_hw_rotator_wait_done_no_regdma;
	}
}
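
/*
 * The ops table above is where the two submission paths diverge: in
 * ROT_REGDMA_ON mode the command segment is handed to the REGDMA engine,
 * while in ROT_REGDMA_OFF mode the same command stream is replayed by the
 * CPU in sde_hw_rotator_start_no_regdma(). Both paths share the
 * fetch/writeback setup helpers.
 */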

/*
 * sde_hw_rotator_swts_create - create software timestamp buffer
 * @rot: Pointer to rotator hw
 *
 * This buffer is used by regdma to keep track of last completed command.
 */
static int sde_hw_rotator_swts_create(struct sde_hw_rotator *rot)
{
	int rc = 0;
	struct ion_handle *handle;
	struct sde_mdp_img_data *data;
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	u32 bufsize = sizeof(int) * SDE_HW_ROT_REGDMA_TOTAL_CTX * 2;

	rot->iclient = mdata->iclient;

	handle = ion_alloc(rot->iclient, bufsize, SZ_4K,
			ION_HEAP(ION_SYSTEM_HEAP_ID), 0);
	if (IS_ERR_OR_NULL(handle)) {
		SDEROT_ERR("ion memory allocation failed\n");
		return -ENOMEM;
	}

	data = &rot->swts_buf;
	data->len = bufsize;
	data->srcp_dma_buf = ion_share_dma_buf(rot->iclient, handle);
	if (IS_ERR(data->srcp_dma_buf)) {
		SDEROT_ERR("ion_dma_buf setup failed\n");
		rc = -ENOMEM;
		goto imap_err;
	}

	sde_smmu_ctrl(1);

	data->srcp_attachment = sde_smmu_dma_buf_attach(data->srcp_dma_buf,
			&rot->pdev->dev, SDE_IOMMU_DOMAIN_ROT_UNSECURE);
	if (IS_ERR_OR_NULL(data->srcp_attachment)) {
		SDEROT_ERR("sde_smmu_dma_buf_attach error\n");
		rc = -ENOMEM;
		goto err_put;
	}

	data->srcp_table = dma_buf_map_attachment(data->srcp_attachment,
			DMA_BIDIRECTIONAL);
	if (IS_ERR_OR_NULL(data->srcp_table)) {
		SDEROT_ERR("dma_buf_map_attachment error\n");
		rc = -ENOMEM;
		goto err_detach;
	}

	rc = sde_smmu_map_dma_buf(data->srcp_dma_buf, data->srcp_table,
			SDE_IOMMU_DOMAIN_ROT_UNSECURE, &data->addr,
			&data->len, DMA_BIDIRECTIONAL);
	if (IS_ERR_VALUE(rc)) {
		SDEROT_ERR("smmu_map_dma_buf failed: (%d)\n", rc);
		goto err_unmap;
	}

	dma_buf_begin_cpu_access(data->srcp_dma_buf, 0, data->len,
			DMA_FROM_DEVICE);
	rot->swts_buffer = dma_buf_kmap(data->srcp_dma_buf, 0);
	if (IS_ERR_OR_NULL(rot->swts_buffer)) {
		SDEROT_ERR("ion kernel memory mapping failed\n");
		rc = IS_ERR(rot->swts_buffer);
		goto kmap_err;
	}

	data->mapped = true;
	SDEROT_DBG("swts buffer mapped: %pad/%lx va:%p\n", &data->addr,
			data->len, rot->swts_buffer);

	ion_free(rot->iclient, handle);

	sde_smmu_ctrl(0);

	return rc;
kmap_err:
	sde_smmu_unmap_dma_buf(data->srcp_table, SDE_IOMMU_DOMAIN_ROT_UNSECURE,
			DMA_FROM_DEVICE, data->srcp_dma_buf);
err_unmap:
	dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table,
			DMA_FROM_DEVICE);
err_detach:
	dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment);
err_put:
	dma_buf_put(data->srcp_dma_buf);
	data->srcp_dma_buf = NULL;
imap_err:
	ion_free(rot->iclient, handle);

	return rc;
}
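
/*
 * The buffer created above provides one 32-bit timestamp slot per REGDMA
 * context for each of the two priority queues (hence the "* 2" in bufsize).
 * It is mapped for device access through the unsecure rotator SMMU domain
 * and kmap'ed for CPU access (rot->swts_buffer), so the timestamp packet
 * has a writable target for its completion markers.
 */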

/*
 * sde_hw_rotator_swtc_destroy - destroy software timestamp buffer
 * @rot: Pointer to rotator hw
 */
static void sde_hw_rotator_swtc_destroy(struct sde_hw_rotator *rot)
{
	struct sde_mdp_img_data *data;

	data = &rot->swts_buf;

	dma_buf_end_cpu_access(data->srcp_dma_buf, 0, data->len,
			DMA_FROM_DEVICE);
	dma_buf_kunmap(data->srcp_dma_buf, 0, rot->swts_buffer);

	sde_smmu_unmap_dma_buf(data->srcp_table, SDE_IOMMU_DOMAIN_ROT_UNSECURE,
			DMA_FROM_DEVICE, data->srcp_dma_buf);
	dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table,
			DMA_FROM_DEVICE);
	dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment);
	dma_buf_put(data->srcp_dma_buf);
	data->srcp_dma_buf = NULL;
}

/*
 * sde_hw_rotator_pre_pmevent - SDE rotator core will call this before a
 *                              PM event occurs
 * @mgr: Pointer to rotator manager
 * @pmon: Boolean indicate an on/off power event
 */
void sde_hw_rotator_pre_pmevent(struct sde_rot_mgr *mgr, bool pmon)
{
	struct sde_hw_rotator *rot;
	u32 l_ts, h_ts, swts, hwts;
	u32 rotsts, regdmasts;

	/*
	 * Check last HW timestamp with SW timestamp before power off event.
	 * If there is a mismatch, it is quite possible that the rotator HW
	 * is either hung or has not finished the last submitted job. In that
	 * case, it is best to do a timeout eventlog to capture some good
	 * event log data for analysis.
	 */
	if (!pmon && mgr && mgr->hw_data) {
		rot = mgr->hw_data;
		h_ts = atomic_read(&rot->timestamp[ROT_QUEUE_HIGH_PRIORITY]);
		l_ts = atomic_read(&rot->timestamp[ROT_QUEUE_LOW_PRIORITY]);

		/* construct the combined timestamp */
		swts = (h_ts & SDE_REGDMA_SWTS_MASK) |
			((l_ts & SDE_REGDMA_SWTS_MASK) <<
			 SDE_REGDMA_SWTS_SHIFT);

		/* Need to turn on clock to access rotator register */
		sde_rotator_clk_ctrl(mgr, true);
		hwts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);
		regdmasts = SDE_ROTREG_READ(rot->mdss_base,
				REGDMA_CSR_REGDMA_BLOCK_STATUS);
		rotsts = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);

		SDEROT_DBG(
			"swts:0x%x, hwts:0x%x, regdma-sts:0x%x, rottop-sts:0x%x\n",
			swts, hwts, regdmasts, rotsts);
		SDEROT_EVTLOG(swts, hwts, regdmasts, rotsts);

		if ((swts != hwts) && ((regdmasts & REGDMA_BUSY) ||
					(rotsts & ROT_STATUS_MASK))) {
			SDEROT_ERR(
				"Mismatch SWTS with HWTS: swts:0x%x, hwts:0x%x, regdma-sts:0x%x, rottop-sts:0x%x\n",
				swts, hwts, regdmasts, rotsts);
			SDEROT_EVTLOG_TOUT_HANDLER("rot", "vbif_dbg_bus",
					"panic");
		}

		/* Turn off rotator clock after checking rotator registers */
		sde_rotator_clk_ctrl(mgr, false);
	}
}

/*
 * sde_hw_rotator_post_pmevent - SDE rotator core will call this after a
 *                               PM event occurs
 * @mgr: Pointer to rotator manager
 * @pmon: Boolean indicate an on/off power event
 */
void sde_hw_rotator_post_pmevent(struct sde_rot_mgr *mgr, bool pmon)
{
	struct sde_hw_rotator *rot;
	u32 l_ts, h_ts, swts;

	/*
	 * After a power on event, the rotator HW is reset to default setting.
	 * It is necessary to synchronize the SW timestamp with the HW.
	 */
	if (pmon && mgr && mgr->hw_data) {
		rot = mgr->hw_data;
		h_ts = atomic_read(&rot->timestamp[ROT_QUEUE_HIGH_PRIORITY]);
		l_ts = atomic_read(&rot->timestamp[ROT_QUEUE_LOW_PRIORITY]);

		/* construct the combined timestamp */
		swts = (h_ts & SDE_REGDMA_SWTS_MASK) |
			((l_ts & SDE_REGDMA_SWTS_MASK) <<
			 SDE_REGDMA_SWTS_SHIFT);

		SDEROT_DBG("swts:0x%x, h_ts:0x%x, l_ts:0x%x\n",
				swts, h_ts, l_ts);
		SDEROT_EVTLOG(swts, h_ts, l_ts);
		rot->reset_hw_ts = true;
		rot->last_hw_ts = swts;
	}
}

/*
 * sde_hw_rotator_destroy - Destroy hw rotator and free allocated resources
 * @mgr: Pointer to rotator manager
 */
static void sde_hw_rotator_destroy(struct sde_rot_mgr *mgr)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	struct sde_hw_rotator *rot;

	if (!mgr || !mgr->pdev || !mgr->hw_data) {
		SDEROT_ERR("null parameters\n");
		return;
	}

	rot = mgr->hw_data;
	if (rot->irq_num >= 0)
		devm_free_irq(&mgr->pdev->dev, rot->irq_num, mdata);

	if (rot->mode == ROT_REGDMA_ON)
		sde_hw_rotator_swtc_destroy(rot);

	devm_kfree(&mgr->pdev->dev, mgr->hw_data);
	mgr->hw_data = NULL;
}

/*
 * sde_hw_rotator_alloc_ext - allocate rotator resource from rotator hw
 * @mgr: Pointer to rotator manager
 * @pipe_id: pipe identifier (not used)
 * @wb_id: writeback identifier/priority queue identifier
 *
 * This function allocates a new hw rotator resource for the given priority.
 */
static struct sde_rot_hw_resource *sde_hw_rotator_alloc_ext(
		struct sde_rot_mgr *mgr, u32 pipe_id, u32 wb_id)
{
	struct sde_hw_rotator_resource_info *resinfo;

	if (!mgr || !mgr->hw_data) {
		SDEROT_ERR("null parameters\n");
		return NULL;
	}

	/*
	 * Allocate rotator resource info. Each allocation is per
	 * HW priority queue.
	 */
	resinfo = devm_kzalloc(&mgr->pdev->dev, sizeof(*resinfo), GFP_KERNEL);
	if (!resinfo) {
		SDEROT_ERR("Failed allocation HW rotator resource info\n");
		return NULL;
	}

	resinfo->rot = mgr->hw_data;
	resinfo->hw.wb_id = wb_id;
	atomic_set(&resinfo->hw.num_active, 0);
	init_waitqueue_head(&resinfo->hw.wait_queue);

	/* For non-regdma, only support one active session */
	if (resinfo->rot->mode == ROT_REGDMA_OFF)
		resinfo->hw.max_active = 1;
	else {
		resinfo->hw.max_active = SDE_HW_ROT_REGDMA_TOTAL_CTX - 1;

		if (resinfo->rot->iclient == NULL)
			sde_hw_rotator_swts_create(resinfo->rot);
	}

	if (resinfo->rot->irq_num >= 0)
		sde_hw_rotator_enable_irq(resinfo->rot);

	SDEROT_DBG("New rotator resource:%p, priority:%d\n",
			resinfo, wb_id);

	return &resinfo->hw;
}

/*
 * sde_hw_rotator_free_ext - free the given rotator resource
 * @mgr: Pointer to rotator manager
 * @hw: Pointer to rotator resource
 */
static void sde_hw_rotator_free_ext(struct sde_rot_mgr *mgr,
		struct sde_rot_hw_resource *hw)
{
	struct sde_hw_rotator_resource_info *resinfo;

	if (!mgr || !mgr->hw_data)
		return;

	resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);

	SDEROT_DBG(
		"Free rotator resource:%p, priority:%d, active:%d, pending:%d\n",
		resinfo, hw->wb_id, atomic_read(&hw->num_active),
		hw->pending_count);

	if (resinfo->rot->irq_num >= 0)
		sde_hw_rotator_disable_irq(resinfo->rot);

	devm_kfree(&mgr->pdev->dev, resinfo);
}

/*
 * sde_hw_rotator_alloc_rotctx - allocate rotator context
 * @rot: Pointer to rotator hw
 * @hw: Pointer to rotator resource
 * @session_id: Session identifier of this context
 *
 * This function allocates a new rotator context for the given session id.
 */
static struct sde_hw_rotator_context *sde_hw_rotator_alloc_rotctx(
		struct sde_hw_rotator *rot,
		struct sde_rot_hw_resource *hw,
		u32 session_id)
{
	struct sde_hw_rotator_context *ctx;

	/* Allocate rotator context */
	ctx = devm_kzalloc(&rot->pdev->dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		SDEROT_ERR("Failed allocation HW rotator context\n");
		return NULL;
	}

	ctx->rot = rot;
	ctx->q_id = hw->wb_id;
	ctx->session_id = session_id;
	ctx->hwres = hw;
	ctx->timestamp = atomic_add_return(1, &rot->timestamp[ctx->q_id]);
	ctx->timestamp &= SDE_REGDMA_SWTS_MASK;
	ctx->is_secure = false;

	ctx->regdma_base = rot->cmd_wr_ptr[ctx->q_id]
		[sde_hw_rotator_get_regdma_ctxidx(ctx)];
	ctx->regdma_wrptr = ctx->regdma_base;
	ctx->ts_addr = (dma_addr_t)((u32 *)rot->swts_buf.addr +
			ctx->q_id * SDE_HW_ROT_REGDMA_TOTAL_CTX +
			sde_hw_rotator_get_regdma_ctxidx(ctx));

	ctx->last_regdma_timestamp = SDE_REGDMA_SWTS_INVALID;

	init_completion(&ctx->rot_comp);
	init_waitqueue_head(&ctx->regdma_waitq);

	/* Store rotator context for lookup purpose */
	sde_hw_rotator_put_ctx(ctx);

	SDEROT_DBG(
		"New rot CTX:%p, ctxidx:%d, session-id:%d, prio:%d, timestamp:%X, active:%d\n",
		ctx, sde_hw_rotator_get_regdma_ctxidx(ctx), ctx->session_id,
		ctx->q_id, ctx->timestamp,
		atomic_read(&ctx->hwres->num_active));

	return ctx;
}

/*
 * sde_hw_rotator_free_rotctx - free the given rotator context
 * @rot: Pointer to rotator hw
 * @ctx: Pointer to rotator context
 */
static void sde_hw_rotator_free_rotctx(struct sde_hw_rotator *rot,
		struct sde_hw_rotator_context *ctx)
{
	if (!rot || !ctx)
		return;

	SDEROT_DBG(
		"Free rot CTX:%p, ctxidx:%d, session-id:%d, prio:%d, timestamp:%X, active:%d\n",
		ctx, sde_hw_rotator_get_regdma_ctxidx(ctx), ctx->session_id,
		ctx->q_id, ctx->timestamp,
		atomic_read(&ctx->hwres->num_active));

	/* Clear rotator context from lookup purpose */
	sde_hw_rotator_clr_ctx(ctx);

	devm_kfree(&rot->pdev->dev, ctx);
}
1508
1509/*
1510 * sde_hw_rotator_config - configure hw for the given rotation entry
1511 * @hw: Pointer to rotator resource
1512 * @entry: Pointer to rotation entry
1513 *
1514 * This function setup the fetch/writeback/rotator blocks, as well as VBIF
1515 * based on the given rotation entry.
1516 */
1517static int sde_hw_rotator_config(struct sde_rot_hw_resource *hw,
1518 struct sde_rot_entry *entry)
1519{
1520 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
1521 struct sde_hw_rotator *rot;
1522 struct sde_hw_rotator_resource_info *resinfo;
1523 struct sde_hw_rotator_context *ctx;
1524 struct sde_hw_rot_sspp_cfg sspp_cfg;
1525 struct sde_hw_rot_wb_cfg wb_cfg;
1526 u32 danger_lut = 0; /* applicable for realtime client only */
1527 u32 safe_lut = 0; /* applicable for realtime client only */
1528 u32 flags = 0;
1529 struct sde_rotation_item *item;
1530
1531 if (!hw || !entry) {
1532 SDEROT_ERR("null hw resource/entry\n");
1533 return -EINVAL;
1534 }
1535
1536 resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
1537 rot = resinfo->rot;
1538 item = &entry->item;
1539
1540 ctx = sde_hw_rotator_alloc_rotctx(rot, hw, item->session_id);
1541 if (!ctx) {
1542 SDEROT_ERR("Failed allocating rotator context!!\n");
1543 return -EINVAL;
1544 }
1545
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04001546 if (rot->reset_hw_ts) {
1547 SDEROT_EVTLOG(rot->last_hw_ts);
1548 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG,
1549 rot->last_hw_ts);
1550 /* ensure write is issued to the rotator HW */
1551 wmb();
1552 rot->reset_hw_ts = false;
1553 }
1554
Alan Kwong9487de22016-01-16 22:06:36 -05001555 flags = (item->flags & SDE_ROTATION_FLIP_LR) ?
1556 SDE_ROT_FLAG_FLIP_LR : 0;
1557 flags |= (item->flags & SDE_ROTATION_FLIP_UD) ?
1558 SDE_ROT_FLAG_FLIP_UD : 0;
1559 flags |= (item->flags & SDE_ROTATION_90) ?
1560 SDE_ROT_FLAG_ROT_90 : 0;
1561 flags |= (item->flags & SDE_ROTATION_DEINTERLACE) ?
1562 SDE_ROT_FLAG_DEINTERLACE : 0;
1563 flags |= (item->flags & SDE_ROTATION_SECURE) ?
1564 SDE_ROT_FLAG_SECURE_OVERLAY_SESSION : 0;
Abhijit Kulkarni298c8232016-09-26 22:32:10 -07001565 flags |= (item->flags & SDE_ROTATION_SECURE_CAMERA) ?
1566 SDE_ROT_FLAG_SECURE_CAMERA_SESSION : 0;
1567
Alan Kwong9487de22016-01-16 22:06:36 -05001568
1569 sspp_cfg.img_width = item->input.width;
1570 sspp_cfg.img_height = item->input.height;
1571 sspp_cfg.fmt = sde_get_format_params(item->input.format);
1572 if (!sspp_cfg.fmt) {
1573 SDEROT_ERR("null format\n");
1574 return -EINVAL;
1575 }
1576 sspp_cfg.src_rect = &item->src_rect;
1577 sspp_cfg.data = &entry->src_buf;
1578 sde_mdp_get_plane_sizes(sspp_cfg.fmt, item->input.width,
1579 item->input.height, &sspp_cfg.src_plane,
1580 0, /* No bwc_mode */
1581 (flags & SDE_ROT_FLAG_SOURCE_ROTATED_90) ?
1582 true : false);
1583
1584 rot->ops.setup_rotator_fetchengine(ctx, ctx->q_id,
Benjamin Chanfb6faa32016-08-16 17:21:01 -04001585 &sspp_cfg, danger_lut, safe_lut,
1586 entry->dnsc_factor_w, entry->dnsc_factor_h, flags);
Alan Kwong9487de22016-01-16 22:06:36 -05001587
1588 wb_cfg.img_width = item->output.width;
1589 wb_cfg.img_height = item->output.height;
1590 wb_cfg.fmt = sde_get_format_params(item->output.format);
1591 wb_cfg.dst_rect = &item->dst_rect;
1592 wb_cfg.data = &entry->dst_buf;
1593 sde_mdp_get_plane_sizes(wb_cfg.fmt, item->output.width,
1594 item->output.height, &wb_cfg.dst_plane,
1595 0, /* No bwc_mode */
1596 (flags & SDE_ROT_FLAG_ROT_90) ? true : false);
1597
1598 wb_cfg.v_downscale_factor = entry->dnsc_factor_h;
1599 wb_cfg.h_downscale_factor = entry->dnsc_factor_w;
1600
1601 rot->ops.setup_rotator_wbengine(ctx, ctx->q_id, &wb_cfg, flags);
1602
1603 /* setup VA mapping for debugfs */
1604 if (rot->dbgmem) {
1605 sde_hw_rotator_map_vaddr(&ctx->src_dbgbuf,
1606 &item->input,
1607 &entry->src_buf);
1608
1609 sde_hw_rotator_map_vaddr(&ctx->dst_dbgbuf,
1610 &item->output,
1611 &entry->dst_buf);
1612 }
1613
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04001614 SDEROT_EVTLOG(ctx->timestamp, flags,
1615 item->input.width, item->input.height,
Benjamin Chan53e3bce2016-08-31 14:43:29 -04001616 item->output.width, item->output.height,
1617 entry->src_buf.p[0].addr, entry->dst_buf.p[0].addr);
1618
Alan Kwong9487de22016-01-16 22:06:36 -05001619 if (mdata->default_ot_rd_limit) {
1620 struct sde_mdp_set_ot_params ot_params;
1621
1622 memset(&ot_params, 0, sizeof(struct sde_mdp_set_ot_params));
1623 ot_params.xin_id = XIN_SSPP;
1624 ot_params.num = 0; /* not used */
Alan Kwongeffb5ee2016-03-12 19:47:45 -05001625 ot_params.width = entry->perf->config.input.width;
1626 ot_params.height = entry->perf->config.input.height;
1627 ot_params.fps = entry->perf->config.frame_rate;
Alan Kwong9487de22016-01-16 22:06:36 -05001628 ot_params.reg_off_vbif_lim_conf = MMSS_VBIF_RD_LIM_CONF;
1629 ot_params.reg_off_mdp_clk_ctrl =
1630 MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0;
1631 ot_params.bit_off_mdp_clk_ctrl =
1632 MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN0;
Alan Kwongeffb5ee2016-03-12 19:47:45 -05001633 ot_params.fmt = entry->perf->config.input.format;
Alan Kwong9487de22016-01-16 22:06:36 -05001634 sde_mdp_set_ot_limit(&ot_params);
1635 }
1636
1637 if (mdata->default_ot_wr_limit) {
1638 struct sde_mdp_set_ot_params ot_params;
1639
1640 memset(&ot_params, 0, sizeof(struct sde_mdp_set_ot_params));
1641 ot_params.xin_id = XIN_WRITEBACK;
1642 ot_params.num = 0; /* not used */
Alan Kwongeffb5ee2016-03-12 19:47:45 -05001643 ot_params.width = entry->perf->config.input.width;
1644 ot_params.height = entry->perf->config.input.height;
1645 ot_params.fps = entry->perf->config.frame_rate;
Alan Kwong9487de22016-01-16 22:06:36 -05001646 ot_params.reg_off_vbif_lim_conf = MMSS_VBIF_WR_LIM_CONF;
1647 ot_params.reg_off_mdp_clk_ctrl =
1648 MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0;
1649 ot_params.bit_off_mdp_clk_ctrl =
1650 MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN1;
Alan Kwongeffb5ee2016-03-12 19:47:45 -05001651 ot_params.fmt = entry->perf->config.input.format;
Alan Kwong9487de22016-01-16 22:06:36 -05001652 sde_mdp_set_ot_limit(&ot_params);
1653 }
1654
1655 if (test_bit(SDE_QOS_PER_PIPE_LUT, mdata->sde_qos_map)) {
1656 u32 qos_lut = 0; /* low priority for nrt read client */
1657
1658 trace_rot_perf_set_qos_luts(XIN_SSPP, sspp_cfg.fmt->format,
1659 qos_lut, sde_mdp_is_linear_format(sspp_cfg.fmt));
1660
1661 SDE_ROTREG_WRITE(rot->mdss_base, ROT_SSPP_CREQ_LUT, qos_lut);
1662 }
1663
1664 if (mdata->npriority_lvl > 0) {
1665 u32 mask, reg_val, i, vbif_qos;
1666
1667 for (i = 0; i < mdata->npriority_lvl; i++) {
1668 reg_val = SDE_VBIF_READ(mdata,
1669 MMSS_VBIF_NRT_VBIF_QOS_REMAP_00 + i*4);
1670 mask = 0x3 << (XIN_SSPP * 2);
1671 reg_val &= ~(mask);
1672 vbif_qos = mdata->vbif_nrt_qos[i];
1673 reg_val |= vbif_qos << (XIN_SSPP * 2);
1674 /* ensure write is issued after the read operation */
1675 mb();
1676 SDE_VBIF_WRITE(mdata,
1677 MMSS_VBIF_NRT_VBIF_QOS_REMAP_00 + i*4,
1678 reg_val);
1679 }
1680 }
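	/*
	 * Each QOS_REMAP register carries a 2-bit priority field per xin
	 * client, so the mask/shift above select bits [1:0] for XIN_SSPP (0).
	 * For example (illustrative value), vbif_nrt_qos[i] = 2 rewrites only
	 * that field and leaves the other clients' remap values untouched.
	 */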
1681
1682 /* Enable write gather for writeback to remove write gaps, which
1683 * may hang AXI/BIMC/SDE.
1684 */
1685 SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_WRITE_GATHTER_EN,
1686 BIT(XIN_WRITEBACK));
1687
1688 return 0;
1689}
1690
1691/*
1692 * sde_hw_rotator_kickoff - kickoff processing on the given entry
1693 * @hw: Pointer to rotator resource
1694 * @entry: Pointer to rotation entry
1695 */
1696static int sde_hw_rotator_kickoff(struct sde_rot_hw_resource *hw,
1697 struct sde_rot_entry *entry)
1698{
1699 struct sde_hw_rotator *rot;
1700 struct sde_hw_rotator_resource_info *resinfo;
1701 struct sde_hw_rotator_context *ctx;
1702 int ret = 0;
1703
1704 if (!hw || !entry) {
1705 SDEROT_ERR("null hw resource/entry\n");
1706 return -EINVAL;
1707 }
1708
1709 resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
1710 rot = resinfo->rot;
1711
1712 /* Lookup rotator context from session-id */
1713 ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id, hw->wb_id);
1714 if (!ctx) {
1715 SDEROT_ERR("Cannot locate rotator ctx from sesison id:%d\n",
1716 entry->item.session_id);
Benjamin Chan62b94ed2016-08-18 23:55:21 -04001717 return -EINVAL;
Alan Kwong9487de22016-01-16 22:06:36 -05001718 }
Alan Kwong9487de22016-01-16 22:06:36 -05001719
1720 ret = sde_smmu_ctrl(1);
1721 if (IS_ERR_VALUE(ret)) {
1722 SDEROT_ERR("IOMMU attach failed\n");
1723 return ret;
1724 }
1725
1726 rot->ops.start_rotator(ctx, ctx->q_id);
1727
1728 return 0;
1729}
1730
1731/*
1732 * sde_hw_rotator_wait4done - wait for completion notification
1733 * @hw: Pointer to rotator resource
1734 * @entry: Pointer to rotation entry
1735 *
1736 * This function blocks until the given entry completes, an error
1737 * is detected, or a timeout occurs.
1738 */
1739static int sde_hw_rotator_wait4done(struct sde_rot_hw_resource *hw,
1740 struct sde_rot_entry *entry)
1741{
1742 struct sde_hw_rotator *rot;
1743 struct sde_hw_rotator_resource_info *resinfo;
1744 struct sde_hw_rotator_context *ctx;
1745 int ret;
1746
1747 if (!hw || !entry) {
1748 SDEROT_ERR("null hw resource/entry\n");
1749 return -EINVAL;
1750 }
1751
1752 resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
1753 rot = resinfo->rot;
1754
1755 /* Lookup rotator context from session-id */
1756 ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id, hw->wb_id);
1757 if (!ctx) {
1758 SDEROT_ERR("Cannot locate rotator ctx from sesison id:%d\n",
1759 entry->item.session_id);
Benjamin Chan62b94ed2016-08-18 23:55:21 -04001760 return -EINVAL;
Alan Kwong9487de22016-01-16 22:06:36 -05001761 }
Alan Kwong9487de22016-01-16 22:06:36 -05001762
1763 ret = rot->ops.wait_rotator_done(ctx, ctx->q_id, 0);
1764
1765 sde_smmu_ctrl(0);
1766
1767 if (rot->dbgmem) {
1768 sde_hw_rotator_unmap_vaddr(&ctx->src_dbgbuf);
1769 sde_hw_rotator_unmap_vaddr(&ctx->dst_dbgbuf);
1770 }
1771
1772	/* Current rotator context job is finished, time to free it up */
1773 sde_hw_rotator_free_rotctx(rot, ctx);
1774
1775 return ret;
1776}
1777
1778/*
1779 * sde_rotator_hw_rev_init - setup feature and/or capability bitmask
1780 * @rot: Pointer to hw rotator
1781 *
1782 * This function initializes the feature and/or capability bitmasks based
1783 * on the h/w version read from the device.
1784 */
1785static int sde_rotator_hw_rev_init(struct sde_hw_rotator *rot)
1786{
1787 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
1788 u32 hw_version;
1789
1790 if (!mdata) {
1791 SDEROT_ERR("null rotator data\n");
1792 return -EINVAL;
1793 }
1794
1795 hw_version = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_HW_VERSION);
1796 SDEROT_DBG("hw version %8.8x\n", hw_version);
1797
1798 clear_bit(SDE_QOS_PER_PIPE_IB, mdata->sde_qos_map);
1799 set_bit(SDE_QOS_OVERHEAD_FACTOR, mdata->sde_qos_map);
1800 clear_bit(SDE_QOS_CDP, mdata->sde_qos_map);
1801 set_bit(SDE_QOS_OTLIM, mdata->sde_qos_map);
1802 set_bit(SDE_QOS_PER_PIPE_LUT, mdata->sde_qos_map);
1803 clear_bit(SDE_QOS_SIMPLIFIED_PREFILL, mdata->sde_qos_map);
1804
1805 set_bit(SDE_CAPS_R3_WB, mdata->sde_caps_map);
1806
Benjamin Chanfb6faa32016-08-16 17:21:01 -04001807 if (hw_version != SDE_ROT_TYPE_V1_0) {
1808 SDEROT_DBG("Supporting 1.5 downscale for SDE Rotator\n");
1809 set_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map);
1810 }
1811
Abhijit Kulkarni298c8232016-09-26 22:32:10 -07001812 set_bit(SDE_CAPS_SEC_ATTACH_DETACH_SMMU, mdata->sde_caps_map);
1813
Benjamin Chan53e3bce2016-08-31 14:43:29 -04001814 mdata->nrt_vbif_dbg_bus = nrt_vbif_dbg_bus_r3;
1815 mdata->nrt_vbif_dbg_bus_size =
1816 ARRAY_SIZE(nrt_vbif_dbg_bus_r3);
1817
1818 mdata->regdump = sde_rot_r3_regdump;
1819 mdata->regdump_size = ARRAY_SIZE(sde_rot_r3_regdump);
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04001820 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG, 0);
Alan Kwong9487de22016-01-16 22:06:36 -05001821 return 0;
1822}
1823
1824/*
1825 * sde_hw_rotator_rotirq_handler - non-regdma interrupt handler
1826 * @irq: Interrupt number
1827 * @ptr: Pointer to private handle provided during registration
1828 *
1829 * This function services the rotator interrupt and wakes up the waiting
1830 * client with pending rotation requests already submitted to h/w.
1831 */
1832static irqreturn_t sde_hw_rotator_rotirq_handler(int irq, void *ptr)
1833{
1834 struct sde_hw_rotator *rot = ptr;
1835 struct sde_hw_rotator_context *ctx;
1836 irqreturn_t ret = IRQ_NONE;
1837 u32 isr;
1838
1839 isr = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_INTR_STATUS);
1840
1841 SDEROT_DBG("intr_status = %8.8x\n", isr);
1842
1843 if (isr & ROT_DONE_MASK) {
1844 if (rot->irq_num >= 0)
Alan Kwong818b7fc2016-07-24 22:07:41 -04001845 sde_hw_rotator_disable_irq(rot);
Alan Kwong9487de22016-01-16 22:06:36 -05001846 SDEROT_DBG("Notify rotator complete\n");
1847
1848		/* Normal rotator mode has only one session, so no lookup is needed */
1849 ctx = rot->rotCtx[0][0];
1850 WARN_ON(ctx == NULL);
1851 complete_all(&ctx->rot_comp);
1852
1853 spin_lock(&rot->rotisr_lock);
1854 SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
1855 ROT_DONE_CLEAR);
1856 spin_unlock(&rot->rotisr_lock);
1857 ret = IRQ_HANDLED;
1858 }
1859
1860 return ret;
1861}
1862
1863/*
1864 * sde_hw_rotator_regdmairq_handler - regdma interrupt handler
1865 * @irq: Interrupt number
1866 * @ptr: Pointer to private handle provided during registration
1867 *
1868 * This function services the rotator interrupt, decodes the source of
1869 * the event (high/low priority queue), and wakes up all waiting clients
1870 * with pending rotation requests already submitted to h/w.
1871 */
1872static irqreturn_t sde_hw_rotator_regdmairq_handler(int irq, void *ptr)
1873{
1874 struct sde_hw_rotator *rot = ptr;
1875 struct sde_hw_rotator_context *ctx;
1876 irqreturn_t ret = IRQ_NONE;
1877 u32 isr;
1878 u32 ts;
1879 u32 q_id;
1880
1881 isr = SDE_ROTREG_READ(rot->mdss_base, REGDMA_CSR_REGDMA_INT_STATUS);
Alan Kwong818b7fc2016-07-24 22:07:41 -04001882 /* acknowledge interrupt before reading latest timestamp */
1883 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_INT_CLEAR, isr);
Alan Kwong9487de22016-01-16 22:06:36 -05001884 ts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);
1885
1886 SDEROT_DBG("intr_status = %8.8x, sw_TS:%X\n", isr, ts);
1887
1888	/* Any REGDMA status, including error and watchdog timer, should
1889	 * trigger a wakeup of the waiting thread
1890 */
1891 if (isr & (REGDMA_INT_HIGH_MASK | REGDMA_INT_LOW_MASK)) {
1892 spin_lock(&rot->rotisr_lock);
1893
1894 /*
1895 * Obtain rotator context based on timestamp from regdma
1896 * and low/high interrupt status
1897 */
1898 if (isr & REGDMA_INT_HIGH_MASK) {
1899 q_id = ROT_QUEUE_HIGH_PRIORITY;
1900 ts = ts & SDE_REGDMA_SWTS_MASK;
1901 } else if (isr & REGDMA_INT_LOW_MASK) {
1902 q_id = ROT_QUEUE_LOW_PRIORITY;
1903 ts = (ts >> SDE_REGDMA_SWTS_SHIFT) &
1904 SDE_REGDMA_SWTS_MASK;
Benjamin Chan62b94ed2016-08-18 23:55:21 -04001905 } else {
1906 SDEROT_ERR("unknown ISR status: isr=0x%X\n", isr);
1907 goto done_isr_handle;
Alan Kwong9487de22016-01-16 22:06:36 -05001908 }
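		/*
		 * REGDMA_TIMESTAMP_REG holds one SW timestamp per queue: the
		 * high-priority value in the lower field and the low-priority
		 * value in the upper field (see SDE_REGDMA_SWTS_SHIFT above).
		 * The decoded timestamp, masked with the segment mask, indexes
		 * the per-queue context table below.
		 */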
Alan Kwong9487de22016-01-16 22:06:36 -05001909 ctx = rot->rotCtx[q_id][ts & SDE_HW_ROT_REGDMA_SEG_MASK];
Alan Kwong9487de22016-01-16 22:06:36 -05001910
1911 /*
1912		 * Wake up all waiting contexts with the current or an earlier
1913		 * SW timestamp.
1914 */
Alan Kwong818b7fc2016-07-24 22:07:41 -04001915 while (ctx &&
1916 sde_hw_rotator_elapsed_swts(ctx->timestamp, ts) >= 0) {
Alan Kwong9487de22016-01-16 22:06:36 -05001917 ctx->last_regdma_isr_status = isr;
1918 ctx->last_regdma_timestamp = ts;
1919 SDEROT_DBG(
Alan Kwongf987ea32016-07-06 12:11:44 -04001920 "regdma complete: ctx:%p, ts:%X\n", ctx, ts);
Alan Kwong818b7fc2016-07-24 22:07:41 -04001921 wake_up_all(&ctx->regdma_waitq);
Alan Kwong9487de22016-01-16 22:06:36 -05001922
1923 ts = (ts - 1) & SDE_REGDMA_SWTS_MASK;
1924 ctx = rot->rotCtx[q_id]
1925 [ts & SDE_HW_ROT_REGDMA_SEG_MASK];
Alan Kwong818b7fc2016-07-24 22:07:41 -04001926		}
Alan Kwong9487de22016-01-16 22:06:36 -05001927
Benjamin Chan62b94ed2016-08-18 23:55:21 -04001928done_isr_handle:
Alan Kwong9487de22016-01-16 22:06:36 -05001929 spin_unlock(&rot->rotisr_lock);
1930 ret = IRQ_HANDLED;
1931 } else if (isr & REGDMA_INT_ERR_MASK) {
1932 /*
1933 * For REGDMA Err, we save the isr info and wake up
1934 * all waiting contexts
1935 */
1936 int i, j;
1937
1938 SDEROT_ERR(
1939 "regdma err isr:%X, wake up all waiting contexts\n",
1940 isr);
1941
1942 spin_lock(&rot->rotisr_lock);
1943
1944 for (i = 0; i < ROT_QUEUE_MAX; i++) {
1945 for (j = 0; j < SDE_HW_ROT_REGDMA_TOTAL_CTX; j++) {
1946 ctx = rot->rotCtx[i][j];
1947 if (ctx && ctx->last_regdma_isr_status == 0) {
1948 ctx->last_regdma_isr_status = isr;
1949 ctx->last_regdma_timestamp = ts;
Alan Kwong818b7fc2016-07-24 22:07:41 -04001950 wake_up_all(&ctx->regdma_waitq);
Alan Kwong9487de22016-01-16 22:06:36 -05001951 SDEROT_DBG("Wakeup rotctx[%d][%d]:%p\n",
1952 i, j, ctx);
1953 }
1954 }
1955 }
1956
Alan Kwong9487de22016-01-16 22:06:36 -05001957 spin_unlock(&rot->rotisr_lock);
1958 ret = IRQ_HANDLED;
1959 }
1960
1961 return ret;
1962}
1963
1964/*
1965 * sde_hw_rotator_validate_entry - validate rotation entry
1966 * @mgr: Pointer to rotator manager
1967 * @entry: Pointer to rotation entry
1968 *
1969 * This function validates the given rotation entry and provides possible
1970 * fixups (future improvement) if available. It returns 0 if
1971 * the entry is valid, and an error code otherwise.
1972 */
1973static int sde_hw_rotator_validate_entry(struct sde_rot_mgr *mgr,
1974 struct sde_rot_entry *entry)
1975{
Benjamin Chanfb6faa32016-08-16 17:21:01 -04001976 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
Alan Kwong9487de22016-01-16 22:06:36 -05001977 int ret = 0;
1978 u16 src_w, src_h, dst_w, dst_h;
1979 struct sde_rotation_item *item = &entry->item;
1980 struct sde_mdp_format_params *fmt;
1981
1982 src_w = item->src_rect.w;
1983 src_h = item->src_rect.h;
1984
1985 if (item->flags & SDE_ROTATION_90) {
1986 dst_w = item->dst_rect.h;
1987 dst_h = item->dst_rect.w;
1988 } else {
1989 dst_w = item->dst_rect.w;
1990 dst_h = item->dst_rect.h;
1991 }
1992
1993 entry->dnsc_factor_w = 0;
1994 entry->dnsc_factor_h = 0;
1995
1996 if ((src_w != dst_w) || (src_h != dst_h)) {
1997 if ((src_w % dst_w) || (src_h % dst_h)) {
1998 SDEROT_DBG("non integral scale not support\n");
1999 ret = -EINVAL;
Benjamin Chanfb6faa32016-08-16 17:21:01 -04002000 goto dnsc_1p5_check;
Alan Kwong9487de22016-01-16 22:06:36 -05002001 }
2002 entry->dnsc_factor_w = src_w / dst_w;
2003 if ((entry->dnsc_factor_w & (entry->dnsc_factor_w - 1)) ||
2004 (entry->dnsc_factor_w > 64)) {
2005 SDEROT_DBG("non power-of-2 w_scale not support\n");
2006 ret = -EINVAL;
2007 goto dnsc_err;
2008 }
2009 entry->dnsc_factor_h = src_h / dst_h;
2010 if ((entry->dnsc_factor_h & (entry->dnsc_factor_h - 1)) ||
2011 (entry->dnsc_factor_h > 64)) {
2012 SDEROT_DBG("non power-of-2 h_scale not support\n");
2013 ret = -EINVAL;
2014 goto dnsc_err;
2015 }
2016 }
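	/*
	 * Worked example: a 3840x2160 source written out at 960x540 gives
	 * dnsc_factor_w = dnsc_factor_h = 4, which passes the integral,
	 * power-of-2 and <= 64 checks above.
	 */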
2017
Benjamin Chan886ff672016-11-07 15:23:17 -05002018 fmt = sde_get_format_params(item->input.format);
2019 /*
2020	 * The rotator downscaler supports at most 4x for UBWC formats and
2021	 * at most 2x for TP10/TP10_UBWC formats
2022 */
2023 if (sde_mdp_is_ubwc_format(fmt) && (entry->dnsc_factor_h > 4)) {
2024 SDEROT_DBG("max downscale for UBWC format is 4\n");
Alan Kwong9487de22016-01-16 22:06:36 -05002025 ret = -EINVAL;
2026 goto dnsc_err;
2027 }
Benjamin Chan886ff672016-11-07 15:23:17 -05002028 if (sde_mdp_is_tp10_format(fmt) && (entry->dnsc_factor_h > 2)) {
2029 SDEROT_DBG("downscale with TP10 cannot be more than 2\n");
Alan Kwong9487de22016-01-16 22:06:36 -05002030 ret = -EINVAL;
2031 }
Benjamin Chanfb6faa32016-08-16 17:21:01 -04002032 goto dnsc_err;
2033
2034dnsc_1p5_check:
2035 /* Check for 1.5 downscale that only applies to V2 HW */
2036 if (test_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map)) {
2037 entry->dnsc_factor_w = src_w / dst_w;
2038 if ((entry->dnsc_factor_w != 1) ||
2039 ((dst_w * 3) != (src_w * 2))) {
2040 SDEROT_DBG(
2041 "No supporting non 1.5 downscale width ratio, src_w:%d, dst_w:%d\n",
2042 src_w, dst_w);
2043 ret = -EINVAL;
2044 goto dnsc_err;
2045 }
2046
2047 entry->dnsc_factor_h = src_h / dst_h;
2048 if ((entry->dnsc_factor_h != 1) ||
2049 ((dst_h * 3) != (src_h * 2))) {
2050 SDEROT_DBG(
2051 "Not supporting non 1.5 downscale height ratio, src_h:%d, dst_h:%d\n",
2052 src_h, dst_h);
2053 ret = -EINVAL;
2054 goto dnsc_err;
2055 }
2056 ret = 0;
2057 }
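	/*
	 * Worked example for the 1.5x path: 1920x1080 -> 1280x720 satisfies
	 * dst * 3 == src * 2 on both axes (1280*3 == 1920*2 and
	 * 720*3 == 1080*2), while dnsc_factor_w/h stay at 1 from the integer
	 * division above.
	 */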
Alan Kwong9487de22016-01-16 22:06:36 -05002058
2059dnsc_err:
2060	/* Downscaler does not support asymmetric downscale factors */
2061 if (entry->dnsc_factor_w != entry->dnsc_factor_h) {
2062 SDEROT_DBG("asymmetric downscale not support\n");
2063 ret = -EINVAL;
2064 }
2065
2066 if (ret) {
2067 entry->dnsc_factor_w = 0;
2068 entry->dnsc_factor_h = 0;
2069 }
2070 return ret;
2071}
2072
2073/*
2074 * sde_hw_rotator_show_caps - output capability info to sysfs 'caps' file
2075 * @mgr: Pointer to rotator manager
2076 * @attr: Pointer to device attribute interface
2077 * @buf: Pointer to output buffer
2078 * @len: Length of output buffer
2079 */
2080static ssize_t sde_hw_rotator_show_caps(struct sde_rot_mgr *mgr,
2081 struct device_attribute *attr, char *buf, ssize_t len)
2082{
2083 struct sde_hw_rotator *hw_data;
Benjamin Chan886ff672016-11-07 15:23:17 -05002084 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
Alan Kwong9487de22016-01-16 22:06:36 -05002085 int cnt = 0;
2086
2087 if (!mgr || !buf)
2088 return 0;
2089
2090 hw_data = mgr->hw_data;
2091
2092#define SPRINT(fmt, ...) \
2093 (cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
2094
2095 /* insert capabilities here */
Benjamin Chan886ff672016-11-07 15:23:17 -05002096 if (test_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map))
2097 SPRINT("min_downscale=1.5\n");
2098 else
2099 SPRINT("min_downscale=2.0\n");
Alan Kwong9487de22016-01-16 22:06:36 -05002100
Benjamin Chan42db2c92016-11-22 22:50:01 -05002101 SPRINT("downscale_compression=1\n");
2102
Alan Kwong9487de22016-01-16 22:06:36 -05002103#undef SPRINT
2104 return cnt;
2105}
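/*
 * Example 'caps' sysfs output on hardware without the 1.5x downscale
 * capability (illustrative):
 *
 *	min_downscale=2.0
 *	downscale_compression=1
 */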
2106
2107/*
2108 * sde_hw_rotator_show_state - output state info to sysfs 'state' file
2109 * @mgr: Pointer to rotator manager
2110 * @attr: Pointer to device attribute interface
2111 * @buf: Pointer to output buffer
2112 * @len: Length of output buffer
2113 */
2114static ssize_t sde_hw_rotator_show_state(struct sde_rot_mgr *mgr,
2115 struct device_attribute *attr, char *buf, ssize_t len)
2116{
2117 struct sde_hw_rotator *rot;
2118 struct sde_hw_rotator_context *ctx;
2119 int cnt = 0;
2120 int num_active = 0;
2121 int i, j;
2122
2123 if (!mgr || !buf) {
2124 SDEROT_ERR("null parameters\n");
2125 return 0;
2126 }
2127
2128 rot = mgr->hw_data;
2129
2130#define SPRINT(fmt, ...) \
2131 (cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
2132
2133 if (rot) {
2134 SPRINT("rot_mode=%d\n", rot->mode);
2135 SPRINT("irq_num=%d\n", rot->irq_num);
2136
2137 if (rot->mode == ROT_REGDMA_OFF) {
2138 SPRINT("max_active=1\n");
2139 SPRINT("num_active=%d\n", rot->rotCtx[0][0] ? 1 : 0);
2140 } else {
2141 for (i = 0; i < ROT_QUEUE_MAX; i++) {
2142 for (j = 0; j < SDE_HW_ROT_REGDMA_TOTAL_CTX;
2143 j++) {
2144 ctx = rot->rotCtx[i][j];
2145
2146 if (ctx) {
2147 SPRINT(
2148 "rotCtx[%d][%d]:%p\n",
2149 i, j, ctx);
2150 ++num_active;
2151 }
2152 }
2153 }
2154
2155 SPRINT("max_active=%d\n", SDE_HW_ROT_REGDMA_TOTAL_CTX);
2156 SPRINT("num_active=%d\n", num_active);
2157 }
2158 }
2159
2160#undef SPRINT
2161 return cnt;
2162}
2163
2164/*
Alan Kwongda16e442016-08-14 20:47:18 -04002165 * sde_hw_rotator_get_pixfmt - get the indexed pixel format
2166 * @mgr: Pointer to rotator manager
2167 * @index: index of pixel format
2168 * @input: true for input port; false for output port
2169 */
2170static u32 sde_hw_rotator_get_pixfmt(struct sde_rot_mgr *mgr,
2171 int index, bool input)
2172{
2173 if (input) {
2174 if (index < ARRAY_SIZE(sde_hw_rotator_input_pixfmts))
2175 return sde_hw_rotator_input_pixfmts[index];
2176 else
2177 return 0;
2178 } else {
2179 if (index < ARRAY_SIZE(sde_hw_rotator_output_pixfmts))
2180 return sde_hw_rotator_output_pixfmts[index];
2181 else
2182 return 0;
2183 }
2184}
2185
2186/*
2187 * sde_hw_rotator_is_valid_pixfmt - verify if the given pixel format is valid
2188 * @mgr: Pointer to rotator manager
2189 * @pixfmt: pixel format to be verified
2190 * @input: true for input port; false for output port
2191 */
2192static int sde_hw_rotator_is_valid_pixfmt(struct sde_rot_mgr *mgr, u32 pixfmt,
2193 bool input)
2194{
2195 int i;
2196
2197 if (input) {
2198 for (i = 0; i < ARRAY_SIZE(sde_hw_rotator_input_pixfmts); i++)
2199 if (sde_hw_rotator_input_pixfmts[i] == pixfmt)
2200 return true;
2201 } else {
2202 for (i = 0; i < ARRAY_SIZE(sde_hw_rotator_output_pixfmts); i++)
2203 if (sde_hw_rotator_output_pixfmts[i] == pixfmt)
2204 return true;
2205 }
2206
2207 return false;
2208}
2209
2210/*
Alan Kwong9487de22016-01-16 22:06:36 -05002211 * sde_hw_rotator_parse_dt - parse r3 specific device tree settings
2212 * @hw_data: Pointer to rotator hw
2213 * @dev: Pointer to platform device
2214 */
2215static int sde_hw_rotator_parse_dt(struct sde_hw_rotator *hw_data,
2216 struct platform_device *dev)
2217{
2218 int ret = 0;
2219 u32 data;
2220
2221 if (!hw_data || !dev)
2222 return -EINVAL;
2223
2224 ret = of_property_read_u32(dev->dev.of_node, "qcom,mdss-rot-mode",
2225 &data);
2226 if (ret) {
2227 SDEROT_DBG("default to regdma off\n");
2228 ret = 0;
2229 hw_data->mode = ROT_REGDMA_OFF;
2230 } else if (data < ROT_REGDMA_MAX) {
2231 SDEROT_DBG("set to regdma mode %d\n", data);
2232 hw_data->mode = data;
2233 } else {
2234 SDEROT_ERR("regdma mode out of range. default to regdma off\n");
2235 hw_data->mode = ROT_REGDMA_OFF;
2236 }
2237
2238 ret = of_property_read_u32(dev->dev.of_node,
2239 "qcom,mdss-highest-bank-bit", &data);
2240 if (ret) {
2241 SDEROT_DBG("default to A5X bank\n");
2242 ret = 0;
2243 hw_data->highest_bank = 2;
2244 } else {
2245 SDEROT_DBG("set highest bank bit to %d\n", data);
2246 hw_data->highest_bank = data;
2247 }
2248
2249 return ret;
2250}
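/*
 * Illustrative device tree fragment consumed by the parser above (node name
 * and values are hypothetical, not taken from a shipping DT):
 *
 *	rot@... {
 *		qcom,mdss-rot-mode = <1>;
 *		qcom,mdss-highest-bank-bit = <0x2>;
 *	};
 *
 * Any rot-mode value below ROT_REGDMA_MAX is accepted; when the property is
 * absent the driver falls back to ROT_REGDMA_OFF, and the highest bank bit
 * defaults to 2 (A5X).
 */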
2251
2252/*
2253 * sde_rotator_r3_init - initialize the r3 module
2254 * @mgr: Pointer to rotator manager
2255 *
2256 * This function sets up r3 callback functions, parses r3-specific
2257 * device tree settings, installs the r3-specific interrupt handler,
2258 * and initializes the r3 internal data structure.
2259 */
2260int sde_rotator_r3_init(struct sde_rot_mgr *mgr)
2261{
2262 struct sde_hw_rotator *rot;
2263 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
2264 int i;
2265 int ret;
2266
2267 rot = devm_kzalloc(&mgr->pdev->dev, sizeof(*rot), GFP_KERNEL);
2268 if (!rot)
2269 return -ENOMEM;
2270
2271 mgr->hw_data = rot;
2272 mgr->queue_count = ROT_QUEUE_MAX;
2273
2274 rot->mdss_base = mdata->sde_io.base;
2275 rot->pdev = mgr->pdev;
2276
2277 /* Assign ops */
2278 mgr->ops_hw_destroy = sde_hw_rotator_destroy;
2279 mgr->ops_hw_alloc = sde_hw_rotator_alloc_ext;
2280 mgr->ops_hw_free = sde_hw_rotator_free_ext;
2281 mgr->ops_config_hw = sde_hw_rotator_config;
2282 mgr->ops_kickoff_entry = sde_hw_rotator_kickoff;
2283 mgr->ops_wait_for_entry = sde_hw_rotator_wait4done;
2284 mgr->ops_hw_validate_entry = sde_hw_rotator_validate_entry;
2285 mgr->ops_hw_show_caps = sde_hw_rotator_show_caps;
2286 mgr->ops_hw_show_state = sde_hw_rotator_show_state;
2287 mgr->ops_hw_create_debugfs = sde_rotator_r3_create_debugfs;
Alan Kwongda16e442016-08-14 20:47:18 -04002288 mgr->ops_hw_get_pixfmt = sde_hw_rotator_get_pixfmt;
2289 mgr->ops_hw_is_valid_pixfmt = sde_hw_rotator_is_valid_pixfmt;
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04002290 mgr->ops_hw_pre_pmevent = sde_hw_rotator_pre_pmevent;
2291 mgr->ops_hw_post_pmevent = sde_hw_rotator_post_pmevent;
Alan Kwong9487de22016-01-16 22:06:36 -05002292
2293 ret = sde_hw_rotator_parse_dt(mgr->hw_data, mgr->pdev);
2294 if (ret)
2295 goto error_parse_dt;
2296
2297 rot->irq_num = platform_get_irq(mgr->pdev, 0);
2298 if (rot->irq_num < 0) {
2299 SDEROT_ERR("fail to get rotator irq\n");
2300 } else {
2301 if (rot->mode == ROT_REGDMA_OFF)
2302 ret = devm_request_threaded_irq(&mgr->pdev->dev,
2303 rot->irq_num,
2304 sde_hw_rotator_rotirq_handler,
2305 NULL, 0, "sde_rotator_r3", rot);
2306 else
2307 ret = devm_request_threaded_irq(&mgr->pdev->dev,
2308 rot->irq_num,
2309 sde_hw_rotator_regdmairq_handler,
2310 NULL, 0, "sde_rotator_r3", rot);
2311 if (ret) {
2312 SDEROT_ERR("fail to request irq r:%d\n", ret);
2313 rot->irq_num = -1;
2314 } else {
2315 disable_irq(rot->irq_num);
2316 }
2317 }
Alan Kwong818b7fc2016-07-24 22:07:41 -04002318 atomic_set(&rot->irq_enabled, 0);
Alan Kwong9487de22016-01-16 22:06:36 -05002319
2320 setup_rotator_ops(&rot->ops, rot->mode);
2321
2322 spin_lock_init(&rot->rotctx_lock);
2323 spin_lock_init(&rot->rotisr_lock);
2324
2325 /* REGDMA initialization */
2326 if (rot->mode == ROT_REGDMA_OFF) {
2327 for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
2328 rot->cmd_wr_ptr[0][i] = &rot->cmd_queue[
2329 SDE_HW_ROT_REGDMA_SEG_SIZE * i];
2330 } else {
2331 for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
2332 rot->cmd_wr_ptr[ROT_QUEUE_HIGH_PRIORITY][i] =
2333 (u32 *)(rot->mdss_base +
2334 REGDMA_RAM_REGDMA_CMD_RAM +
2335 SDE_HW_ROT_REGDMA_SEG_SIZE * 4 * i);
2336
2337 for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
2338 rot->cmd_wr_ptr[ROT_QUEUE_LOW_PRIORITY][i] =
2339 (u32 *)(rot->mdss_base +
2340 REGDMA_RAM_REGDMA_CMD_RAM +
2341 SDE_HW_ROT_REGDMA_SEG_SIZE * 4 *
2342 (i + SDE_HW_ROT_REGDMA_TOTAL_CTX));
2343 }
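	/*
	 * In REGDMA mode the per-context write pointers above map straight
	 * into the on-chip command RAM (REGDMA_RAM_REGDMA_CMD_RAM), with the
	 * high-priority queue occupying the first half of the segments and
	 * the low-priority queue the second half; in bypass mode they point
	 * into the software cmd_queue instead.
	 */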
2344
2345 atomic_set(&rot->timestamp[0], 0);
2346 atomic_set(&rot->timestamp[1], 0);
Alan Kwong9487de22016-01-16 22:06:36 -05002347
2348 ret = sde_rotator_hw_rev_init(rot);
2349 if (ret)
2350 goto error_hw_rev_init;
2351
Alan Kwong315cd772016-08-03 22:29:42 -04002352	/* set rotator CBCR to shut off memory/periphery on clock off. */
Benjamin Chan77aed192016-10-17 17:49:41 -04002353 clk_set_flags(mgr->rot_clk[SDE_ROTATOR_CLK_ROT_CORE].clk,
Alan Kwong315cd772016-08-03 22:29:42 -04002354 CLKFLAG_NORETAIN_MEM);
Benjamin Chan77aed192016-10-17 17:49:41 -04002355 clk_set_flags(mgr->rot_clk[SDE_ROTATOR_CLK_ROT_CORE].clk,
Alan Kwong315cd772016-08-03 22:29:42 -04002356 CLKFLAG_NORETAIN_PERIPH);
2357
Benjamin Chan53e3bce2016-08-31 14:43:29 -04002358 mdata->sde_rot_hw = rot;
Alan Kwong9487de22016-01-16 22:06:36 -05002359 return 0;
2360error_hw_rev_init:
2361 if (rot->irq_num >= 0)
2362 devm_free_irq(&mgr->pdev->dev, rot->irq_num, mdata);
2363 devm_kfree(&mgr->pdev->dev, mgr->hw_data);
2364error_parse_dt:
2365 return ret;
2366}