/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt)	"%s: " fmt, __func__

#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/sync.h>
#include <linux/delay.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dma-buf.h>
#include <linux/msm_ion.h>

#include "sde_rotator_core.h"
#include "sde_rotator_util.h"
#include "sde_rotator_smmu.h"
#include "sde_rotator_r3.h"
#include "sde_rotator_r3_internal.h"
#include "sde_rotator_r3_hwio.h"
#include "sde_rotator_r3_debug.h"
#include "sde_rotator_trace.h"

/* XIN mapping */
#define XIN_SSPP	0
#define XIN_WRITEBACK	1

/* wait for at most 2 vsync for lowest refresh rate (24hz) */
#define KOFF_TIMEOUT	msecs_to_jiffies(42 * 32)
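
/*
 * Note on the arithmetic: one vsync period at 24 Hz is 1000 / 24 ~= 42 ms,
 * so the value above amounts to 42 * 32 = 1344 ms. The extra x32 factor is
 * presumably headroom beyond the "2 vsync" stated in the comment; treat the
 * exact margin as an implementation choice, not a hard requirement.
 */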

/* Macro for constructing the REGDMA command */
#define SDE_REGDMA_WRITE(p, off, data) \
	do { \
		*p++ = REGDMA_OP_REGWRITE | \
			((off) & REGDMA_ADDR_OFFSET_MASK); \
		*p++ = (data); \
	} while (0)

#define SDE_REGDMA_MODIFY(p, off, mask, data) \
	do { \
		*p++ = REGDMA_OP_REGMODIFY | \
			((off) & REGDMA_ADDR_OFFSET_MASK); \
		*p++ = (mask); \
		*p++ = (data); \
	} while (0)

#define SDE_REGDMA_BLKWRITE_INC(p, off, len) \
	do { \
		*p++ = REGDMA_OP_BLKWRITE_INC | \
			((off) & REGDMA_ADDR_OFFSET_MASK); \
		*p++ = (len); \
	} while (0)

#define SDE_REGDMA_BLKWRITE_DATA(p, data) \
	do { \
		*(p) = (data); \
		(p)++; \
	} while (0)
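
/*
 * Illustrative layout of the command stream these macros emit, derived
 * from the macro bodies above (each bracketed item is one u32 in the
 * regdma command queue):
 *
 *   SDE_REGDMA_WRITE(p, off, data)     -> [OP_REGWRITE|off][data]
 *   SDE_REGDMA_MODIFY(p, off, m, d)    -> [OP_REGMODIFY|off][m][d]
 *   SDE_REGDMA_BLKWRITE_INC(p, off, n) -> [OP_BLKWRITE_INC|off][n]
 *                                         followed by n data words
 */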

/* Macro for directly accessing mapped registers */
#define SDE_ROTREG_WRITE(base, off, data) \
	writel_relaxed(data, (base + (off)))

#define SDE_ROTREG_READ(base, off) \
	readl_relaxed(base + (off))
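
/*
 * These accessors use the relaxed I/O variants, so they are not ordered
 * against normal memory accesses; callers that need ordering (e.g. before
 * a regdma submit) issue an explicit wmb(), as done in
 * sde_hw_rotator_start_regdma() below.
 */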

/**
 * sde_hw_rotator_get_ctx(): Retrieve rotator context from rotator HW based
 * on provided session_id. Each rotator context has a unique session_id.
 */
static struct sde_hw_rotator_context *sde_hw_rotator_get_ctx(
		struct sde_hw_rotator *rot, u32 session_id,
		enum sde_rot_queue_prio q_id)
{
	int i;
	struct sde_hw_rotator_context *ctx = NULL;

	for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++) {
		ctx = rot->rotCtx[q_id][i];

		if (ctx && (ctx->session_id == session_id)) {
			SDEROT_DBG(
				"rotCtx slot[%d][%d] ==> ctx:%p | session-id:%d\n",
				q_id, i, ctx, ctx->session_id);
			return ctx;
		}
	}

	return NULL;
}

/*
 * sde_hw_rotator_map_vaddr - map the debug buffer to kernel space
 * @dbgbuf: Pointer to debug buffer
 * @buf: Pointer to layer buffer structure
 * @data: Pointer to h/w mapped buffer structure
 */
static void sde_hw_rotator_map_vaddr(struct sde_dbg_buf *dbgbuf,
		struct sde_layer_buffer *buf, struct sde_mdp_data *data)
{
	dbgbuf->dmabuf = data->p[0].srcp_dma_buf;
	dbgbuf->buflen = data->p[0].srcp_dma_buf->size;

	dbgbuf->vaddr  = NULL;
	dbgbuf->width  = buf->width;
	dbgbuf->height = buf->height;

	if (dbgbuf->dmabuf && (dbgbuf->buflen > 0)) {
		dma_buf_begin_cpu_access(dbgbuf->dmabuf, 0, dbgbuf->buflen,
				DMA_FROM_DEVICE);
		dbgbuf->vaddr = dma_buf_kmap(dbgbuf->dmabuf, 0);
		SDEROT_DBG("vaddr mapping: 0x%p/%ld w:%d/h:%d\n",
				dbgbuf->vaddr, dbgbuf->buflen,
				dbgbuf->width, dbgbuf->height);
	}
}

/*
 * sde_hw_rotator_unmap_vaddr - unmap the debug buffer from kernel space
 * @dbgbuf: Pointer to debug buffer
 */
static void sde_hw_rotator_unmap_vaddr(struct sde_dbg_buf *dbgbuf)
{
	if (dbgbuf->vaddr) {
		dma_buf_kunmap(dbgbuf->dmabuf, 0, dbgbuf->vaddr);
		dma_buf_end_cpu_access(dbgbuf->dmabuf, 0, dbgbuf->buflen,
				DMA_FROM_DEVICE);
	}

	dbgbuf->vaddr  = NULL;
	dbgbuf->dmabuf = NULL;
	dbgbuf->buflen = 0;
	dbgbuf->width  = 0;
	dbgbuf->height = 0;
}

/*
 * sde_hw_rotator_setup_timestamp_packet - setup timestamp writeback command
 * @ctx: Pointer to rotator context
 * @mask: Bit mask location of the timestamp
 * @swts: Software timestamp
 */
static void sde_hw_rotator_setup_timestamp_packet(
		struct sde_hw_rotator_context *ctx, u32 mask, u32 swts)
{
	u32 *wrptr;

	wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	/*
	 * Create a dummy packet write out to 1 location for timestamp
	 * generation.
	 */
	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_SIZE, 6);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x00010001);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x00010001);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, ctx->ts_addr);
	SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_YSTRIDE0, 4);
	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_FORMAT, 4);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x004037FF);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x03020100);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x80000000);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, ctx->timestamp);
	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_WB_DST_FORMAT, 4);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x000037FF);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x03020100);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, ctx->ts_addr);
	SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_YSTRIDE0, 4);
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_SIZE, 0x00010001);
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_IMG_SIZE, 0x00010001);
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_XY, 0);
	SDE_REGDMA_WRITE(wrptr, ROTTOP_DNSC, 0);
	SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, 1);
	SDE_REGDMA_MODIFY(wrptr, REGDMA_TIMESTAMP_REG, mask, swts);
	SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, 1);

	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
}

/*
 * sde_hw_rotator_setup_fetchengine - setup fetch engine
 * @ctx: Pointer to rotator context
 * @queue_id: Priority queue identifier
 * @cfg: Fetch configuration
 * @danger_lut: real-time QoS LUT for danger setting (not used)
 * @safe_lut: real-time QoS LUT for safe setting (not used)
 * @flags: Control flag
 */
static void sde_hw_rotator_setup_fetchengine(struct sde_hw_rotator_context *ctx,
		enum sde_rot_queue_prio queue_id,
		struct sde_hw_rot_sspp_cfg *cfg, u32 danger_lut, u32 safe_lut,
		u32 flags)
{
	struct sde_hw_rotator *rot = ctx->rot;
	struct sde_mdp_format_params *fmt;
	struct sde_mdp_data *data;
	u32 *wrptr;
	u32 opmode = 0;
	u32 chroma_samp = 0;
	u32 src_format = 0;
	u32 unpack = 0;
	u32 width = cfg->img_width;
	u32 height = cfg->img_height;
	u32 fetch_blocksize = 0;
	int i;

	if (ctx->rot->mode == ROT_REGDMA_ON) {
		SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_INT_EN,
				REGDMA_INT_MASK);
		SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_OP_MODE,
				REGDMA_EN);
	}

	wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	/* source image setup */
	if ((flags & SDE_ROT_FLAG_DEINTERLACE)
			&& !(flags & SDE_ROT_FLAG_SOURCE_ROTATED_90)) {
		for (i = 0; i < cfg->src_plane.num_planes; i++)
			cfg->src_plane.ystride[i] *= 2;
		width *= 2;
		height /= 2;
	}

	/*
	 * REGDMA BLK write from SRC_SIZE to OP_MODE, total 15 registers
	 */
	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_SIZE, 15);

	/* SRC_SIZE, SRC_IMG_SIZE, SRC_XY, OUT_SIZE, OUT_XY */
	SDE_REGDMA_BLKWRITE_DATA(wrptr,
			cfg->src_rect->w | (cfg->src_rect->h << 16));
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0); /* SRC_IMG_SIZE unused */
	SDE_REGDMA_BLKWRITE_DATA(wrptr,
			cfg->src_rect->x | (cfg->src_rect->y << 16));
	SDE_REGDMA_BLKWRITE_DATA(wrptr,
			cfg->src_rect->w | (cfg->src_rect->h << 16));
	SDE_REGDMA_BLKWRITE_DATA(wrptr,
			cfg->src_rect->x | (cfg->src_rect->y << 16));

	/* SRC_ADDR [0-3], SRC_YSTRIDE [0-1] */
	data = cfg->data;
	for (i = 0; i < SDE_ROT_MAX_PLANES; i++)
		SDE_REGDMA_BLKWRITE_DATA(wrptr, data->p[i].addr);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->src_plane.ystride[0] |
			(cfg->src_plane.ystride[1] << 16));
	SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->src_plane.ystride[2] |
			(cfg->src_plane.ystride[3] << 16));

	/* UNUSED, write 0 */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);

	/* setup source format */
	fmt = cfg->fmt;

	chroma_samp = fmt->chroma_sample;
	if (flags & SDE_ROT_FLAG_SOURCE_ROTATED_90) {
		if (chroma_samp == SDE_MDP_CHROMA_H2V1)
			chroma_samp = SDE_MDP_CHROMA_H1V2;
		else if (chroma_samp == SDE_MDP_CHROMA_H1V2)
			chroma_samp = SDE_MDP_CHROMA_H2V1;
	}

	src_format = (chroma_samp << 23) |
		(fmt->fetch_planes << 19) |
		(fmt->bits[C3_ALPHA] << 6) |
		(fmt->bits[C2_R_Cr] << 4) |
		(fmt->bits[C1_B_Cb] << 2) |
		(fmt->bits[C0_G_Y] << 0);

	if (fmt->alpha_enable &&
			(fmt->fetch_planes == SDE_MDP_PLANE_INTERLEAVED))
		src_format |= BIT(8); /* SRCC3_EN */

	src_format |= ((fmt->unpack_count - 1) << 12) |
			(fmt->unpack_tight << 17) |
			(fmt->unpack_align_msb << 18) |
			((fmt->bpp - 1) << 9) |
			((fmt->frame_format & 3) << 30);

	if (flags & SDE_ROT_FLAG_ROT_90)
		src_format |= BIT(11); /* ROT90 */

	if (sde_mdp_is_ubwc_format(fmt))
		opmode |= BIT(0); /* BWC_DEC_EN */

	/* if this is YUV pixel format, enable CSC */
	if (sde_mdp_is_yuv_format(fmt))
		src_format |= BIT(15); /* SRC_COLOR_SPACE */

	if (fmt->pixel_mode == SDE_MDP_PIXEL_10BIT)
		src_format |= BIT(14); /* UNPACK_DX_FORMAT */

	/* SRC_FORMAT */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, src_format);

	/* setup source unpack pattern */
	unpack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
			(fmt->element[1] << 8) | (fmt->element[0] << 0);

	/* SRC_UNPACK_PATTERN */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, unpack);

	/* setup source op mode */
	if (flags & SDE_ROT_FLAG_FLIP_LR)
		opmode |= BIT(13); /* FLIP_MODE L/R horizontal flip */
	if (flags & SDE_ROT_FLAG_FLIP_UD)
		opmode |= BIT(14); /* FLIP_MODE U/D vertical flip */
	opmode |= BIT(31); /* MDSS_MDP_OP_PE_OVERRIDE */

	/* SRC_OP_MODE */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, opmode);

	/* setup source fetch config, TP10 uses different block size */
	if (sde_mdp_is_tp10_format(fmt))
		fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_96;
	else
		fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_128;
	SDE_REGDMA_WRITE(wrptr, ROT_SSPP_FETCH_CONFIG,
			fetch_blocksize |
			SDE_ROT_SSPP_FETCH_CONFIG_RESET_VALUE |
			((rot->highest_bank & 0x3) << 18));

	/* setup source buffer plane security status */
	if (flags & SDE_ROT_FLAG_SECURE_OVERLAY_SESSION) {
		SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_ADDR_SW_STATUS, 0xF);
		ctx->is_secure = true;
	}

	/* Update command queue write ptr */
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
}
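
/*
 * SRC_FORMAT bit packing used above, reconstructed from the code for
 * reference: frame format [31:30], chroma sample [24:23], fetch planes
 * [20:19], unpack align msb [18], unpack tight [17], color space [15],
 * DX unpack [14], unpack count - 1 [13:12], ROT90 [11], bpp - 1 [10:9],
 * SRCC3_EN [8], and per-component bit depths C3..C0 in [7:0].
 */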

/*
 * sde_hw_rotator_setup_wbengine - setup writeback engine
 * @ctx: Pointer to rotator context
 * @queue_id: Priority queue identifier
 * @cfg: Writeback configuration
 * @flags: Control flag
 */
static void sde_hw_rotator_setup_wbengine(struct sde_hw_rotator_context *ctx,
		enum sde_rot_queue_prio queue_id,
		struct sde_hw_rot_wb_cfg *cfg,
		u32 flags)
{
	struct sde_mdp_format_params *fmt;
	u32 *wrptr;
	u32 pack = 0;
	u32 dst_format = 0;
	int i;

	wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	fmt = cfg->fmt;

	/* setup WB DST format */
	dst_format |= (fmt->chroma_sample << 23) |
			(fmt->fetch_planes << 19) |
			(fmt->bits[C3_ALPHA] << 6) |
			(fmt->bits[C2_R_Cr] << 4) |
			(fmt->bits[C1_B_Cb] << 2) |
			(fmt->bits[C0_G_Y] << 0);

	/* alpha control */
	if (fmt->bits[C3_ALPHA] || fmt->alpha_enable) {
		dst_format |= BIT(8);
		if (!fmt->alpha_enable) {
			dst_format |= BIT(14);
			SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ALPHA_X_VALUE, 0);
		}
	}

	dst_format |= ((fmt->unpack_count - 1) << 12) |
			(fmt->unpack_tight << 17) |
			(fmt->unpack_align_msb << 18) |
			((fmt->bpp - 1) << 9) |
			((fmt->frame_format & 3) << 30);

	if (sde_mdp_is_yuv_format(fmt))
		dst_format |= BIT(15);

	if (fmt->pixel_mode == SDE_MDP_PIXEL_10BIT)
		dst_format |= BIT(21); /* PACK_DX_FORMAT */

	/*
	 * REGDMA BLK write, from DST_FORMAT to DST_YSTRIDE 1, total 9 regs
	 */
	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_WB_DST_FORMAT, 9);

	/* DST_FORMAT */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, dst_format);

	/* DST_OP_MODE */
	if (sde_mdp_is_ubwc_format(fmt))
		SDE_REGDMA_BLKWRITE_DATA(wrptr, BIT(0));
	else
		SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);

	/* DST_PACK_PATTERN */
	pack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
			(fmt->element[1] << 8) | (fmt->element[0] << 0);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, pack);

	/* DST_ADDR [0-3], DST_YSTRIDE [0-1] */
	for (i = 0; i < SDE_ROT_MAX_PLANES; i++)
		SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->data->p[i].addr);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->dst_plane.ystride[0] |
			(cfg->dst_plane.ystride[1] << 16));
	SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->dst_plane.ystride[2] |
			(cfg->dst_plane.ystride[3] << 16));

	/* setup WB out image size and ROI */
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_IMG_SIZE,
			cfg->img_width | (cfg->img_height << 16));
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_SIZE,
			cfg->dst_rect->w | (cfg->dst_rect->h << 16));
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_XY,
			cfg->dst_rect->x | (cfg->dst_rect->y << 16));

	/*
	 * setup downscale factor
	 */
	SDE_REGDMA_WRITE(wrptr, ROTTOP_DNSC,
			cfg->v_downscale_factor |
			(cfg->h_downscale_factor << 16));

	/* write config setup for bank configuration */
	SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_WRITE_CONFIG,
			(ctx->rot->highest_bank & 0x3) << 8);

	if (flags & SDE_ROT_FLAG_ROT_90)
		SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, 0x3);
	else
		SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, 0x1);

	/* Update command queue write ptr */
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
}

/*
 * sde_hw_rotator_start_no_regdma - start non-regdma operation
 * @ctx: Pointer to rotator context
 * @queue_id: Priority queue identifier
 */
static u32 sde_hw_rotator_start_no_regdma(struct sde_hw_rotator_context *ctx,
		enum sde_rot_queue_prio queue_id)
{
	struct sde_hw_rotator *rot = ctx->rot;
	u32 *wrptr;
	u32 *rdptr;
	u8 *addr;
	u32 mask;
	u32 blksize;

	rdptr = sde_hw_rotator_get_regdma_segment_base(ctx);
	wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	if (rot->irq_num >= 0) {
		SDE_REGDMA_WRITE(wrptr, ROTTOP_INTR_EN, 1);
		SDE_REGDMA_WRITE(wrptr, ROTTOP_INTR_CLEAR, 1);
		reinit_completion(&ctx->rot_comp);
		enable_irq(rot->irq_num);
	}

	SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, 1);

	/* Update command queue write ptr */
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);

	SDEROT_DBG("BEGIN %d\n", ctx->timestamp);
	/*
	 * Write the whole command stream to the rotator blocks;
	 * the rotator starts right after the command stream finishes writing.
	 */
	while (rdptr < wrptr) {
		u32 op = REGDMA_OP_MASK & *rdptr;

		switch (op) {
		case REGDMA_OP_NOP:
			SDEROT_DBG("NOP\n");
			rdptr++;
			break;
		case REGDMA_OP_REGWRITE:
			SDEROT_DBG("REGW %6.6x %8.8x\n",
					rdptr[0] & REGDMA_ADDR_OFFSET_MASK,
					rdptr[1]);
			addr = rot->mdss_base +
				(*rdptr++ & REGDMA_ADDR_OFFSET_MASK);
			writel_relaxed(*rdptr++, addr);
			break;
		case REGDMA_OP_REGMODIFY:
			SDEROT_DBG("REGM %6.6x %8.8x %8.8x\n",
					rdptr[0] & REGDMA_ADDR_OFFSET_MASK,
					rdptr[1], rdptr[2]);
			addr = rot->mdss_base +
				(*rdptr++ & REGDMA_ADDR_OFFSET_MASK);
			mask = *rdptr++;
			writel_relaxed((readl_relaxed(addr) & mask) | *rdptr++,
					addr);
			break;
		case REGDMA_OP_BLKWRITE_SINGLE:
			SDEROT_DBG("BLKWS %6.6x %6.6x\n",
					rdptr[0] & REGDMA_ADDR_OFFSET_MASK,
					rdptr[1]);
			addr = rot->mdss_base +
				(*rdptr++ & REGDMA_ADDR_OFFSET_MASK);
			blksize = *rdptr++;
			while (blksize--) {
				SDEROT_DBG("DATA %8.8x\n", rdptr[0]);
				writel_relaxed(*rdptr++, addr);
			}
			break;
		case REGDMA_OP_BLKWRITE_INC:
			SDEROT_DBG("BLKWI %6.6x %6.6x\n",
					rdptr[0] & REGDMA_ADDR_OFFSET_MASK,
					rdptr[1]);
			addr = rot->mdss_base +
				(*rdptr++ & REGDMA_ADDR_OFFSET_MASK);
			blksize = *rdptr++;
			while (blksize--) {
				SDEROT_DBG("DATA %8.8x\n", rdptr[0]);
				writel_relaxed(*rdptr++, addr);
				addr += 4;
			}
			break;
		default:
			/* Unsupported op mode; skip the opcode word for
			 * unrecognized commands
			 */
			SDEROT_DBG("UNDEFINED\n");
			rdptr++;
			break;
		}
	}
	SDEROT_DBG("END %d\n", ctx->timestamp);

	return ctx->timestamp;
}

/*
 * sde_hw_rotator_start_regdma - start regdma operation
 * @ctx: Pointer to rotator context
 * @queue_id: Priority queue identifier
 */
static u32 sde_hw_rotator_start_regdma(struct sde_hw_rotator_context *ctx,
		enum sde_rot_queue_prio queue_id)
{
	struct sde_hw_rotator *rot = ctx->rot;
	u32 *wrptr;
	u32 regdmaSlot;
	u32 offset;
	long length;
	long ts_length;
	u32 enableInt;
	u32 swts = 0;
	u32 mask = 0;

	wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	if (rot->irq_num >= 0)
		reinit_completion(&ctx->regdma_comp);

	/* enable IRQ for first regdma submission from idle */
	if (atomic_read(&rot->regdma_submit_count) ==
			atomic_read(&rot->regdma_done_count)) {
		SDEROT_DBG("Enable IRQ! regdma submitcnt==donecnt -> %d\n",
				atomic_read(&rot->regdma_submit_count));
		enable_irq(rot->irq_num);
	}

	/*
	 * Last ROT command must be ROT_START before REGDMA start
	 */
	SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, 1);
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);

	/*
	 * Start REGDMA with command offset and size
	 */
	regdmaSlot = sde_hw_rotator_get_regdma_ctxidx(ctx);
	length = ((long)wrptr - (long)ctx->regdma_base) / 4;
	offset = (u32)(ctx->regdma_base - (u32 *)(rot->mdss_base +
				REGDMA_RAM_REGDMA_CMD_RAM));
	enableInt = ((ctx->timestamp & 1) + 1) << 30;

	SDEROT_DBG(
		"regdma(%d)[%d] <== INT:0x%X|length:%ld|offset:0x%X, ts:%X\n",
		queue_id, regdmaSlot, enableInt, length, offset,
		ctx->timestamp);

	/* ensure the command packet is issued before the submit command */
	wmb();

	/* REGDMA submission for current context */
	if (queue_id == ROT_QUEUE_HIGH_PRIORITY) {
		SDE_ROTREG_WRITE(rot->mdss_base,
				REGDMA_CSR_REGDMA_QUEUE_0_SUBMIT,
				(length << 14) | offset);
		swts = ctx->timestamp;
		mask = ~SDE_REGDMA_SWTS_MASK;
	} else {
		SDE_ROTREG_WRITE(rot->mdss_base,
				REGDMA_CSR_REGDMA_QUEUE_1_SUBMIT,
				(length << 14) | offset);
		swts = ctx->timestamp << SDE_REGDMA_SWTS_SHIFT;
		mask = ~(SDE_REGDMA_SWTS_MASK << SDE_REGDMA_SWTS_SHIFT);
	}

	/* Write timestamp after previous rotator job finished */
	sde_hw_rotator_setup_timestamp_packet(ctx, mask, swts);
	offset += length;
	ts_length = sde_hw_rotator_get_regdma_segment(ctx) - wrptr;
	WARN_ON((length + ts_length) > SDE_HW_ROT_REGDMA_SEG_SIZE);

	/* ensure the command packet is issued before the submit command */
	wmb();

	if (queue_id == ROT_QUEUE_HIGH_PRIORITY) {
		SDE_ROTREG_WRITE(rot->mdss_base,
				REGDMA_CSR_REGDMA_QUEUE_0_SUBMIT,
				enableInt | (ts_length << 14) | offset);
	} else {
		SDE_ROTREG_WRITE(rot->mdss_base,
				REGDMA_CSR_REGDMA_QUEUE_1_SUBMIT,
				enableInt | (ts_length << 14) | offset);
	}

	/* Update REGDMA submit count */
	atomic_inc(&rot->regdma_submit_count);

	/* Update command queue write ptr */
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);

	return ctx->timestamp;
}
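
/*
 * For reference, from the code above: a QUEUE_n_SUBMIT value packs the
 * word offset into the command RAM in the low 14 bits and the command
 * length (in 32-bit words) shifted left by 14, i.e. (length << 14) |
 * offset; for example a 64-word packet at word offset 0x100 submits as
 * (64 << 14) | 0x100 = 0x100100. Bit 30 or bit 31 (enableInt) selects
 * which of the two per-queue interrupts fires, based on the parity of
 * the context timestamp.
 */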

/*
 * sde_hw_rotator_wait_done_no_regdma - wait for non-regdma completion
 * @ctx: Pointer to rotator context
 * @queue_id: Priority queue identifier
 * @flag: Option flag
 */
static u32 sde_hw_rotator_wait_done_no_regdma(
		struct sde_hw_rotator_context *ctx,
		enum sde_rot_queue_prio queue_id, u32 flag)
{
	struct sde_hw_rotator *rot = ctx->rot;
	int rc = 0;
	u32 sts = 0;
	u32 status;
	unsigned long flags;

	if (rot->irq_num >= 0) {
		SDEROT_DBG("Wait for Rotator completion\n");
		rc = wait_for_completion_timeout(&ctx->rot_comp,
				KOFF_TIMEOUT);

		spin_lock_irqsave(&rot->rotisr_lock, flags);
		status = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
		if (rc == 0) {
			/*
			 * Timeout; there might be an error,
			 * or the rotator is still busy
			 */
			if (status & ROT_BUSY_BIT)
				SDEROT_ERR(
					"Timeout waiting for rotator done\n");
			else if (status & ROT_ERROR_BIT)
				SDEROT_ERR(
					"Rotator reported error status\n");
			else
				SDEROT_WARN(
					"Timeout waiting, but rotator job is done!!\n");

			disable_irq_nosync(rot->irq_num);
		}
		spin_unlock_irqrestore(&rot->rotisr_lock, flags);
	} else {
		int cnt = 200;

		do {
			udelay(500);
			status = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
			cnt--;
		} while ((cnt > 0) && (status & ROT_BUSY_BIT)
				&& ((status & ROT_ERROR_BIT) == 0));

		if (status & ROT_ERROR_BIT)
			SDEROT_ERR("Rotator error\n");
		else if (status & ROT_BUSY_BIT)
			SDEROT_ERR("Rotator busy\n");

		SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
				ROT_DONE_CLEAR);
	}

	sts = (status & ROT_ERROR_BIT) ? -ENODEV : 0;

	return sts;
}
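
/*
 * In the polled (no-IRQ) fallback above, the status loop runs at most
 * 200 iterations of 500 us each, i.e. roughly a 100 ms busy-wait budget
 * before the rotator is declared busy or in error.
 */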

/*
 * sde_hw_rotator_wait_done_regdma - wait for regdma completion
 * @ctx: Pointer to rotator context
 * @queue_id: Priority queue identifier
 * @flag: Option flag
 */
static u32 sde_hw_rotator_wait_done_regdma(
		struct sde_hw_rotator_context *ctx,
		enum sde_rot_queue_prio queue_id, u32 flag)
{
	struct sde_hw_rotator *rot = ctx->rot;
	int rc = 0;
	u32 status;
	u32 last_isr;
	u32 last_ts;
	u32 int_id;
	u32 sts = 0;
	u32 d_count;
	unsigned long flags;

	if (rot->irq_num >= 0) {
		SDEROT_DBG("Wait for REGDMA completion, ctx:%p, ts:%X\n",
				ctx, ctx->timestamp);
		rc = wait_for_completion_timeout(&ctx->regdma_comp,
				KOFF_TIMEOUT);

		spin_lock_irqsave(&rot->rotisr_lock, flags);

		last_isr = ctx->last_regdma_isr_status;
		last_ts = ctx->last_regdma_timestamp;
		status = last_isr & REGDMA_INT_MASK;
		int_id = last_ts & 1;
		SDEROT_DBG("INT status:0x%X, INT id:%d, timestamp:0x%X\n",
				status, int_id, last_ts);

		if (rc == 0 || (status & REGDMA_INT_ERR_MASK)) {
			SDEROT_ERR(
				"Timeout/error waiting for regdma interrupt status, ts:%X\n",
				ctx->timestamp);

			if (status & REGDMA_WATCHDOG_INT)
				SDEROT_ERR("REGDMA watchdog interrupt\n");
			else if (status & REGDMA_INVALID_DESCRIPTOR)
				SDEROT_ERR("REGDMA invalid descriptor\n");
			else if (status & REGDMA_INCOMPLETE_CMD)
				SDEROT_ERR("REGDMA incomplete command\n");
			else if (status & REGDMA_INVALID_CMD)
				SDEROT_ERR("REGDMA invalid command\n");

			status = ROT_ERROR_BIT;
		} else if (queue_id == ROT_QUEUE_HIGH_PRIORITY) {
			/* Has to match exactly with the interrupt ID */
			int_id = REGDMA_QUEUE0_INT0 << int_id;

			SDE_ROTREG_WRITE(rot->mdss_base,
					REGDMA_CSR_REGDMA_INT_CLEAR,
					int_id);

			status = 0;
		} else if (queue_id == ROT_QUEUE_LOW_PRIORITY) {
			/* Matching interrupt ID */
			int_id = REGDMA_QUEUE1_INT0 << int_id;

			SDE_ROTREG_WRITE(rot->mdss_base,
					REGDMA_CSR_REGDMA_INT_CLEAR,
					int_id);

			status = 0;
		}

		/* regardless of success or timeout, update done count */
		d_count = atomic_inc_return(&rot->regdma_done_count);

		/* disable IRQ if no more regdma submissions in queue */
		if (d_count == atomic_read(&rot->regdma_submit_count)) {
			SDEROT_DBG(
				"Disable IRQ!! regdma donecnt==submitcnt -> %d\n",
				d_count);
			disable_irq_nosync(rot->irq_num);
		}

		spin_unlock_irqrestore(&rot->rotisr_lock, flags);
	} else {
		int cnt = 200;

		do {
			udelay(500);
			status = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
			cnt--;
		} while ((cnt > 0) && (status & ROT_BUSY_BIT)
				&& ((status & ROT_ERROR_BIT) == 0));

		if (status & ROT_ERROR_BIT)
			SDEROT_ERR("Rotator error\n");
		else if (status & ROT_BUSY_BIT)
			SDEROT_ERR("Rotator busy\n");

		SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_INT_CLEAR,
				0xFFFF);
	}

	sts = (status & ROT_ERROR_BIT) ? -ENODEV : 0;

	return sts;
}

/*
 * setup_rotator_ops - setup callback functions for the low-level HAL
 * @ops: Pointer to low-level ops callback
 * @mode: Operation mode (non-regdma or regdma)
 */
static void setup_rotator_ops(struct sde_hw_rotator_ops *ops,
		enum sde_rotator_regdma_mode mode)
{
	ops->setup_rotator_fetchengine = sde_hw_rotator_setup_fetchengine;
	ops->setup_rotator_wbengine = sde_hw_rotator_setup_wbengine;
	if (mode == ROT_REGDMA_ON) {
		ops->start_rotator = sde_hw_rotator_start_regdma;
		ops->wait_rotator_done = sde_hw_rotator_wait_done_regdma;
	} else {
		ops->start_rotator = sde_hw_rotator_start_no_regdma;
		ops->wait_rotator_done = sde_hw_rotator_wait_done_no_regdma;
	}
}

/*
 * sde_hw_rotator_swts_create - create software timestamp buffer
 * @rot: Pointer to rotator hw
 *
 * This buffer is used by regdma to keep track of last completed command.
 */
static int sde_hw_rotator_swts_create(struct sde_hw_rotator *rot)
{
	int rc = 0;
	struct ion_handle *handle;
	struct sde_mdp_img_data *data;
	u32 bufsize = sizeof(int) * SDE_HW_ROT_REGDMA_TOTAL_CTX * 2;
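	/*
	 * Sizing note: one 32-bit timestamp slot per regdma context, for
	 * each of the two priority queues; hence TOTAL_CTX * 2 entries
	 * (this matches the ts_addr indexing in sde_hw_rotator_alloc_rotctx).
	 */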

	rot->iclient = msm_ion_client_create(rot->pdev->name);
	if (IS_ERR_OR_NULL(rot->iclient)) {
		SDEROT_ERR("msm_ion_client_create() return error (%p)\n",
				rot->iclient);
		return -EINVAL;
	}

	handle = ion_alloc(rot->iclient, bufsize, SZ_4K,
			ION_HEAP(ION_SYSTEM_HEAP_ID), 0);
	if (IS_ERR_OR_NULL(handle)) {
		SDEROT_ERR("ion memory allocation failed\n");
		return -ENOMEM;
	}

	data = &rot->swts_buf;
	data->len = bufsize;
	data->srcp_dma_buf = ion_share_dma_buf(rot->iclient, handle);
	if (IS_ERR(data->srcp_dma_buf)) {
		SDEROT_ERR("ion_dma_buf setup failed\n");
		rc = -ENOMEM;
		goto imap_err;
	}

	sde_smmu_ctrl(1);

	data->srcp_attachment = sde_smmu_dma_buf_attach(data->srcp_dma_buf,
			&rot->pdev->dev, SDE_IOMMU_DOMAIN_ROT_UNSECURE);
	if (IS_ERR_OR_NULL(data->srcp_attachment)) {
		SDEROT_ERR("sde_smmu_dma_buf_attach error\n");
		rc = -ENOMEM;
		goto err_put;
	}

	data->srcp_table = dma_buf_map_attachment(data->srcp_attachment,
			DMA_BIDIRECTIONAL);
	if (IS_ERR_OR_NULL(data->srcp_table)) {
		SDEROT_ERR("dma_buf_map_attachment error\n");
		rc = -ENOMEM;
		goto err_detach;
	}

	rc = sde_smmu_map_dma_buf(data->srcp_dma_buf, data->srcp_table,
			SDE_IOMMU_DOMAIN_ROT_UNSECURE, &data->addr,
			&data->len, DMA_BIDIRECTIONAL);
	if (IS_ERR_VALUE(rc)) {
		SDEROT_ERR("smmu_map_dma_buf failed: (%d)\n", rc);
		goto err_unmap;
	}

	dma_buf_begin_cpu_access(data->srcp_dma_buf, 0, data->len,
			DMA_FROM_DEVICE);
	rot->swts_buffer = dma_buf_kmap(data->srcp_dma_buf, 0);
	if (IS_ERR_OR_NULL(rot->swts_buffer)) {
		SDEROT_ERR("ion kernel memory mapping failed\n");
		/* return a real error code; IS_ERR() here may yield 0 */
		rc = -ENOMEM;
		goto kmap_err;
	}

	data->mapped = true;
	SDEROT_DBG("swts buffer mapped: %pad/%lx va:%p\n", &data->addr,
			data->len, rot->swts_buffer);

	ion_free(rot->iclient, handle);

	sde_smmu_ctrl(0);

	return rc;
kmap_err:
	sde_smmu_unmap_dma_buf(data->srcp_table, SDE_IOMMU_DOMAIN_ROT_UNSECURE,
			DMA_FROM_DEVICE, data->srcp_dma_buf);
err_unmap:
	dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table,
			DMA_FROM_DEVICE);
err_detach:
	dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment);
err_put:
	dma_buf_put(data->srcp_dma_buf);
	data->srcp_dma_buf = NULL;
	/* drop the smmu vote taken above before bailing out */
	sde_smmu_ctrl(0);
imap_err:
	ion_free(rot->iclient, handle);

	return rc;
}

/*
 * sde_hw_rotator_swtc_destroy - destroy software timestamp buffer
 * @rot: Pointer to rotator hw
 */
static void sde_hw_rotator_swtc_destroy(struct sde_hw_rotator *rot)
{
	struct sde_mdp_img_data *data;

	data = &rot->swts_buf;

	dma_buf_end_cpu_access(data->srcp_dma_buf, 0, data->len,
			DMA_FROM_DEVICE);
	dma_buf_kunmap(data->srcp_dma_buf, 0, rot->swts_buffer);

	sde_smmu_unmap_dma_buf(data->srcp_table, SDE_IOMMU_DOMAIN_ROT_UNSECURE,
			DMA_FROM_DEVICE, data->srcp_dma_buf);
	dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table,
			DMA_FROM_DEVICE);
	dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment);
	dma_buf_put(data->srcp_dma_buf);
	data->srcp_dma_buf = NULL;
}

/*
 * sde_hw_rotator_destroy - destroy hw rotator and free allocated resources
 * @mgr: Pointer to rotator manager
 */
static void sde_hw_rotator_destroy(struct sde_rot_mgr *mgr)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	struct sde_hw_rotator *rot;

	if (!mgr || !mgr->pdev || !mgr->hw_data) {
		SDEROT_ERR("null parameters\n");
		return;
	}

	rot = mgr->hw_data;
	if (rot->irq_num >= 0)
		devm_free_irq(&mgr->pdev->dev, rot->irq_num, mdata);

	if (rot->mode == ROT_REGDMA_ON)
		sde_hw_rotator_swtc_destroy(rot);

	devm_kfree(&mgr->pdev->dev, mgr->hw_data);
	mgr->hw_data = NULL;
}

/*
 * sde_hw_rotator_alloc_ext - allocate rotator resource from rotator hw
 * @mgr: Pointer to rotator manager
 * @pipe_id: pipe identifier (not used)
 * @wb_id: writeback identifier/priority queue identifier
 *
 * This function allocates a new hw rotator resource for the given priority.
 */
static struct sde_rot_hw_resource *sde_hw_rotator_alloc_ext(
		struct sde_rot_mgr *mgr, u32 pipe_id, u32 wb_id)
{
	struct sde_hw_rotator_resource_info *resinfo;

	if (!mgr || !mgr->hw_data) {
		SDEROT_ERR("null parameters\n");
		return NULL;
	}

	/*
	 * Allocate rotator resource info. Each allocation is per
	 * HW priority queue
	 */
	resinfo = devm_kzalloc(&mgr->pdev->dev, sizeof(*resinfo), GFP_KERNEL);
	if (!resinfo) {
		SDEROT_ERR("Failed allocating HW rotator resource info\n");
		return NULL;
	}

	resinfo->rot = mgr->hw_data;
	resinfo->hw.wb_id = wb_id;
	atomic_set(&resinfo->hw.num_active, 0);
	init_waitqueue_head(&resinfo->hw.wait_queue);

	/* For non-regdma, only support one active session */
	if (resinfo->rot->mode == ROT_REGDMA_OFF) {
		resinfo->hw.max_active = 1;
	} else {
		resinfo->hw.max_active = SDE_HW_ROT_REGDMA_TOTAL_CTX - 1;

		if (resinfo->rot->iclient == NULL)
			sde_hw_rotator_swts_create(resinfo->rot);
	}

	SDEROT_DBG("New rotator resource:%p, priority:%d\n",
			resinfo, wb_id);

	return &resinfo->hw;
}

/*
 * sde_hw_rotator_free_ext - free the given rotator resource
 * @mgr: Pointer to rotator manager
 * @hw: Pointer to rotator resource
 */
static void sde_hw_rotator_free_ext(struct sde_rot_mgr *mgr,
		struct sde_rot_hw_resource *hw)
{
	struct sde_hw_rotator_resource_info *resinfo;

	if (!mgr || !mgr->hw_data)
		return;

	resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);

	SDEROT_DBG(
		"Free rotator resource:%p, priority:%d, active:%d, pending:%d\n",
		resinfo, hw->wb_id, atomic_read(&hw->num_active),
		hw->pending_count);

	devm_kfree(&mgr->pdev->dev, resinfo);
}

/*
 * sde_hw_rotator_alloc_rotctx - allocate rotator context
 * @rot: Pointer to rotator hw
 * @hw: Pointer to rotator resource
 * @session_id: Session identifier of this context
 *
 * This function allocates a new rotator context for the given session id.
 */
static struct sde_hw_rotator_context *sde_hw_rotator_alloc_rotctx(
		struct sde_hw_rotator *rot,
		struct sde_rot_hw_resource *hw,
		u32 session_id)
{
	struct sde_hw_rotator_context *ctx;

	/* Allocate rotator context */
	ctx = devm_kzalloc(&rot->pdev->dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		SDEROT_ERR("Failed allocating HW rotator context\n");
		return NULL;
	}

	ctx->rot = rot;
	ctx->q_id = hw->wb_id;
	ctx->session_id = session_id;
	ctx->hwres = hw;
	ctx->timestamp = atomic_add_return(1, &rot->timestamp[ctx->q_id]);
	ctx->timestamp &= SDE_REGDMA_SWTS_MASK;
	ctx->is_secure = false;

	ctx->regdma_base = rot->cmd_wr_ptr[ctx->q_id]
			[sde_hw_rotator_get_regdma_ctxidx(ctx)];
	ctx->regdma_wrptr = ctx->regdma_base;
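	/*
	 * The software timestamp slot for this context lives in the shared
	 * swts buffer, indexed as [q_id][regdma context index]; ts_addr
	 * below points at that single u32 slot.
	 */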
	ctx->ts_addr = (dma_addr_t)((u32 *)rot->swts_buf.addr +
			ctx->q_id * SDE_HW_ROT_REGDMA_TOTAL_CTX +
			sde_hw_rotator_get_regdma_ctxidx(ctx));

	init_completion(&ctx->rot_comp);
	init_completion(&ctx->regdma_comp);

	/* Store rotator context for lookup purpose */
	sde_hw_rotator_put_ctx(ctx);

	SDEROT_DBG(
		"New rot CTX:%p, ctxidx:%d, session-id:%d, prio:%d, timestamp:%X, active:%d\n",
		ctx, sde_hw_rotator_get_regdma_ctxidx(ctx), ctx->session_id,
		ctx->q_id, ctx->timestamp,
		atomic_read(&ctx->hwres->num_active));

	return ctx;
}

/*
 * sde_hw_rotator_free_rotctx - free the given rotator context
 * @rot: Pointer to rotator hw
 * @ctx: Pointer to rotator context
 */
static void sde_hw_rotator_free_rotctx(struct sde_hw_rotator *rot,
		struct sde_hw_rotator_context *ctx)
{
	if (!rot || !ctx)
		return;

	SDEROT_DBG(
		"Free rot CTX:%p, ctxidx:%d, session-id:%d, prio:%d, timestamp:%X, active:%d\n",
		ctx, sde_hw_rotator_get_regdma_ctxidx(ctx), ctx->session_id,
		ctx->q_id, ctx->timestamp,
		atomic_read(&ctx->hwres->num_active));

	rot->rotCtx[ctx->q_id][sde_hw_rotator_get_regdma_ctxidx(ctx)] = NULL;

	devm_kfree(&rot->pdev->dev, ctx);
}

/*
 * sde_hw_rotator_config - configure hw for the given rotation entry
 * @hw: Pointer to rotator resource
 * @entry: Pointer to rotation entry
 *
 * This function sets up the fetch/writeback/rotator blocks, as well as
 * the VBIF, based on the given rotation entry.
 */
static int sde_hw_rotator_config(struct sde_rot_hw_resource *hw,
		struct sde_rot_entry *entry)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	struct sde_hw_rotator *rot;
	struct sde_hw_rotator_resource_info *resinfo;
	struct sde_hw_rotator_context *ctx;
	struct sde_hw_rot_sspp_cfg sspp_cfg;
	struct sde_hw_rot_wb_cfg wb_cfg;
	u32 danger_lut = 0;	/* applicable for realtime client only */
	u32 safe_lut = 0;	/* applicable for realtime client only */
	u32 flags = 0;
	struct sde_rotation_item *item;

	if (!hw || !entry) {
		SDEROT_ERR("null hw resource/entry\n");
		return -EINVAL;
	}

	resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
	rot = resinfo->rot;
	item = &entry->item;

	ctx = sde_hw_rotator_alloc_rotctx(rot, hw, item->session_id);
	if (!ctx) {
		SDEROT_ERR("Failed allocating rotator context!!\n");
		return -EINVAL;
	}

	flags = (item->flags & SDE_ROTATION_FLIP_LR) ?
			SDE_ROT_FLAG_FLIP_LR : 0;
	flags |= (item->flags & SDE_ROTATION_FLIP_UD) ?
			SDE_ROT_FLAG_FLIP_UD : 0;
	flags |= (item->flags & SDE_ROTATION_90) ?
			SDE_ROT_FLAG_ROT_90 : 0;
	flags |= (item->flags & SDE_ROTATION_DEINTERLACE) ?
			SDE_ROT_FLAG_DEINTERLACE : 0;
	flags |= (item->flags & SDE_ROTATION_SECURE) ?
			SDE_ROT_FLAG_SECURE_OVERLAY_SESSION : 0;

	sspp_cfg.img_width = item->input.width;
	sspp_cfg.img_height = item->input.height;
	sspp_cfg.fmt = sde_get_format_params(item->input.format);
	if (!sspp_cfg.fmt) {
		SDEROT_ERR("null format\n");
		/* release the context allocated above before failing */
		sde_hw_rotator_free_rotctx(rot, ctx);
		return -EINVAL;
	}
	sspp_cfg.src_rect = &item->src_rect;
	sspp_cfg.data = &entry->src_buf;
	sde_mdp_get_plane_sizes(sspp_cfg.fmt, item->input.width,
			item->input.height, &sspp_cfg.src_plane,
			0, /* no bwc_mode */
			(flags & SDE_ROT_FLAG_SOURCE_ROTATED_90) ?
					true : false);

	rot->ops.setup_rotator_fetchengine(ctx, ctx->q_id,
			&sspp_cfg, danger_lut, safe_lut, flags);

	wb_cfg.img_width = item->output.width;
	wb_cfg.img_height = item->output.height;
	wb_cfg.fmt = sde_get_format_params(item->output.format);
	wb_cfg.dst_rect = &item->dst_rect;
	wb_cfg.data = &entry->dst_buf;
	sde_mdp_get_plane_sizes(wb_cfg.fmt, item->output.width,
			item->output.height, &wb_cfg.dst_plane,
			0, /* no bwc_mode */
			(flags & SDE_ROT_FLAG_ROT_90) ? true : false);

	wb_cfg.v_downscale_factor = entry->dnsc_factor_h;
	wb_cfg.h_downscale_factor = entry->dnsc_factor_w;

	rot->ops.setup_rotator_wbengine(ctx, ctx->q_id, &wb_cfg, flags);

	/* setup VA mapping for debugfs */
	if (rot->dbgmem) {
		sde_hw_rotator_map_vaddr(&ctx->src_dbgbuf,
				&item->input,
				&entry->src_buf);

		sde_hw_rotator_map_vaddr(&ctx->dst_dbgbuf,
				&item->output,
				&entry->dst_buf);
	}

	if (mdata->default_ot_rd_limit) {
		struct sde_mdp_set_ot_params ot_params;

		memset(&ot_params, 0, sizeof(struct sde_mdp_set_ot_params));
		ot_params.xin_id = XIN_SSPP;
		ot_params.num = 0; /* not used */
		ot_params.width = entry->perf->config.input.width;
		ot_params.height = entry->perf->config.input.height;
		ot_params.fps = entry->perf->config.frame_rate;
		ot_params.reg_off_vbif_lim_conf = MMSS_VBIF_RD_LIM_CONF;
		ot_params.reg_off_mdp_clk_ctrl =
				MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0;
		ot_params.bit_off_mdp_clk_ctrl =
				MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN0;
		ot_params.fmt = entry->perf->config.input.format;
		sde_mdp_set_ot_limit(&ot_params);
	}

	if (mdata->default_ot_wr_limit) {
		struct sde_mdp_set_ot_params ot_params;

		memset(&ot_params, 0, sizeof(struct sde_mdp_set_ot_params));
		ot_params.xin_id = XIN_WRITEBACK;
		ot_params.num = 0; /* not used */
		ot_params.width = entry->perf->config.input.width;
		ot_params.height = entry->perf->config.input.height;
		ot_params.fps = entry->perf->config.frame_rate;
		ot_params.reg_off_vbif_lim_conf = MMSS_VBIF_WR_LIM_CONF;
		ot_params.reg_off_mdp_clk_ctrl =
				MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0;
		ot_params.bit_off_mdp_clk_ctrl =
				MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN1;
		ot_params.fmt = entry->perf->config.input.format;
		sde_mdp_set_ot_limit(&ot_params);
	}

	if (test_bit(SDE_QOS_PER_PIPE_LUT, mdata->sde_qos_map)) {
		u32 qos_lut = 0; /* low priority for nrt read client */

		trace_rot_perf_set_qos_luts(XIN_SSPP, sspp_cfg.fmt->format,
				qos_lut, sde_mdp_is_linear_format(sspp_cfg.fmt));

		SDE_ROTREG_WRITE(rot->mdss_base, ROT_SSPP_CREQ_LUT, qos_lut);
	}

	if (mdata->npriority_lvl > 0) {
		u32 mask, reg_val, i, vbif_qos;

		for (i = 0; i < mdata->npriority_lvl; i++) {
			reg_val = SDE_VBIF_READ(mdata,
					MMSS_VBIF_NRT_VBIF_QOS_REMAP_00 + i * 4);
			mask = 0x3 << (XIN_SSPP * 2);
			reg_val &= ~mask;
			vbif_qos = mdata->vbif_nrt_qos[i];
			reg_val |= vbif_qos << (XIN_SSPP * 2);
			/* ensure write is issued after the read operation */
			mb();
			SDE_VBIF_WRITE(mdata,
					MMSS_VBIF_NRT_VBIF_QOS_REMAP_00 + i * 4,
					reg_val);
		}
	}

	/*
	 * Enable write gather for writeback to remove write gaps, which
	 * may hang AXI/BIMC/SDE.
	 */
	SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_WRITE_GATHTER_EN,
			BIT(XIN_WRITEBACK));

	return 0;
}

/*
 * sde_hw_rotator_kickoff - kickoff processing on the given entry
 * @hw: Pointer to rotator resource
 * @entry: Pointer to rotation entry
 */
static int sde_hw_rotator_kickoff(struct sde_rot_hw_resource *hw,
		struct sde_rot_entry *entry)
{
	struct sde_hw_rotator *rot;
	struct sde_hw_rotator_resource_info *resinfo;
	struct sde_hw_rotator_context *ctx;
	int ret = 0;

	if (!hw || !entry) {
		SDEROT_ERR("null hw resource/entry\n");
		return -EINVAL;
	}

	resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
	rot = resinfo->rot;

	/* Lookup rotator context from session-id */
	ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id, hw->wb_id);
	if (WARN_ON(!ctx)) {
		SDEROT_ERR("Cannot locate rotator ctx from session id:%d\n",
				entry->item.session_id);
		/* bail out instead of dereferencing a null context */
		return -EINVAL;
	}

	ret = sde_smmu_ctrl(1);
	if (IS_ERR_VALUE(ret)) {
		SDEROT_ERR("IOMMU attach failed\n");
		return ret;
	}

	rot->ops.start_rotator(ctx, ctx->q_id);

	return 0;
}

/*
 * sde_hw_rotator_wait4done - wait for completion notification
 * @hw: Pointer to rotator resource
 * @entry: Pointer to rotation entry
 *
 * This function blocks until the given entry completes, an error is
 * detected, or a timeout occurs.
 */
static int sde_hw_rotator_wait4done(struct sde_rot_hw_resource *hw,
		struct sde_rot_entry *entry)
{
	struct sde_hw_rotator *rot;
	struct sde_hw_rotator_resource_info *resinfo;
	struct sde_hw_rotator_context *ctx;
	int ret;

	if (!hw || !entry) {
		SDEROT_ERR("null hw resource/entry\n");
		return -EINVAL;
	}

	resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
	rot = resinfo->rot;

	/* Lookup rotator context from session-id */
	ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id, hw->wb_id);
	if (WARN_ON(!ctx)) {
		SDEROT_ERR("Cannot locate rotator ctx from session id:%d\n",
				entry->item.session_id);
		/* bail out instead of dereferencing a null context */
		return -EINVAL;
	}

	ret = rot->ops.wait_rotator_done(ctx, ctx->q_id, 0);

	sde_smmu_ctrl(0);

	if (rot->dbgmem) {
		sde_hw_rotator_unmap_vaddr(&ctx->src_dbgbuf);
		sde_hw_rotator_unmap_vaddr(&ctx->dst_dbgbuf);
	}

	/* Current rotator context job is finished, time to free up */
	sde_hw_rotator_free_rotctx(rot, ctx);

	return ret;
}

/*
 * sde_rotator_hw_rev_init - setup feature and/or capability bitmask
 * @rot: Pointer to hw rotator
 *
 * This function initializes feature and/or capability bitmask based on
 * h/w version read from the device.
 */
static int sde_rotator_hw_rev_init(struct sde_hw_rotator *rot)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	u32 hw_version;

	if (!mdata) {
		SDEROT_ERR("null rotator data\n");
		return -EINVAL;
	}

	hw_version = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_HW_VERSION);
	SDEROT_DBG("hw version %8.8x\n", hw_version);

	clear_bit(SDE_QOS_PER_PIPE_IB, mdata->sde_qos_map);
	set_bit(SDE_QOS_OVERHEAD_FACTOR, mdata->sde_qos_map);
	clear_bit(SDE_QOS_CDP, mdata->sde_qos_map);
	set_bit(SDE_QOS_OTLIM, mdata->sde_qos_map);
	set_bit(SDE_QOS_PER_PIPE_LUT, mdata->sde_qos_map);
	clear_bit(SDE_QOS_SIMPLIFIED_PREFILL, mdata->sde_qos_map);

	set_bit(SDE_CAPS_R3_WB, mdata->sde_caps_map);

	return 0;
}

/*
 * sde_hw_rotator_rotirq_handler - non-regdma interrupt handler
 * @irq: Interrupt number
 * @ptr: Pointer to private handle provided during registration
 *
 * This function services rotator interrupt and wakes up waiting client
 * with pending rotation requests already submitted to h/w.
 */
static irqreturn_t sde_hw_rotator_rotirq_handler(int irq, void *ptr)
{
	struct sde_hw_rotator *rot = ptr;
	struct sde_hw_rotator_context *ctx;
	irqreturn_t ret = IRQ_NONE;
	u32 isr;

	isr = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_INTR_STATUS);

	SDEROT_DBG("intr_status = %8.8x\n", isr);

	if (isr & ROT_DONE_MASK) {
		if (rot->irq_num >= 0)
			disable_irq_nosync(rot->irq_num);
		SDEROT_DBG("Notify rotator complete\n");

		/* Normal rotator only 1 session, no need to lookup */
		ctx = rot->rotCtx[0][0];
		WARN_ON(ctx == NULL);
		complete_all(&ctx->rot_comp);

		spin_lock(&rot->rotisr_lock);
		SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
				ROT_DONE_CLEAR);
		spin_unlock(&rot->rotisr_lock);
		ret = IRQ_HANDLED;
	}

	return ret;
}

/*
 * sde_hw_rotator_regdmairq_handler - regdma interrupt handler
 * @irq: Interrupt number
 * @ptr: Pointer to private handle provided during registration
 *
 * This function services the rotator interrupt, decoding the source of
 * the event (high/low priority queue), and wakes up all waiting clients
 * with pending rotation requests already submitted to h/w.
 */
static irqreturn_t sde_hw_rotator_regdmairq_handler(int irq, void *ptr)
{
	struct sde_hw_rotator *rot = ptr;
	struct sde_hw_rotator_context *ctx;
	irqreturn_t ret = IRQ_NONE;
	u32 isr;
	u32 ts;
	u32 q_id;

	isr = SDE_ROTREG_READ(rot->mdss_base, REGDMA_CSR_REGDMA_INT_STATUS);
	ts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);

	SDEROT_DBG("intr_status = %8.8x, sw_TS:%X\n", isr, ts);

	/*
	 * Any REGDMA status, including error and watchdog timer, should
	 * trigger and wake up the waiting thread
	 */
	if (isr & (REGDMA_INT_HIGH_MASK | REGDMA_INT_LOW_MASK)) {
		spin_lock(&rot->rotisr_lock);

		/*
		 * Obtain rotator context based on timestamp from regdma
		 * and low/high interrupt status
		 */
		if (isr & REGDMA_INT_HIGH_MASK) {
			q_id = ROT_QUEUE_HIGH_PRIORITY;
			ts = ts & SDE_REGDMA_SWTS_MASK;
		} else if (isr & REGDMA_INT_LOW_MASK) {
			q_id = ROT_QUEUE_LOW_PRIORITY;
			ts = (ts >> SDE_REGDMA_SWTS_SHIFT) &
				SDE_REGDMA_SWTS_MASK;
		}

		ctx = rot->rotCtx[q_id][ts & SDE_HW_ROT_REGDMA_SEG_MASK];
		WARN_ON(ctx == NULL);

		/*
		 * Wake up all waiting contexts from the current and previous
		 * SW timestamps.
		 */
		do {
			ctx->last_regdma_isr_status = isr;
			ctx->last_regdma_timestamp = ts;
			SDEROT_DBG(
				"regdma complete: ctx:%p, ts:%X, dcount:%X\n",
				ctx, ts, atomic_read(&rot->regdma_done_count));
			complete_all(&ctx->regdma_comp);

			ts = (ts - 1) & SDE_REGDMA_SWTS_MASK;
			ctx = rot->rotCtx[q_id]
				[ts & SDE_HW_ROT_REGDMA_SEG_MASK];
		} while (ctx && (ctx->last_regdma_timestamp == 0));

		/*
		 * Clear corresponding regdma interrupt because it is a level
		 * interrupt
		 */
		SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_INT_CLEAR,
				isr);

		spin_unlock(&rot->rotisr_lock);
		ret = IRQ_HANDLED;
	} else if (isr & REGDMA_INT_ERR_MASK) {
		/*
		 * For a REGDMA error, save the isr info and wake up
		 * all waiting contexts
		 */
		int i, j;

		SDEROT_ERR(
			"regdma err isr:%X, wake up all waiting contexts\n",
			isr);

		spin_lock(&rot->rotisr_lock);

		for (i = 0; i < ROT_QUEUE_MAX; i++) {
			for (j = 0; j < SDE_HW_ROT_REGDMA_TOTAL_CTX; j++) {
				ctx = rot->rotCtx[i][j];
				if (ctx && ctx->last_regdma_isr_status == 0) {
					ctx->last_regdma_isr_status = isr;
					ctx->last_regdma_timestamp = ts;
					complete_all(&ctx->regdma_comp);
					SDEROT_DBG("Wakeup rotctx[%d][%d]:%p\n",
							i, j, ctx);
				}
			}
		}

		SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_INT_CLEAR,
				isr);

		spin_unlock(&rot->rotisr_lock);
		ret = IRQ_HANDLED;
	}

	return ret;
}

/*
 * sde_hw_rotator_validate_entry - validate rotation entry
 * @mgr: Pointer to rotator manager
 * @entry: Pointer to rotation entry
 *
 * This function validates the given rotation entry and provides possible
 * fixup (future improvement) if available. This function returns 0 if
 * the entry is valid, and returns error code otherwise.
 */
static int sde_hw_rotator_validate_entry(struct sde_rot_mgr *mgr,
		struct sde_rot_entry *entry)
{
	int ret = 0;
	u16 src_w, src_h, dst_w, dst_h;
	struct sde_rotation_item *item = &entry->item;
	struct sde_mdp_format_params *fmt;

	src_w = item->src_rect.w;
	src_h = item->src_rect.h;

	if (item->flags & SDE_ROTATION_90) {
		dst_w = item->dst_rect.h;
		dst_h = item->dst_rect.w;
	} else {
		dst_w = item->dst_rect.w;
		dst_h = item->dst_rect.h;
	}

	entry->dnsc_factor_w = 0;
	entry->dnsc_factor_h = 0;

	if ((src_w != dst_w) || (src_h != dst_h)) {
		if ((src_w % dst_w) || (src_h % dst_h)) {
			SDEROT_DBG("non-integral scale not supported\n");
			ret = -EINVAL;
			goto dnsc_err;
		}
		entry->dnsc_factor_w = src_w / dst_w;
		if ((entry->dnsc_factor_w & (entry->dnsc_factor_w - 1)) ||
				(entry->dnsc_factor_w > 64)) {
			SDEROT_DBG("non power-of-2 w_scale not supported\n");
			ret = -EINVAL;
			goto dnsc_err;
		}
		entry->dnsc_factor_h = src_h / dst_h;
		if ((entry->dnsc_factor_h & (entry->dnsc_factor_h - 1)) ||
				(entry->dnsc_factor_h > 64)) {
			SDEROT_DBG("non power-of-2 h_scale not supported\n");
			ret = -EINVAL;
			goto dnsc_err;
		}
	}

	fmt = sde_get_format_params(item->output.format);
	/* Tiled format downscale support not applied to AYUV tiled */
	if (sde_mdp_is_tilea5x_format(fmt) && (entry->dnsc_factor_h > 4)) {
		SDEROT_DBG("max downscale for tiled format is 4\n");
		ret = -EINVAL;
		goto dnsc_err;
	}
	if (sde_mdp_is_ubwc_format(fmt) && (entry->dnsc_factor_h > 2)) {
		SDEROT_DBG("downscale with ubwc cannot be more than 2\n");
		ret = -EINVAL;
	}

dnsc_err:
	/* Downscaler does not support asymmetrical dnsc */
	if (entry->dnsc_factor_w != entry->dnsc_factor_h) {
		SDEROT_DBG("asymmetric downscale not supported\n");
		ret = -EINVAL;
	}

	if (ret) {
		entry->dnsc_factor_w = 0;
		entry->dnsc_factor_h = 0;
	}
	return ret;
}
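
/*
 * Worked example of the checks above (illustrative values only): a
 * 1920x1080 source downscaled to 480x270 yields dnsc_factor_w =
 * dnsc_factor_h = 4, which passes (integral, power-of-2, <= 64,
 * symmetric). A 1920x1080 source to 480x360 yields factors 4 and 3, and
 * is rejected both for the non-power-of-2 height factor and for being
 * asymmetric.
 */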

/*
 * sde_hw_rotator_show_caps - output capability info to sysfs 'caps' file
 * @mgr: Pointer to rotator manager
 * @attr: Pointer to device attribute interface
 * @buf: Pointer to output buffer
 * @len: Length of output buffer
 */
static ssize_t sde_hw_rotator_show_caps(struct sde_rot_mgr *mgr,
		struct device_attribute *attr, char *buf, ssize_t len)
{
	struct sde_hw_rotator *hw_data;
	int cnt = 0;

	if (!mgr || !buf)
		return 0;

	hw_data = mgr->hw_data;

#define SPRINT(fmt, ...) \
		(cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))

	/* insert capabilities here */

#undef SPRINT
	return cnt;
}

/*
 * sde_hw_rotator_show_state - output state info to sysfs 'state' file
 * @mgr: Pointer to rotator manager
 * @attr: Pointer to device attribute interface
 * @buf: Pointer to output buffer
 * @len: Length of output buffer
 */
static ssize_t sde_hw_rotator_show_state(struct sde_rot_mgr *mgr,
		struct device_attribute *attr, char *buf, ssize_t len)
{
	struct sde_hw_rotator *rot;
	struct sde_hw_rotator_context *ctx;
	int cnt = 0;
	int num_active = 0;
	int i, j;

	if (!mgr || !buf) {
		SDEROT_ERR("null parameters\n");
		return 0;
	}

	rot = mgr->hw_data;

#define SPRINT(fmt, ...) \
		(cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))

	if (rot) {
		SPRINT("rot_mode=%d\n", rot->mode);
		SPRINT("irq_num=%d\n", rot->irq_num);

		if (rot->mode == ROT_REGDMA_OFF) {
			SPRINT("max_active=1\n");
			SPRINT("num_active=%d\n", rot->rotCtx[0][0] ? 1 : 0);
		} else {
			for (i = 0; i < ROT_QUEUE_MAX; i++) {
				for (j = 0; j < SDE_HW_ROT_REGDMA_TOTAL_CTX;
						j++) {
					ctx = rot->rotCtx[i][j];

					if (ctx) {
						SPRINT(
							"rotCtx[%d][%d]:%p\n",
							i, j, ctx);
						++num_active;
					}
				}
			}

			SPRINT("max_active=%d\n", SDE_HW_ROT_REGDMA_TOTAL_CTX);
			SPRINT("num_active=%d\n", num_active);
		}
	}

#undef SPRINT
	return cnt;
}

/*
 * sde_hw_rotator_parse_dt - parse r3 specific device tree settings
 * @hw_data: Pointer to rotator hw
 * @dev: Pointer to platform device
 */
static int sde_hw_rotator_parse_dt(struct sde_hw_rotator *hw_data,
		struct platform_device *dev)
{
	int ret = 0;
	u32 data;

	if (!hw_data || !dev)
		return -EINVAL;

	ret = of_property_read_u32(dev->dev.of_node, "qcom,mdss-rot-mode",
			&data);
	if (ret) {
		SDEROT_DBG("default to regdma off\n");
		ret = 0;
		hw_data->mode = ROT_REGDMA_OFF;
	} else if (data < ROT_REGDMA_MAX) {
		SDEROT_DBG("set to regdma mode %d\n", data);
		hw_data->mode = data;
	} else {
		SDEROT_ERR("regdma mode out of range. default to regdma off\n");
		hw_data->mode = ROT_REGDMA_OFF;
	}

	ret = of_property_read_u32(dev->dev.of_node,
			"qcom,mdss-highest-bank-bit", &data);
	if (ret) {
		SDEROT_DBG("default to A5X bank\n");
		ret = 0;
		hw_data->highest_bank = 2;
	} else {
		SDEROT_DBG("set highest bank bit to %d\n", data);
		hw_data->highest_bank = data;
	}

	return ret;
}

/*
 * sde_rotator_r3_init - initialize the r3 module
 * @mgr: Pointer to rotator manager
 *
 * This function sets up r3 callback functions, parses r3 specific
 * device tree settings, installs the r3 specific interrupt handler,
 * and initializes r3 internal data structures.
 */
int sde_rotator_r3_init(struct sde_rot_mgr *mgr)
{
	struct sde_hw_rotator *rot;
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	int i;
	int ret;

	rot = devm_kzalloc(&mgr->pdev->dev, sizeof(*rot), GFP_KERNEL);
	if (!rot)
		return -ENOMEM;

	mgr->hw_data = rot;
	mgr->queue_count = ROT_QUEUE_MAX;

	rot->mdss_base = mdata->sde_io.base;
	rot->pdev = mgr->pdev;

	/* Assign ops */
	mgr->ops_hw_destroy = sde_hw_rotator_destroy;
	mgr->ops_hw_alloc = sde_hw_rotator_alloc_ext;
	mgr->ops_hw_free = sde_hw_rotator_free_ext;
	mgr->ops_config_hw = sde_hw_rotator_config;
	mgr->ops_kickoff_entry = sde_hw_rotator_kickoff;
	mgr->ops_wait_for_entry = sde_hw_rotator_wait4done;
	mgr->ops_hw_validate_entry = sde_hw_rotator_validate_entry;
	mgr->ops_hw_show_caps = sde_hw_rotator_show_caps;
	mgr->ops_hw_show_state = sde_hw_rotator_show_state;
	mgr->ops_hw_create_debugfs = sde_rotator_r3_create_debugfs;

	ret = sde_hw_rotator_parse_dt(mgr->hw_data, mgr->pdev);
	if (ret)
		goto error_parse_dt;

	rot->irq_num = platform_get_irq(mgr->pdev, 0);
	if (rot->irq_num < 0) {
		SDEROT_ERR("fail to get rotator irq\n");
	} else {
		if (rot->mode == ROT_REGDMA_OFF)
			ret = devm_request_threaded_irq(&mgr->pdev->dev,
					rot->irq_num,
					sde_hw_rotator_rotirq_handler,
					NULL, 0, "sde_rotator_r3", rot);
		else
			ret = devm_request_threaded_irq(&mgr->pdev->dev,
					rot->irq_num,
					sde_hw_rotator_regdmairq_handler,
					NULL, 0, "sde_rotator_r3", rot);
		if (ret) {
			SDEROT_ERR("fail to request irq r:%d\n", ret);
			rot->irq_num = -1;
		} else {
			disable_irq(rot->irq_num);
		}
	}

	setup_rotator_ops(&rot->ops, rot->mode);

	spin_lock_init(&rot->rotctx_lock);
	spin_lock_init(&rot->rotisr_lock);

	/* REGDMA initialization */
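	/*
	 * Command queue layout, as programmed below: with regdma off, all
	 * write pointers share the in-memory cmd_queue. With regdma on,
	 * each context gets a SDE_HW_ROT_REGDMA_SEG_SIZE-word segment of
	 * the on-chip command RAM; the high-priority queue occupies the
	 * first SDE_HW_ROT_REGDMA_TOTAL_CTX segments and the low-priority
	 * queue the next ones (the * 4 converts words to bytes on the
	 * byte-addressed register base).
	 */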
	if (rot->mode == ROT_REGDMA_OFF) {
		for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
			rot->cmd_wr_ptr[0][i] = &rot->cmd_queue[
				SDE_HW_ROT_REGDMA_SEG_SIZE * i];
	} else {
		for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
			rot->cmd_wr_ptr[ROT_QUEUE_HIGH_PRIORITY][i] =
				(u32 *)(rot->mdss_base +
					REGDMA_RAM_REGDMA_CMD_RAM +
					SDE_HW_ROT_REGDMA_SEG_SIZE * 4 * i);

		for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
			rot->cmd_wr_ptr[ROT_QUEUE_LOW_PRIORITY][i] =
				(u32 *)(rot->mdss_base +
					REGDMA_RAM_REGDMA_CMD_RAM +
					SDE_HW_ROT_REGDMA_SEG_SIZE * 4 *
					(i + SDE_HW_ROT_REGDMA_TOTAL_CTX));
	}

	atomic_set(&rot->timestamp[0], 0);
	atomic_set(&rot->timestamp[1], 0);
	atomic_set(&rot->regdma_submit_count, 0);
	atomic_set(&rot->regdma_done_count, 0);

	ret = sde_rotator_hw_rev_init(rot);
	if (ret)
		goto error_hw_rev_init;

	return 0;
error_hw_rev_init:
	if (rot->irq_num >= 0)
		devm_free_irq(&mgr->pdev->dev, rot->irq_num, mdata);
	devm_kfree(&mgr->pdev->dev, mgr->hw_data);
error_parse_dt:
	return ret;
}