blob: 6caf50cb50fb4ddcd15db4b6e0cb765a1ca4193d [file] [log] [blame]
Alan Kwong9487de22016-01-16 22:06:36 -05001/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#define pr_fmt(fmt) "%s: " fmt, __func__
15
16#include <linux/platform_device.h>
17#include <linux/module.h>
18#include <linux/fs.h>
19#include <linux/file.h>
20#include <linux/sync.h>
21#include <linux/delay.h>
22#include <linux/debugfs.h>
23#include <linux/interrupt.h>
24#include <linux/dma-mapping.h>
25#include <linux/dma-buf.h>
26#include <linux/msm_ion.h>
Alan Kwong315cd772016-08-03 22:29:42 -040027#include <linux/clk/msm-clk.h>
Alan Kwong9487de22016-01-16 22:06:36 -050028
29#include "sde_rotator_core.h"
30#include "sde_rotator_util.h"
31#include "sde_rotator_smmu.h"
32#include "sde_rotator_r3.h"
33#include "sde_rotator_r3_internal.h"
34#include "sde_rotator_r3_hwio.h"
35#include "sde_rotator_r3_debug.h"
36#include "sde_rotator_trace.h"
37
38/* XIN mapping */
39#define XIN_SSPP 0
40#define XIN_WRITEBACK 1
41
42/* wait for at most 2 vsync for lowest refresh rate (24hz) */
43#define KOFF_TIMEOUT msecs_to_jiffies(42 * 32)
44
45/* Macro for constructing the REGDMA command */
46#define SDE_REGDMA_WRITE(p, off, data) \
47 do { \
48 *p++ = REGDMA_OP_REGWRITE | \
49 ((off) & REGDMA_ADDR_OFFSET_MASK); \
50 *p++ = (data); \
51 } while (0)
52
53#define SDE_REGDMA_MODIFY(p, off, mask, data) \
54 do { \
55 *p++ = REGDMA_OP_REGMODIFY | \
56 ((off) & REGDMA_ADDR_OFFSET_MASK); \
57 *p++ = (mask); \
58 *p++ = (data); \
59 } while (0)
60
61#define SDE_REGDMA_BLKWRITE_INC(p, off, len) \
62 do { \
63 *p++ = REGDMA_OP_BLKWRITE_INC | \
64 ((off) & REGDMA_ADDR_OFFSET_MASK); \
65 *p++ = (len); \
66 } while (0)
67
68#define SDE_REGDMA_BLKWRITE_DATA(p, data) \
69 do { \
70 *(p) = (data); \
71 (p)++; \
72 } while (0)
73
74/* Macro for directly accessing mapped registers */
75#define SDE_ROTREG_WRITE(base, off, data) \
76 writel_relaxed(data, (base + (off)))
77
78#define SDE_ROTREG_READ(base, off) \
79 readl_relaxed(base + (off))
80
Alan Kwong818b7fc2016-07-24 22:07:41 -040081/* Invalid software timestamp value for initialization */
82#define SDE_REGDMA_SWTS_INVALID (~0)
83
84/**
85 * sde_hw_rotator_elapsed_swts - Find difference of 2 software timestamps
86 * @ts_curr: current software timestamp
87 * @ts_prev: previous software timestamp
88 * @return: the amount ts_curr is ahead of ts_prev
89 */
90static int sde_hw_rotator_elapsed_swts(u32 ts_curr, u32 ts_prev)
91{
92 u32 diff = (ts_curr - ts_prev) & SDE_REGDMA_SWTS_MASK;
93
94 return sign_extend32(diff, (SDE_REGDMA_SWTS_SHIFT - 1));
95}
96
97/**
98 * sde_hw_rotator_pending_swts - Check if the given context is still pending
99 * @rot: Pointer to hw rotator
100 * @ctx: Pointer to rotator context
101 * @pswts: Pointer to returned reference software timestamp, optional
102 * @return: true if context has pending requests
103 */
104static int sde_hw_rotator_pending_swts(struct sde_hw_rotator *rot,
105 struct sde_hw_rotator_context *ctx, u32 *pswts)
106{
107 u32 swts;
108 int ts_diff;
109 bool pending;
110
111 if (ctx->last_regdma_timestamp == SDE_REGDMA_SWTS_INVALID)
112 swts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);
113 else
114 swts = ctx->last_regdma_timestamp;
115
116 if (ctx->q_id == ROT_QUEUE_LOW_PRIORITY)
117 swts >>= SDE_REGDMA_SWTS_SHIFT;
118
119 swts &= SDE_REGDMA_SWTS_MASK;
120
121 ts_diff = sde_hw_rotator_elapsed_swts(ctx->timestamp, swts);
122
123 if (pswts)
124 *pswts = swts;
125
126 pending = (ts_diff > 0) ? true : false;
127
128 SDEROT_DBG("ts:0x%x, queue_id:%d, swts:0x%x, pending:%d\n",
129 ctx->timestamp, ctx->q_id, swts, pending);
130 return pending;
131}
132
/**
 * sde_hw_rotator_enable_irq - Enable hw rotator interrupt with ref. count
 * Also, clear rotator/regdma irq status.
 * @rot: Pointer to hw rotator
 */
static void sde_hw_rotator_enable_irq(struct sde_hw_rotator *rot)
{
	SDEROT_DBG("irq_num:%d enabled:%d\n", rot->irq_num,
			atomic_read(&rot->irq_enabled));

	/*
	 * First enabler clears stale status before unmasking the line.
	 * NOTE(review): the read-then-inc pair is not atomic as a unit;
	 * presumably callers serialize enable/disable -- confirm.
	 */
	if (!atomic_read(&rot->irq_enabled)) {
		if (rot->mode == ROT_REGDMA_OFF)
			/* non-regdma mode: only the ROT_DONE status applies */
			SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
					ROT_DONE_MASK);
		else
			/* regdma mode: clear the whole regdma interrupt set */
			SDE_ROTREG_WRITE(rot->mdss_base,
					REGDMA_CSR_REGDMA_INT_CLEAR, REGDMA_INT_MASK);

		enable_irq(rot->irq_num);
	}
	atomic_inc(&rot->irq_enabled);
}
155
/**
 * sde_hw_rotator_disable_irq - Disable hw rotator interrupt with ref. count
 * Also, clear rotator/regdma irq enable masks.
 * @rot: Pointer to hw rotator
 */
static void sde_hw_rotator_disable_irq(struct sde_hw_rotator *rot)
{
	SDEROT_DBG("irq_num:%d enabled:%d\n", rot->irq_num,
			atomic_read(&rot->irq_enabled));

	/* guard against unbalanced disable calls */
	if (!atomic_read(&rot->irq_enabled)) {
		SDEROT_ERR("irq %d is already disabled\n", rot->irq_num);
		return;
	}

	/* last reference drops the hw enable mask and masks the line */
	if (!atomic_dec_return(&rot->irq_enabled)) {
		if (rot->mode == ROT_REGDMA_OFF)
			SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_EN, 0);
		else
			SDE_ROTREG_WRITE(rot->mdss_base,
					REGDMA_CSR_REGDMA_INT_EN, 0);
		/* disable irq after last pending irq is handled, if any */
		synchronize_irq(rot->irq_num);
		disable_irq_nosync(rot->irq_num);
	}
}
182
183/**
184 * sde_hw_rotator_dump_status - Dump hw rotator status on error
185 * @rot: Pointer to hw rotator
186 */
187static void sde_hw_rotator_dump_status(struct sde_hw_rotator *rot)
188{
189 SDEROT_ERR(
190 "op_mode = %x, int_en = %x, int_status = %x\n",
191 SDE_ROTREG_READ(rot->mdss_base,
192 REGDMA_CSR_REGDMA_OP_MODE),
193 SDE_ROTREG_READ(rot->mdss_base,
194 REGDMA_CSR_REGDMA_INT_EN),
195 SDE_ROTREG_READ(rot->mdss_base,
196 REGDMA_CSR_REGDMA_INT_STATUS));
197
198 SDEROT_ERR(
199 "ts = %x, q0_status = %x, q1_status = %x, block_status = %x\n",
200 SDE_ROTREG_READ(rot->mdss_base,
201 REGDMA_TIMESTAMP_REG),
202 SDE_ROTREG_READ(rot->mdss_base,
203 REGDMA_CSR_REGDMA_QUEUE_0_STATUS),
204 SDE_ROTREG_READ(rot->mdss_base,
205 REGDMA_CSR_REGDMA_QUEUE_1_STATUS),
206 SDE_ROTREG_READ(rot->mdss_base,
207 REGDMA_CSR_REGDMA_BLOCK_STATUS));
208
209 SDEROT_ERR(
210 "invalid_cmd_offset = %x, fsm_state = %x\n",
211 SDE_ROTREG_READ(rot->mdss_base,
212 REGDMA_CSR_REGDMA_INVALID_CMD_RAM_OFFSET),
213 SDE_ROTREG_READ(rot->mdss_base,
214 REGDMA_CSR_REGDMA_FSM_STATE));
215}
216
Alan Kwong9487de22016-01-16 22:06:36 -0500217/**
218 * sde_hw_rotator_get_ctx(): Retrieve rotator context from rotator HW based
219 * on provided session_id. Each rotator has a different session_id.
220 */
221static struct sde_hw_rotator_context *sde_hw_rotator_get_ctx(
222 struct sde_hw_rotator *rot, u32 session_id,
223 enum sde_rot_queue_prio q_id)
224{
225 int i;
226 struct sde_hw_rotator_context *ctx = NULL;
227
228 for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++) {
229 ctx = rot->rotCtx[q_id][i];
230
231 if (ctx && (ctx->session_id == session_id)) {
232 SDEROT_DBG(
233 "rotCtx sloti[%d][%d] ==> ctx:%p | session-id:%d\n",
234 q_id, i, ctx, ctx->session_id);
235 return ctx;
236 }
237 }
238
239 return NULL;
240}
241
242/*
243 * sde_hw_rotator_map_vaddr - map the debug buffer to kernel space
244 * @dbgbuf: Pointer to debug buffer
245 * @buf: Pointer to layer buffer structure
246 * @data: Pointer to h/w mapped buffer structure
247 */
248static void sde_hw_rotator_map_vaddr(struct sde_dbg_buf *dbgbuf,
249 struct sde_layer_buffer *buf, struct sde_mdp_data *data)
250{
251 dbgbuf->dmabuf = data->p[0].srcp_dma_buf;
252 dbgbuf->buflen = data->p[0].srcp_dma_buf->size;
253
254 dbgbuf->vaddr = NULL;
255 dbgbuf->width = buf->width;
256 dbgbuf->height = buf->height;
257
258 if (dbgbuf->dmabuf && (dbgbuf->buflen > 0)) {
259 dma_buf_begin_cpu_access(dbgbuf->dmabuf, 0, dbgbuf->buflen,
260 DMA_FROM_DEVICE);
261 dbgbuf->vaddr = dma_buf_kmap(dbgbuf->dmabuf, 0);
262 SDEROT_DBG("vaddr mapping: 0x%p/%ld w:%d/h:%d\n",
263 dbgbuf->vaddr, dbgbuf->buflen,
264 dbgbuf->width, dbgbuf->height);
265 }
266}
267
268/*
269 * sde_hw_rotator_unmap_vaddr - unmap the debug buffer from kernel space
270 * @dbgbuf: Pointer to debug buffer
271 */
272static void sde_hw_rotator_unmap_vaddr(struct sde_dbg_buf *dbgbuf)
273{
274 if (dbgbuf->vaddr) {
275 dma_buf_kunmap(dbgbuf->dmabuf, 0, dbgbuf->vaddr);
276 dma_buf_end_cpu_access(dbgbuf->dmabuf, 0, dbgbuf->buflen,
277 DMA_FROM_DEVICE);
278 }
279
280 dbgbuf->vaddr = NULL;
281 dbgbuf->dmabuf = NULL;
282 dbgbuf->buflen = 0;
283 dbgbuf->width = 0;
284 dbgbuf->height = 0;
285}
286
/*
 * sde_hw_rotator_setup_timestamp_packet - setup timestamp writeback command
 * @ctx: Pointer to rotator context
 * @mask: Bit mask location of the timestamp
 * @swts: Software timestamp
 *
 * Appends a minimal 1x1 rotation job to the regdma segment whose only
 * purpose is to update the software timestamp register once the hw has
 * drained the preceding commands.
 */
static void sde_hw_rotator_setup_timestamp_packet(
		struct sde_hw_rotator_context *ctx, u32 mask, u32 swts)
{
	u32 *wrptr;

	wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	/*
	 * Create a dummy packet write out to 1 location for timestamp
	 * generation.
	 */
	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_SIZE, 6);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x00010001); /* 1x1 source size */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x00010001); /* 1x1 output size */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, ctx->ts_addr); /* src = ts buffer */
	SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_YSTRIDE0, 4);
	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_FORMAT, 4);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x004037FF); /* fixed src format */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x03020100); /* unpack pattern */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x80000000); /* op mode */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, ctx->timestamp);
	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_WB_DST_FORMAT, 4);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x000037FF); /* fixed dst format */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x03020100); /* pack pattern */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, ctx->ts_addr); /* dst = ts buffer */
	SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_YSTRIDE0, 4);
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_SIZE, 0x00010001);
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_IMG_SIZE, 0x00010001);
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_XY, 0);
	SDE_REGDMA_WRITE(wrptr, ROTTOP_DNSC, 0); /* no downscale */
	SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, 1);
	/* fold the new timestamp into its queue's field of the swts reg */
	SDE_REGDMA_MODIFY(wrptr, REGDMA_TIMESTAMP_REG, mask, swts);
	SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, 1); /* kick the job */

	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
}
333
/*
 * sde_hw_rotator_setup_fetchengine - setup fetch engine
 * @ctx: Pointer to rotator context
 * @queue_id: Priority queue identifier
 * @cfg: Fetch configuration
 * @danger_lut: real-time QoS LUT for danger setting (not used)
 * @safe_lut: real-time QoS LUT for safe setting (not used)
 * @dnsc_factor_w: downscale factor for width
 * @dnsc_factor_h: downscale factor for height
 * @flags: Control flag
 *
 * Programs the SSPP (fetch) side of the rotator into the regdma command
 * segment: source geometry, plane addresses/strides, format, unpack
 * pattern, op mode and fetch block size.
 */
static void sde_hw_rotator_setup_fetchengine(struct sde_hw_rotator_context *ctx,
		enum sde_rot_queue_prio queue_id,
		struct sde_hw_rot_sspp_cfg *cfg, u32 danger_lut, u32 safe_lut,
		u32 dnsc_factor_w, u32 dnsc_factor_h, u32 flags)
{
	struct sde_hw_rotator *rot = ctx->rot;
	struct sde_mdp_format_params *fmt;
	struct sde_mdp_data *data;
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	u32 *wrptr;
	u32 opmode = 0;
	u32 chroma_samp = 0;
	u32 src_format = 0;
	u32 unpack = 0;
	u32 width = cfg->img_width;
	u32 height = cfg->img_height;
	u32 fetch_blocksize = 0;
	int i;

	/* regdma mode requires the engine and its irqs armed up front */
	if (ctx->rot->mode == ROT_REGDMA_ON) {
		SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_INT_EN,
				REGDMA_INT_MASK);
		SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_OP_MODE,
				REGDMA_EN);
	}

	wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	/* source image setup */
	if ((flags & SDE_ROT_FLAG_DEINTERLACE)
			&& !(flags & SDE_ROT_FLAG_SOURCE_ROTATED_90)) {
		/*
		 * Deinterlace fetches every other line: double the strides,
		 * halve the height. NOTE(review): width/height are adjusted
		 * here but not referenced later in this function -- the
		 * register writes below use cfg->src_rect instead; confirm
		 * whether they are intentionally unused.
		 */
		for (i = 0; i < cfg->src_plane.num_planes; i++)
			cfg->src_plane.ystride[i] *= 2;
		width *= 2;
		height /= 2;
	}

	/*
	 * REGDMA BLK write from SRC_SIZE to OP_MODE, total 15 registers
	 */
	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_SIZE, 15);

	/* SRC_SIZE, SRC_IMG_SIZE, SRC_XY, OUT_SIZE, OUT_XY */
	SDE_REGDMA_BLKWRITE_DATA(wrptr,
			cfg->src_rect->w | (cfg->src_rect->h << 16));
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0); /* SRC_IMG_SIZE unused */
	SDE_REGDMA_BLKWRITE_DATA(wrptr,
			cfg->src_rect->x | (cfg->src_rect->y << 16));
	SDE_REGDMA_BLKWRITE_DATA(wrptr,
			cfg->src_rect->w | (cfg->src_rect->h << 16));
	SDE_REGDMA_BLKWRITE_DATA(wrptr,
			cfg->src_rect->x | (cfg->src_rect->y << 16));

	/* SRC_ADDR [0-3], SRC_YSTRIDE [0-1] */
	data = cfg->data;
	for (i = 0; i < SDE_ROT_MAX_PLANES; i++)
		SDE_REGDMA_BLKWRITE_DATA(wrptr, data->p[i].addr);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->src_plane.ystride[0] |
			(cfg->src_plane.ystride[1] << 16));
	SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->src_plane.ystride[2] |
			(cfg->src_plane.ystride[3] << 16));

	/* UNUSED, write 0 */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);

	/* setup source format */
	fmt = cfg->fmt;

	chroma_samp = fmt->chroma_sample;
	/* a pre-rotated source swaps horizontal/vertical chroma siting */
	if (flags & SDE_ROT_FLAG_SOURCE_ROTATED_90) {
		if (chroma_samp == SDE_MDP_CHROMA_H2V1)
			chroma_samp = SDE_MDP_CHROMA_H1V2;
		else if (chroma_samp == SDE_MDP_CHROMA_H1V2)
			chroma_samp = SDE_MDP_CHROMA_H2V1;
	}

	src_format = (chroma_samp << 23) |
		(fmt->fetch_planes << 19) |
		(fmt->bits[C3_ALPHA] << 6) |
		(fmt->bits[C2_R_Cr] << 4) |
		(fmt->bits[C1_B_Cb] << 2) |
		(fmt->bits[C0_G_Y] << 0);

	if (fmt->alpha_enable &&
			(fmt->fetch_planes == SDE_MDP_PLANE_INTERLEAVED))
		src_format |= BIT(8); /* SRCC3_EN */

	src_format |= ((fmt->unpack_count - 1) << 12) |
			(fmt->unpack_tight << 17) |
			(fmt->unpack_align_msb << 18) |
			((fmt->bpp - 1) << 9) |
			((fmt->frame_format & 3) << 30);

	if (flags & SDE_ROT_FLAG_ROT_90)
		src_format |= BIT(11); /* ROT90 */

	if (sde_mdp_is_ubwc_format(fmt))
		opmode |= BIT(0); /* BWC_DEC_EN */

	/* if this is YUV pixel format, enable CSC */
	if (sde_mdp_is_yuv_format(fmt))
		src_format |= BIT(15); /* SRC_COLOR_SPACE */

	if (fmt->pixel_mode == SDE_MDP_PIXEL_10BIT)
		src_format |= BIT(14); /* UNPACK_DX_FORMAT */

	/* SRC_FORMAT */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, src_format);

	/* setup source unpack pattern */
	unpack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
			(fmt->element[1] << 8) | (fmt->element[0] << 0);

	/* SRC_UNPACK_PATTERN */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, unpack);

	/* setup source op mode */
	if (flags & SDE_ROT_FLAG_FLIP_LR)
		opmode |= BIT(13); /* FLIP_MODE L/R horizontal flip */
	if (flags & SDE_ROT_FLAG_FLIP_UD)
		opmode |= BIT(14); /* FLIP_MODE U/D vertical flip */
	opmode |= BIT(31); /* MDSS_MDP_OP_PE_OVERRIDE */

	/* SRC_OP_MODE */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, opmode);

	/* setup source fetch config, TP10 uses different block size */
	if (test_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map) &&
			(dnsc_factor_w == 1) && (dnsc_factor_h == 1)) {
		/* extended block sizes only when not downscaling */
		if (sde_mdp_is_tp10_format(fmt))
			fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_144_EXT;
		else
			fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_192_EXT;
	} else {
		if (sde_mdp_is_tp10_format(fmt))
			fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_96;
		else
			fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_128;
	}

	SDE_REGDMA_WRITE(wrptr, ROT_SSPP_FETCH_CONFIG,
			fetch_blocksize |
			SDE_ROT_SSPP_FETCH_CONFIG_RESET_VALUE |
			((rot->highest_bank & 0x3) << 18));

	/* setup source buffer plane security status */
	if (flags & SDE_ROT_FLAG_SECURE_OVERLAY_SESSION) {
		SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_ADDR_SW_STATUS, 0xF);
		ctx->is_secure = true;
	}

	/* Update command queue write ptr */
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
}
499
/*
 * sde_hw_rotator_setup_wbengine - setup writeback engine
 * @ctx: Pointer to rotator context
 * @queue_id: Priority queue identifier
 * @cfg: Writeback configuration
 * @flags: Control flag
 *
 * Programs the WB (writeback) side of the rotator into the regdma command
 * segment: destination format/pack pattern, plane addresses/strides,
 * output geometry, downscale factors and top-level op mode.
 */
static void sde_hw_rotator_setup_wbengine(struct sde_hw_rotator_context *ctx,
		enum sde_rot_queue_prio queue_id,
		struct sde_hw_rot_wb_cfg *cfg,
		u32 flags)
{
	struct sde_mdp_format_params *fmt;
	u32 *wrptr;
	u32 pack = 0;
	u32 dst_format = 0;
	int i;

	wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	fmt = cfg->fmt;

	/* setup WB DST format */
	dst_format |= (fmt->chroma_sample << 23) |
			(fmt->fetch_planes << 19) |
			(fmt->bits[C3_ALPHA] << 6) |
			(fmt->bits[C2_R_Cr] << 4) |
			(fmt->bits[C1_B_Cb] << 2) |
			(fmt->bits[C0_G_Y] << 0);

	/* alpha control */
	if (fmt->bits[C3_ALPHA] || fmt->alpha_enable) {
		dst_format |= BIT(8);
		if (!fmt->alpha_enable) {
			/* constant alpha substituted for the X channel */
			dst_format |= BIT(14);
			SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ALPHA_X_VALUE, 0);
		}
	}

	dst_format |= ((fmt->unpack_count - 1) << 12) |
			(fmt->unpack_tight << 17) |
			(fmt->unpack_align_msb << 18) |
			((fmt->bpp - 1) << 9) |
			((fmt->frame_format & 3) << 30);

	if (sde_mdp_is_yuv_format(fmt))
		dst_format |= BIT(15);

	if (fmt->pixel_mode == SDE_MDP_PIXEL_10BIT)
		dst_format |= BIT(21); /* PACK_DX_FORMAT */

	/*
	 * REGDMA BLK write, from DST_FORMAT to DST_YSTRIDE 1, total 9 regs
	 */
	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_WB_DST_FORMAT, 9);

	/* DST_FORMAT */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, dst_format);

	/* DST_OP_MODE */
	if (sde_mdp_is_ubwc_format(fmt))
		SDE_REGDMA_BLKWRITE_DATA(wrptr, BIT(0));
	else
		SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);

	/* DST_PACK_PATTERN */
	pack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
			(fmt->element[1] << 8) | (fmt->element[0] << 0);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, pack);

	/* DST_ADDR [0-3], DST_YSTRIDE [0-1] */
	for (i = 0; i < SDE_ROT_MAX_PLANES; i++)
		SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->data->p[i].addr);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->dst_plane.ystride[0] |
			(cfg->dst_plane.ystride[1] << 16));
	SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->dst_plane.ystride[2] |
			(cfg->dst_plane.ystride[3] << 16));

	/* setup WB out image size and ROI */
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_IMG_SIZE,
			cfg->img_width | (cfg->img_height << 16));
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_SIZE,
			cfg->dst_rect->w | (cfg->dst_rect->h << 16));
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_XY,
			cfg->dst_rect->x | (cfg->dst_rect->y << 16));

	/*
	 * setup Downscale factor
	 */
	SDE_REGDMA_WRITE(wrptr, ROTTOP_DNSC,
			cfg->v_downscale_factor |
			(cfg->h_downscale_factor << 16));

	/* write config setup for bank configration */
	SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_WRITE_CONFIG,
			(ctx->rot->highest_bank & 0x3) << 8);

	/* op mode: bit 1 selects 90-degree rotation, bit 0 enables engine */
	if (flags & SDE_ROT_FLAG_ROT_90)
		SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, 0x3);
	else
		SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, 0x1);

	/* Update command queue write ptr */
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
}
605
/*
 * sde_hw_rotator_start_no_regdma - start non-regdma operation
 * @ctx: Pointer to rotator context
 * @queue_id: Priority queue identifier
 * @return: the context's timestamp
 *
 * Interprets the queued regdma command stream on the CPU, issuing each
 * encoded operation directly as register writes. The rotator starts as
 * soon as the trailing ROTTOP_START_CTRL write lands.
 */
static u32 sde_hw_rotator_start_no_regdma(struct sde_hw_rotator_context *ctx,
		enum sde_rot_queue_prio queue_id)
{
	struct sde_hw_rotator *rot = ctx->rot;
	u32 *wrptr;
	u32 *rdptr;
	u8 *addr;
	u32 mask;
	u32 blksize;

	rdptr = sde_hw_rotator_get_regdma_segment_base(ctx);
	wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	/* in irq mode, arm the done interrupt before kicking the job */
	if (rot->irq_num >= 0) {
		SDE_REGDMA_WRITE(wrptr, ROTTOP_INTR_EN, 1);
		SDE_REGDMA_WRITE(wrptr, ROTTOP_INTR_CLEAR, 1);
		reinit_completion(&ctx->rot_comp);
		sde_hw_rotator_enable_irq(rot);
	}

	SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, 1);

	/* Update command queue write ptr */
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);

	SDEROT_DBG("BEGIN %d\n", ctx->timestamp);
	/* Write all command stream to Rotator blocks */
	/* Rotator will start right away after command stream finish writing */
	while (rdptr < wrptr) {
		u32 op = REGDMA_OP_MASK & *rdptr;

		switch (op) {
		case REGDMA_OP_NOP:
			SDEROT_DBG("NOP\n");
			rdptr++;
			break;
		case REGDMA_OP_REGWRITE:
			/* one register offset word, one data word */
			SDEROT_DBG("REGW %6.6x %8.8x\n",
					rdptr[0] & REGDMA_ADDR_OFFSET_MASK,
					rdptr[1]);
			addr = rot->mdss_base +
					(*rdptr++ & REGDMA_ADDR_OFFSET_MASK);
			writel_relaxed(*rdptr++, addr);
			break;
		case REGDMA_OP_REGMODIFY:
			/* offset, keep-mask, then OR-in data */
			SDEROT_DBG("REGM %6.6x %8.8x %8.8x\n",
					rdptr[0] & REGDMA_ADDR_OFFSET_MASK,
					rdptr[1], rdptr[2]);
			addr = rot->mdss_base +
					(*rdptr++ & REGDMA_ADDR_OFFSET_MASK);
			mask = *rdptr++;
			writel_relaxed((readl_relaxed(addr) & mask) | *rdptr++,
					addr);
			break;
		case REGDMA_OP_BLKWRITE_SINGLE:
			/* burst of data words to one fixed register */
			SDEROT_DBG("BLKWS %6.6x %6.6x\n",
					rdptr[0] & REGDMA_ADDR_OFFSET_MASK,
					rdptr[1]);
			addr = rot->mdss_base +
					(*rdptr++ & REGDMA_ADDR_OFFSET_MASK);
			blksize = *rdptr++;
			while (blksize--) {
				SDEROT_DBG("DATA %8.8x\n", rdptr[0]);
				writel_relaxed(*rdptr++, addr);
			}
			break;
		case REGDMA_OP_BLKWRITE_INC:
			/* burst of data words to consecutive registers */
			SDEROT_DBG("BLKWI %6.6x %6.6x\n",
					rdptr[0] & REGDMA_ADDR_OFFSET_MASK,
					rdptr[1]);
			addr = rot->mdss_base +
					(*rdptr++ & REGDMA_ADDR_OFFSET_MASK);
			blksize = *rdptr++;
			while (blksize--) {
				SDEROT_DBG("DATA %8.8x\n", rdptr[0]);
				writel_relaxed(*rdptr++, addr);
				addr += 4;
			}
			break;
		default:
			/* Other not supported OP mode
			 * Skip data for now for unregonized OP mode
			 */
			SDEROT_DBG("UNDEFINED\n");
			rdptr++;
			break;
		}
	}
	SDEROT_DBG("END %d\n", ctx->timestamp);

	return ctx->timestamp;
}
703
/*
 * sde_hw_rotator_start_regdma - start regdma operation
 * @ctx: Pointer to rotator context
 * @queue_id: Priority queue identifier
 * @return: the context's timestamp
 *
 * Submits two back-to-back regdma jobs to the context's priority queue:
 * the rotation command stream, followed by the timestamp packet that
 * updates the software timestamp when the rotation drains.
 */
static u32 sde_hw_rotator_start_regdma(struct sde_hw_rotator_context *ctx,
		enum sde_rot_queue_prio queue_id)
{
	struct sde_hw_rotator *rot = ctx->rot;
	u32 *wrptr;
	u32 regdmaSlot;
	u32 offset;
	long length;
	long ts_length;
	u32 enableInt;
	u32 swts = 0;
	u32 mask = 0;

	wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	/*
	 * Last ROT command must be ROT_START before REGDMA start
	 */
	SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, 1);
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);

	/*
	 * Start REGDMA with command offset and size
	 */
	regdmaSlot = sde_hw_rotator_get_regdma_ctxidx(ctx);
	/* length in 32-bit words; offset in words from the command RAM base */
	length = ((long)wrptr - (long)ctx->regdma_base) / 4;
	offset = (u32)(ctx->regdma_base - (u32 *)(rot->mdss_base +
				REGDMA_RAM_REGDMA_CMD_RAM));
	/* interrupt line alternates with the timestamp's low bit */
	enableInt = ((ctx->timestamp & 1) + 1) << 30;

	SDEROT_DBG(
		"regdma(%d)[%d] <== INT:0x%X|length:%ld|offset:0x%X, ts:%X\n",
		queue_id, regdmaSlot, enableInt, length, offset,
		ctx->timestamp);

	/* ensure the command packet is issued before the submit command */
	wmb();

	/* REGDMA submission for current context */
	if (queue_id == ROT_QUEUE_HIGH_PRIORITY) {
		SDE_ROTREG_WRITE(rot->mdss_base,
				REGDMA_CSR_REGDMA_QUEUE_0_SUBMIT,
				(length << 14) | offset);
		swts = ctx->timestamp;
		mask = ~SDE_REGDMA_SWTS_MASK;
	} else {
		/* low priority timestamp lives in the upper swts field */
		SDE_ROTREG_WRITE(rot->mdss_base,
				REGDMA_CSR_REGDMA_QUEUE_1_SUBMIT,
				(length << 14) | offset);
		swts = ctx->timestamp << SDE_REGDMA_SWTS_SHIFT;
		mask = ~(SDE_REGDMA_SWTS_MASK << SDE_REGDMA_SWTS_SHIFT);
	}

	/* Write timestamp after previous rotator job finished */
	sde_hw_rotator_setup_timestamp_packet(ctx, mask, swts);
	offset += length;
	ts_length = sde_hw_rotator_get_regdma_segment(ctx) - wrptr;
	WARN_ON((length + ts_length) > SDE_HW_ROT_REGDMA_SEG_SIZE);

	/* ensure command packet is issue before the submit command */
	wmb();

	/* second submission: the timestamp packet, with irq enabled */
	if (queue_id == ROT_QUEUE_HIGH_PRIORITY) {
		SDE_ROTREG_WRITE(rot->mdss_base,
				REGDMA_CSR_REGDMA_QUEUE_0_SUBMIT,
				enableInt | (ts_length << 14) | offset);
	} else {
		SDE_ROTREG_WRITE(rot->mdss_base,
				REGDMA_CSR_REGDMA_QUEUE_1_SUBMIT,
				enableInt | (ts_length << 14) | offset);
	}

	/* Update command queue write ptr */
	/*
	 * NOTE(review): wrptr here predates the timestamp packet appended
	 * above, so this put rewinds the segment to before that packet --
	 * presumably intentional (both jobs already submitted to hw), but
	 * confirm against segment reuse semantics.
	 */
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);

	return ctx->timestamp;
}
786
/*
 * sde_hw_rotator_wait_done_no_regdma - wait for non-regdma completion
 * @ctx: Pointer to rotator context
 * @queue_id: Priority queue identifier
 * @flag: Option flag (unused here)
 * @return: 0 on success, (u32)-ENODEV when the hw reports an error
 */
static u32 sde_hw_rotator_wait_done_no_regdma(
		struct sde_hw_rotator_context *ctx,
		enum sde_rot_queue_prio queue_id, u32 flag)
{
	struct sde_hw_rotator *rot = ctx->rot;
	int rc = 0;
	u32 sts = 0;
	u32 status;
	unsigned long flags;

	if (rot->irq_num >= 0) {
		/* irq mode: block on the completion signalled by the ISR */
		SDEROT_DBG("Wait for Rotator completion\n");
		rc = wait_for_completion_timeout(&ctx->rot_comp,
					KOFF_TIMEOUT);

		spin_lock_irqsave(&rot->rotisr_lock, flags);
		status = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
		if (rc == 0) {
			/*
			 * Timeout, there might be error,
			 * or rotator still busy
			 */
			if (status & ROT_BUSY_BIT)
				SDEROT_ERR(
					"Timeout waiting for rotator done\n");
			else if (status & ROT_ERROR_BIT)
				SDEROT_ERR(
					"Rotator report error status\n");
			else
				SDEROT_WARN(
					"Timeout waiting, but rotator job is done!!\n");

			/*
			 * drop irq ref on the timeout path only; the
			 * completion path presumably balances it elsewhere
			 * (e.g. in the ISR) -- confirm.
			 */
			sde_hw_rotator_disable_irq(rot);
		}
		spin_unlock_irqrestore(&rot->rotisr_lock, flags);
	} else {
		/* poll mode: up to 200 * 500us while busy and error-free */
		int cnt = 200;

		do {
			udelay(500);
			status = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
			cnt--;
		} while ((cnt > 0) && (status & ROT_BUSY_BIT)
				&& ((status & ROT_ERROR_BIT) == 0));

		if (status & ROT_ERROR_BIT)
			SDEROT_ERR("Rotator error\n");
		else if (status & ROT_BUSY_BIT)
			SDEROT_ERR("Rotator busy\n");

		SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
				ROT_DONE_CLEAR);
	}

	/* NOTE(review): negative errno is stored into a u32 return value;
	 * callers presumably test for non-zero -- confirm before changing.
	 */
	sts = (status & ROT_ERROR_BIT) ? -ENODEV : 0;

	return sts;
}
851
/*
 * sde_hw_rotator_wait_done_regdma - wait for regdma completion
 * @ctx: Pointer to rotator context
 * @queue_id: Priority queue identifier
 * @flag: Option flag (unused here)
 * @return: 0 on success, (u32)-ENODEV on timeout or hw error
 */
static u32 sde_hw_rotator_wait_done_regdma(
		struct sde_hw_rotator_context *ctx,
		enum sde_rot_queue_prio queue_id, u32 flag)
{
	struct sde_hw_rotator *rot = ctx->rot;
	int rc = 0;
	u32 status;
	u32 last_isr;
	u32 last_ts;
	u32 int_id;
	u32 swts;
	u32 sts = 0;
	unsigned long flags;

	if (rot->irq_num >= 0) {
		/* irq mode: sleep until the sw timestamp catches up */
		SDEROT_DBG("Wait for REGDMA completion, ctx:%p, ts:%X\n",
				ctx, ctx->timestamp);
		rc = wait_event_timeout(ctx->regdma_waitq,
				!sde_hw_rotator_pending_swts(rot, ctx, &swts),
				KOFF_TIMEOUT);

		spin_lock_irqsave(&rot->rotisr_lock, flags);

		/* ISR snapshots consumed under the same lock it writes them */
		last_isr = ctx->last_regdma_isr_status;
		last_ts = ctx->last_regdma_timestamp;
		status = last_isr & REGDMA_INT_MASK;
		int_id = last_ts & 1;
		SDEROT_DBG("INT status:0x%X, INT id:%d, timestamp:0x%X\n",
				status, int_id, last_ts);

		if (rc == 0 || (status & REGDMA_INT_ERR_MASK)) {
			bool pending;

			pending = sde_hw_rotator_pending_swts(rot, ctx, &swts);
			SDEROT_ERR(
				"Timeout wait for regdma interrupt status, ts:0x%X/0x%X pending:%d\n",
				ctx->timestamp, swts, pending);

			if (status & REGDMA_WATCHDOG_INT)
				SDEROT_ERR("REGDMA watchdog interrupt\n");
			else if (status & REGDMA_INVALID_DESCRIPTOR)
				SDEROT_ERR("REGDMA invalid descriptor\n");
			else if (status & REGDMA_INCOMPLETE_CMD)
				SDEROT_ERR("REGDMA incomplete command\n");
			else if (status & REGDMA_INVALID_CMD)
				SDEROT_ERR("REGDMA invalid command\n");

			sde_hw_rotator_dump_status(rot);
			status = ROT_ERROR_BIT;
		} else {
			/* rc == 1: condition met only at timeout expiry,
			 * i.e. the job finished without a matching irq */
			if (rc == 1)
				SDEROT_WARN(
					"REGDMA done but no irq, ts:0x%X/0x%X\n",
					ctx->timestamp, swts);
			status = 0;
		}

		spin_unlock_irqrestore(&rot->rotisr_lock, flags);
	} else {
		/* poll mode: up to 200 * 500us while busy and error-free */
		int cnt = 200;

		do {
			udelay(500);
			status = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
			cnt--;
		} while ((cnt > 0) && (status & ROT_BUSY_BIT)
				&& ((status & ROT_ERROR_BIT) == 0));

		if (status & ROT_ERROR_BIT)
			SDEROT_ERR("Rotator error\n");
		else if (status & ROT_BUSY_BIT)
			SDEROT_ERR("Rotator busy\n");

		SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_INT_CLEAR,
				0xFFFF);
	}

	sts = (status & ROT_ERROR_BIT) ? -ENODEV : 0;

	return sts;
}
939
940/*
941 * setup_rotator_ops - setup callback functions for the low-level HAL
942 * @ops: Pointer to low-level ops callback
943 * @mode: Operation mode (non-regdma or regdma)
944 */
945static void setup_rotator_ops(struct sde_hw_rotator_ops *ops,
946 enum sde_rotator_regdma_mode mode)
947{
948 ops->setup_rotator_fetchengine = sde_hw_rotator_setup_fetchengine;
949 ops->setup_rotator_wbengine = sde_hw_rotator_setup_wbengine;
950 if (mode == ROT_REGDMA_ON) {
951 ops->start_rotator = sde_hw_rotator_start_regdma;
952 ops->wait_rotator_done = sde_hw_rotator_wait_done_regdma;
953 } else {
954 ops->start_rotator = sde_hw_rotator_start_no_regdma;
955 ops->wait_rotator_done = sde_hw_rotator_wait_done_no_regdma;
956 }
957}
958
959/*
960 * sde_hw_rotator_swts_create - create software timestamp buffer
961 * @rot: Pointer to rotator hw
962 *
963 * This buffer is used by regdma to keep track of last completed command.
964 */
965static int sde_hw_rotator_swts_create(struct sde_hw_rotator *rot)
966{
967 int rc = 0;
968 struct ion_handle *handle;
969 struct sde_mdp_img_data *data;
970 u32 bufsize = sizeof(int) * SDE_HW_ROT_REGDMA_TOTAL_CTX * 2;
971
972 rot->iclient = msm_ion_client_create(rot->pdev->name);
973 if (IS_ERR_OR_NULL(rot->iclient)) {
974 SDEROT_ERR("msm_ion_client_create() return error (%p)\n",
975 rot->iclient);
976 return -EINVAL;
977 }
978
979 handle = ion_alloc(rot->iclient, bufsize, SZ_4K,
980 ION_HEAP(ION_SYSTEM_HEAP_ID), 0);
981 if (IS_ERR_OR_NULL(handle)) {
982 SDEROT_ERR("ion memory allocation failed\n");
983 return -ENOMEM;
984 }
985
986 data = &rot->swts_buf;
987 data->len = bufsize;
988 data->srcp_dma_buf = ion_share_dma_buf(rot->iclient, handle);
989 if (IS_ERR(data->srcp_dma_buf)) {
990 SDEROT_ERR("ion_dma_buf setup failed\n");
991 rc = -ENOMEM;
992 goto imap_err;
993 }
994
995 sde_smmu_ctrl(1);
996
997 data->srcp_attachment = sde_smmu_dma_buf_attach(data->srcp_dma_buf,
998 &rot->pdev->dev, SDE_IOMMU_DOMAIN_ROT_UNSECURE);
999 if (IS_ERR_OR_NULL(data->srcp_attachment)) {
1000 SDEROT_ERR("sde_smmu_dma_buf_attach error\n");
1001 rc = -ENOMEM;
1002 goto err_put;
1003 }
1004
1005 data->srcp_table = dma_buf_map_attachment(data->srcp_attachment,
1006 DMA_BIDIRECTIONAL);
1007 if (IS_ERR_OR_NULL(data->srcp_table)) {
1008 SDEROT_ERR("dma_buf_map_attachment error\n");
1009 rc = -ENOMEM;
1010 goto err_detach;
1011 }
1012
1013 rc = sde_smmu_map_dma_buf(data->srcp_dma_buf, data->srcp_table,
1014 SDE_IOMMU_DOMAIN_ROT_UNSECURE, &data->addr,
1015 &data->len, DMA_BIDIRECTIONAL);
1016 if (IS_ERR_VALUE(rc)) {
1017 SDEROT_ERR("smmu_map_dma_buf failed: (%d)\n", rc);
1018 goto err_unmap;
1019 }
1020
1021 dma_buf_begin_cpu_access(data->srcp_dma_buf, 0, data->len,
1022 DMA_FROM_DEVICE);
1023 rot->swts_buffer = dma_buf_kmap(data->srcp_dma_buf, 0);
1024 if (IS_ERR_OR_NULL(rot->swts_buffer)) {
1025 SDEROT_ERR("ion kernel memory mapping failed\n");
1026 rc = IS_ERR(rot->swts_buffer);
1027 goto kmap_err;
1028 }
1029
1030 data->mapped = true;
1031 SDEROT_DBG("swts buffer mapped: %pad/%lx va:%p\n", &data->addr,
1032 data->len, rot->swts_buffer);
1033
1034 ion_free(rot->iclient, handle);
1035
1036 sde_smmu_ctrl(0);
1037
1038 return rc;
1039kmap_err:
1040 sde_smmu_unmap_dma_buf(data->srcp_table, SDE_IOMMU_DOMAIN_ROT_UNSECURE,
1041 DMA_FROM_DEVICE, data->srcp_dma_buf);
1042err_unmap:
1043 dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table,
1044 DMA_FROM_DEVICE);
1045err_detach:
1046 dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment);
1047err_put:
1048 dma_buf_put(data->srcp_dma_buf);
1049 data->srcp_dma_buf = NULL;
1050imap_err:
1051 ion_free(rot->iclient, handle);
1052
1053 return rc;
1054}
1055
1056/*
1057 * sde_hw_rotator_swtc_destroy - destroy software timestamp buffer
1058 * @rot: Pointer to rotator hw
1059 */
1060static void sde_hw_rotator_swtc_destroy(struct sde_hw_rotator *rot)
1061{
1062 struct sde_mdp_img_data *data;
1063
1064 data = &rot->swts_buf;
1065
1066 dma_buf_end_cpu_access(data->srcp_dma_buf, 0, data->len,
1067 DMA_FROM_DEVICE);
1068 dma_buf_kunmap(data->srcp_dma_buf, 0, rot->swts_buffer);
1069
1070 sde_smmu_unmap_dma_buf(data->srcp_table, SDE_IOMMU_DOMAIN_ROT_UNSECURE,
1071 DMA_FROM_DEVICE, data->srcp_dma_buf);
1072 dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table,
1073 DMA_FROM_DEVICE);
1074 dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment);
1075 dma_buf_put(data->srcp_dma_buf);
1076 data->srcp_dma_buf = NULL;
1077}
1078
1079/*
1080 * sde_hw_rotator_destroy - Destroy hw rotator and free allocated resources
1081 * @mgr: Pointer to rotator manager
1082 */
1083static void sde_hw_rotator_destroy(struct sde_rot_mgr *mgr)
1084{
1085 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
1086 struct sde_hw_rotator *rot;
1087
1088 if (!mgr || !mgr->pdev || !mgr->hw_data) {
1089 SDEROT_ERR("null parameters\n");
1090 return;
1091 }
1092
1093 rot = mgr->hw_data;
1094 if (rot->irq_num >= 0)
1095 devm_free_irq(&mgr->pdev->dev, rot->irq_num, mdata);
1096
1097 if (rot->mode == ROT_REGDMA_ON)
1098 sde_hw_rotator_swtc_destroy(rot);
1099
1100 devm_kfree(&mgr->pdev->dev, mgr->hw_data);
1101 mgr->hw_data = NULL;
1102}
1103
1104/*
1105 * sde_hw_rotator_alloc_ext - allocate rotator resource from rotator hw
1106 * @mgr: Pointer to rotator manager
1107 * @pipe_id: pipe identifier (not used)
1108 * @wb_id: writeback identifier/priority queue identifier
1109 *
1110 * This function allocates a new hw rotator resource for the given priority.
1111 */
1112static struct sde_rot_hw_resource *sde_hw_rotator_alloc_ext(
1113 struct sde_rot_mgr *mgr, u32 pipe_id, u32 wb_id)
1114{
1115 struct sde_hw_rotator_resource_info *resinfo;
1116
1117 if (!mgr || !mgr->hw_data) {
1118 SDEROT_ERR("null parameters\n");
1119 return NULL;
1120 }
1121
1122 /*
1123 * Allocate rotator resource info. Each allocation is per
1124 * HW priority queue
1125 */
1126 resinfo = devm_kzalloc(&mgr->pdev->dev, sizeof(*resinfo), GFP_KERNEL);
1127 if (!resinfo) {
1128 SDEROT_ERR("Failed allocation HW rotator resource info\n");
1129 return NULL;
1130 }
1131
1132 resinfo->rot = mgr->hw_data;
1133 resinfo->hw.wb_id = wb_id;
1134 atomic_set(&resinfo->hw.num_active, 0);
1135 init_waitqueue_head(&resinfo->hw.wait_queue);
1136
1137 /* For non-regdma, only support one active session */
1138 if (resinfo->rot->mode == ROT_REGDMA_OFF)
1139 resinfo->hw.max_active = 1;
1140 else {
1141 resinfo->hw.max_active = SDE_HW_ROT_REGDMA_TOTAL_CTX - 1;
1142
1143 if (resinfo->rot->iclient == NULL)
1144 sde_hw_rotator_swts_create(resinfo->rot);
1145 }
1146
Alan Kwongf987ea32016-07-06 12:11:44 -04001147 if (resinfo->rot->irq_num >= 0)
Alan Kwong818b7fc2016-07-24 22:07:41 -04001148 sde_hw_rotator_enable_irq(resinfo->rot);
Alan Kwongf987ea32016-07-06 12:11:44 -04001149
Alan Kwong9487de22016-01-16 22:06:36 -05001150 SDEROT_DBG("New rotator resource:%p, priority:%d\n",
1151 resinfo, wb_id);
1152
1153 return &resinfo->hw;
1154}
1155
1156/*
1157 * sde_hw_rotator_free_ext - free the given rotator resource
1158 * @mgr: Pointer to rotator manager
1159 * @hw: Pointer to rotator resource
1160 */
1161static void sde_hw_rotator_free_ext(struct sde_rot_mgr *mgr,
1162 struct sde_rot_hw_resource *hw)
1163{
1164 struct sde_hw_rotator_resource_info *resinfo;
1165
1166 if (!mgr || !mgr->hw_data)
1167 return;
1168
1169 resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
1170
1171 SDEROT_DBG(
1172 "Free rotator resource:%p, priority:%d, active:%d, pending:%d\n",
1173 resinfo, hw->wb_id, atomic_read(&hw->num_active),
1174 hw->pending_count);
1175
Alan Kwongf987ea32016-07-06 12:11:44 -04001176 if (resinfo->rot->irq_num >= 0)
Alan Kwong818b7fc2016-07-24 22:07:41 -04001177 sde_hw_rotator_disable_irq(resinfo->rot);
Alan Kwongf987ea32016-07-06 12:11:44 -04001178
Alan Kwong9487de22016-01-16 22:06:36 -05001179 devm_kfree(&mgr->pdev->dev, resinfo);
1180}
1181
1182/*
1183 * sde_hw_rotator_alloc_rotctx - allocate rotator context
1184 * @rot: Pointer to rotator hw
1185 * @hw: Pointer to rotator resource
1186 * @session_id: Session identifier of this context
1187 *
1188 * This function allocates a new rotator context for the given session id.
1189 */
1190static struct sde_hw_rotator_context *sde_hw_rotator_alloc_rotctx(
1191 struct sde_hw_rotator *rot,
1192 struct sde_rot_hw_resource *hw,
1193 u32 session_id)
1194{
1195 struct sde_hw_rotator_context *ctx;
1196
1197 /* Allocate rotator context */
1198 ctx = devm_kzalloc(&rot->pdev->dev, sizeof(*ctx), GFP_KERNEL);
1199 if (!ctx) {
1200 SDEROT_ERR("Failed allocation HW rotator context\n");
1201 return NULL;
1202 }
1203
1204 ctx->rot = rot;
1205 ctx->q_id = hw->wb_id;
1206 ctx->session_id = session_id;
1207 ctx->hwres = hw;
1208 ctx->timestamp = atomic_add_return(1, &rot->timestamp[ctx->q_id]);
1209 ctx->timestamp &= SDE_REGDMA_SWTS_MASK;
1210 ctx->is_secure = false;
1211
1212 ctx->regdma_base = rot->cmd_wr_ptr[ctx->q_id]
1213 [sde_hw_rotator_get_regdma_ctxidx(ctx)];
1214 ctx->regdma_wrptr = ctx->regdma_base;
1215 ctx->ts_addr = (dma_addr_t)((u32 *)rot->swts_buf.addr +
1216 ctx->q_id * SDE_HW_ROT_REGDMA_TOTAL_CTX +
1217 sde_hw_rotator_get_regdma_ctxidx(ctx));
1218
Alan Kwong818b7fc2016-07-24 22:07:41 -04001219 ctx->last_regdma_timestamp = SDE_REGDMA_SWTS_INVALID;
1220
Alan Kwong9487de22016-01-16 22:06:36 -05001221 init_completion(&ctx->rot_comp);
Alan Kwong818b7fc2016-07-24 22:07:41 -04001222 init_waitqueue_head(&ctx->regdma_waitq);
Alan Kwong9487de22016-01-16 22:06:36 -05001223
1224 /* Store rotator context for lookup purpose */
1225 sde_hw_rotator_put_ctx(ctx);
1226
1227 SDEROT_DBG(
1228 "New rot CTX:%p, ctxidx:%d, session-id:%d, prio:%d, timestamp:%X, active:%d\n",
1229 ctx, sde_hw_rotator_get_regdma_ctxidx(ctx), ctx->session_id,
1230 ctx->q_id, ctx->timestamp,
1231 atomic_read(&ctx->hwres->num_active));
1232
1233 return ctx;
1234}
1235
1236/*
1237 * sde_hw_rotator_free_rotctx - free the given rotator context
1238 * @rot: Pointer to rotator hw
1239 * @ctx: Pointer to rotator context
1240 */
1241static void sde_hw_rotator_free_rotctx(struct sde_hw_rotator *rot,
1242 struct sde_hw_rotator_context *ctx)
1243{
1244 if (!rot || !ctx)
1245 return;
1246
1247 SDEROT_DBG(
1248 "Free rot CTX:%p, ctxidx:%d, session-id:%d, prio:%d, timestamp:%X, active:%d\n",
1249 ctx, sde_hw_rotator_get_regdma_ctxidx(ctx), ctx->session_id,
1250 ctx->q_id, ctx->timestamp,
1251 atomic_read(&ctx->hwres->num_active));
1252
1253 rot->rotCtx[ctx->q_id][sde_hw_rotator_get_regdma_ctxidx(ctx)] = NULL;
1254
1255 devm_kfree(&rot->pdev->dev, ctx);
1256}
1257
1258/*
1259 * sde_hw_rotator_config - configure hw for the given rotation entry
1260 * @hw: Pointer to rotator resource
1261 * @entry: Pointer to rotation entry
1262 *
1263 * This function setup the fetch/writeback/rotator blocks, as well as VBIF
1264 * based on the given rotation entry.
1265 */
1266static int sde_hw_rotator_config(struct sde_rot_hw_resource *hw,
1267 struct sde_rot_entry *entry)
1268{
1269 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
1270 struct sde_hw_rotator *rot;
1271 struct sde_hw_rotator_resource_info *resinfo;
1272 struct sde_hw_rotator_context *ctx;
1273 struct sde_hw_rot_sspp_cfg sspp_cfg;
1274 struct sde_hw_rot_wb_cfg wb_cfg;
1275 u32 danger_lut = 0; /* applicable for realtime client only */
1276 u32 safe_lut = 0; /* applicable for realtime client only */
1277 u32 flags = 0;
1278 struct sde_rotation_item *item;
1279
1280 if (!hw || !entry) {
1281 SDEROT_ERR("null hw resource/entry\n");
1282 return -EINVAL;
1283 }
1284
1285 resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
1286 rot = resinfo->rot;
1287 item = &entry->item;
1288
1289 ctx = sde_hw_rotator_alloc_rotctx(rot, hw, item->session_id);
1290 if (!ctx) {
1291 SDEROT_ERR("Failed allocating rotator context!!\n");
1292 return -EINVAL;
1293 }
1294
1295 flags = (item->flags & SDE_ROTATION_FLIP_LR) ?
1296 SDE_ROT_FLAG_FLIP_LR : 0;
1297 flags |= (item->flags & SDE_ROTATION_FLIP_UD) ?
1298 SDE_ROT_FLAG_FLIP_UD : 0;
1299 flags |= (item->flags & SDE_ROTATION_90) ?
1300 SDE_ROT_FLAG_ROT_90 : 0;
1301 flags |= (item->flags & SDE_ROTATION_DEINTERLACE) ?
1302 SDE_ROT_FLAG_DEINTERLACE : 0;
1303 flags |= (item->flags & SDE_ROTATION_SECURE) ?
1304 SDE_ROT_FLAG_SECURE_OVERLAY_SESSION : 0;
1305
1306 sspp_cfg.img_width = item->input.width;
1307 sspp_cfg.img_height = item->input.height;
1308 sspp_cfg.fmt = sde_get_format_params(item->input.format);
1309 if (!sspp_cfg.fmt) {
1310 SDEROT_ERR("null format\n");
1311 return -EINVAL;
1312 }
1313 sspp_cfg.src_rect = &item->src_rect;
1314 sspp_cfg.data = &entry->src_buf;
1315 sde_mdp_get_plane_sizes(sspp_cfg.fmt, item->input.width,
1316 item->input.height, &sspp_cfg.src_plane,
1317 0, /* No bwc_mode */
1318 (flags & SDE_ROT_FLAG_SOURCE_ROTATED_90) ?
1319 true : false);
1320
1321 rot->ops.setup_rotator_fetchengine(ctx, ctx->q_id,
Benjamin Chanfb6faa32016-08-16 17:21:01 -04001322 &sspp_cfg, danger_lut, safe_lut,
1323 entry->dnsc_factor_w, entry->dnsc_factor_h, flags);
Alan Kwong9487de22016-01-16 22:06:36 -05001324
1325 wb_cfg.img_width = item->output.width;
1326 wb_cfg.img_height = item->output.height;
1327 wb_cfg.fmt = sde_get_format_params(item->output.format);
1328 wb_cfg.dst_rect = &item->dst_rect;
1329 wb_cfg.data = &entry->dst_buf;
1330 sde_mdp_get_plane_sizes(wb_cfg.fmt, item->output.width,
1331 item->output.height, &wb_cfg.dst_plane,
1332 0, /* No bwc_mode */
1333 (flags & SDE_ROT_FLAG_ROT_90) ? true : false);
1334
1335 wb_cfg.v_downscale_factor = entry->dnsc_factor_h;
1336 wb_cfg.h_downscale_factor = entry->dnsc_factor_w;
1337
1338 rot->ops.setup_rotator_wbengine(ctx, ctx->q_id, &wb_cfg, flags);
1339
1340 /* setup VA mapping for debugfs */
1341 if (rot->dbgmem) {
1342 sde_hw_rotator_map_vaddr(&ctx->src_dbgbuf,
1343 &item->input,
1344 &entry->src_buf);
1345
1346 sde_hw_rotator_map_vaddr(&ctx->dst_dbgbuf,
1347 &item->output,
1348 &entry->dst_buf);
1349 }
1350
1351 if (mdata->default_ot_rd_limit) {
1352 struct sde_mdp_set_ot_params ot_params;
1353
1354 memset(&ot_params, 0, sizeof(struct sde_mdp_set_ot_params));
1355 ot_params.xin_id = XIN_SSPP;
1356 ot_params.num = 0; /* not used */
Alan Kwongeffb5ee2016-03-12 19:47:45 -05001357 ot_params.width = entry->perf->config.input.width;
1358 ot_params.height = entry->perf->config.input.height;
1359 ot_params.fps = entry->perf->config.frame_rate;
Alan Kwong9487de22016-01-16 22:06:36 -05001360 ot_params.reg_off_vbif_lim_conf = MMSS_VBIF_RD_LIM_CONF;
1361 ot_params.reg_off_mdp_clk_ctrl =
1362 MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0;
1363 ot_params.bit_off_mdp_clk_ctrl =
1364 MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN0;
Alan Kwongeffb5ee2016-03-12 19:47:45 -05001365 ot_params.fmt = entry->perf->config.input.format;
Alan Kwong9487de22016-01-16 22:06:36 -05001366 sde_mdp_set_ot_limit(&ot_params);
1367 }
1368
1369 if (mdata->default_ot_wr_limit) {
1370 struct sde_mdp_set_ot_params ot_params;
1371
1372 memset(&ot_params, 0, sizeof(struct sde_mdp_set_ot_params));
1373 ot_params.xin_id = XIN_WRITEBACK;
1374 ot_params.num = 0; /* not used */
Alan Kwongeffb5ee2016-03-12 19:47:45 -05001375 ot_params.width = entry->perf->config.input.width;
1376 ot_params.height = entry->perf->config.input.height;
1377 ot_params.fps = entry->perf->config.frame_rate;
Alan Kwong9487de22016-01-16 22:06:36 -05001378 ot_params.reg_off_vbif_lim_conf = MMSS_VBIF_WR_LIM_CONF;
1379 ot_params.reg_off_mdp_clk_ctrl =
1380 MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0;
1381 ot_params.bit_off_mdp_clk_ctrl =
1382 MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN1;
Alan Kwongeffb5ee2016-03-12 19:47:45 -05001383 ot_params.fmt = entry->perf->config.input.format;
Alan Kwong9487de22016-01-16 22:06:36 -05001384 sde_mdp_set_ot_limit(&ot_params);
1385 }
1386
1387 if (test_bit(SDE_QOS_PER_PIPE_LUT, mdata->sde_qos_map)) {
1388 u32 qos_lut = 0; /* low priority for nrt read client */
1389
1390 trace_rot_perf_set_qos_luts(XIN_SSPP, sspp_cfg.fmt->format,
1391 qos_lut, sde_mdp_is_linear_format(sspp_cfg.fmt));
1392
1393 SDE_ROTREG_WRITE(rot->mdss_base, ROT_SSPP_CREQ_LUT, qos_lut);
1394 }
1395
1396 if (mdata->npriority_lvl > 0) {
1397 u32 mask, reg_val, i, vbif_qos;
1398
1399 for (i = 0; i < mdata->npriority_lvl; i++) {
1400 reg_val = SDE_VBIF_READ(mdata,
1401 MMSS_VBIF_NRT_VBIF_QOS_REMAP_00 + i*4);
1402 mask = 0x3 << (XIN_SSPP * 2);
1403 reg_val &= ~(mask);
1404 vbif_qos = mdata->vbif_nrt_qos[i];
1405 reg_val |= vbif_qos << (XIN_SSPP * 2);
1406 /* ensure write is issued after the read operation */
1407 mb();
1408 SDE_VBIF_WRITE(mdata,
1409 MMSS_VBIF_NRT_VBIF_QOS_REMAP_00 + i*4,
1410 reg_val);
1411 }
1412 }
1413
1414 /* Enable write gather for writeback to remove write gaps, which
1415 * may hang AXI/BIMC/SDE.
1416 */
1417 SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_WRITE_GATHTER_EN,
1418 BIT(XIN_WRITEBACK));
1419
1420 return 0;
1421}
1422
1423/*
1424 * sde_hw_rotator_kickoff - kickoff processing on the given entry
1425 * @hw: Pointer to rotator resource
1426 * @entry: Pointer to rotation entry
1427 */
1428static int sde_hw_rotator_kickoff(struct sde_rot_hw_resource *hw,
1429 struct sde_rot_entry *entry)
1430{
1431 struct sde_hw_rotator *rot;
1432 struct sde_hw_rotator_resource_info *resinfo;
1433 struct sde_hw_rotator_context *ctx;
1434 int ret = 0;
1435
1436 if (!hw || !entry) {
1437 SDEROT_ERR("null hw resource/entry\n");
1438 return -EINVAL;
1439 }
1440
1441 resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
1442 rot = resinfo->rot;
1443
1444 /* Lookup rotator context from session-id */
1445 ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id, hw->wb_id);
1446 if (!ctx) {
1447 SDEROT_ERR("Cannot locate rotator ctx from sesison id:%d\n",
1448 entry->item.session_id);
1449 }
1450 WARN_ON(ctx == NULL);
1451
1452 ret = sde_smmu_ctrl(1);
1453 if (IS_ERR_VALUE(ret)) {
1454 SDEROT_ERR("IOMMU attach failed\n");
1455 return ret;
1456 }
1457
1458 rot->ops.start_rotator(ctx, ctx->q_id);
1459
1460 return 0;
1461}
1462
1463/*
1464 * sde_hw_rotator_wait4done - wait for completion notification
1465 * @hw: Pointer to rotator resource
1466 * @entry: Pointer to rotation entry
1467 *
1468 * This function blocks until the given entry is complete, error
1469 * is detected, or timeout.
1470 */
1471static int sde_hw_rotator_wait4done(struct sde_rot_hw_resource *hw,
1472 struct sde_rot_entry *entry)
1473{
1474 struct sde_hw_rotator *rot;
1475 struct sde_hw_rotator_resource_info *resinfo;
1476 struct sde_hw_rotator_context *ctx;
1477 int ret;
1478
1479 if (!hw || !entry) {
1480 SDEROT_ERR("null hw resource/entry\n");
1481 return -EINVAL;
1482 }
1483
1484 resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
1485 rot = resinfo->rot;
1486
1487 /* Lookup rotator context from session-id */
1488 ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id, hw->wb_id);
1489 if (!ctx) {
1490 SDEROT_ERR("Cannot locate rotator ctx from sesison id:%d\n",
1491 entry->item.session_id);
1492 }
1493 WARN_ON(ctx == NULL);
1494
1495 ret = rot->ops.wait_rotator_done(ctx, ctx->q_id, 0);
1496
1497 sde_smmu_ctrl(0);
1498
1499 if (rot->dbgmem) {
1500 sde_hw_rotator_unmap_vaddr(&ctx->src_dbgbuf);
1501 sde_hw_rotator_unmap_vaddr(&ctx->dst_dbgbuf);
1502 }
1503
1504 /* Current rotator context job is finished, time to free up*/
1505 sde_hw_rotator_free_rotctx(rot, ctx);
1506
1507 return ret;
1508}
1509
1510/*
1511 * sde_rotator_hw_rev_init - setup feature and/or capability bitmask
1512 * @rot: Pointer to hw rotator
1513 *
1514 * This function initializes feature and/or capability bitmask based on
1515 * h/w version read from the device.
1516 */
1517static int sde_rotator_hw_rev_init(struct sde_hw_rotator *rot)
1518{
1519 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
1520 u32 hw_version;
1521
1522 if (!mdata) {
1523 SDEROT_ERR("null rotator data\n");
1524 return -EINVAL;
1525 }
1526
1527 hw_version = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_HW_VERSION);
1528 SDEROT_DBG("hw version %8.8x\n", hw_version);
1529
1530 clear_bit(SDE_QOS_PER_PIPE_IB, mdata->sde_qos_map);
1531 set_bit(SDE_QOS_OVERHEAD_FACTOR, mdata->sde_qos_map);
1532 clear_bit(SDE_QOS_CDP, mdata->sde_qos_map);
1533 set_bit(SDE_QOS_OTLIM, mdata->sde_qos_map);
1534 set_bit(SDE_QOS_PER_PIPE_LUT, mdata->sde_qos_map);
1535 clear_bit(SDE_QOS_SIMPLIFIED_PREFILL, mdata->sde_qos_map);
1536
1537 set_bit(SDE_CAPS_R3_WB, mdata->sde_caps_map);
1538
Benjamin Chanfb6faa32016-08-16 17:21:01 -04001539 if (hw_version != SDE_ROT_TYPE_V1_0) {
1540 SDEROT_DBG("Supporting 1.5 downscale for SDE Rotator\n");
1541 set_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map);
1542 }
1543
Alan Kwong9487de22016-01-16 22:06:36 -05001544 return 0;
1545}
1546
1547/*
1548 * sde_hw_rotator_rotirq_handler - non-regdma interrupt handler
1549 * @irq: Interrupt number
1550 * @ptr: Pointer to private handle provided during registration
1551 *
1552 * This function services rotator interrupt and wakes up waiting client
1553 * with pending rotation requests already submitted to h/w.
1554 */
1555static irqreturn_t sde_hw_rotator_rotirq_handler(int irq, void *ptr)
1556{
1557 struct sde_hw_rotator *rot = ptr;
1558 struct sde_hw_rotator_context *ctx;
1559 irqreturn_t ret = IRQ_NONE;
1560 u32 isr;
1561
1562 isr = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_INTR_STATUS);
1563
1564 SDEROT_DBG("intr_status = %8.8x\n", isr);
1565
1566 if (isr & ROT_DONE_MASK) {
1567 if (rot->irq_num >= 0)
Alan Kwong818b7fc2016-07-24 22:07:41 -04001568 sde_hw_rotator_disable_irq(rot);
Alan Kwong9487de22016-01-16 22:06:36 -05001569 SDEROT_DBG("Notify rotator complete\n");
1570
1571 /* Normal rotator only 1 session, no need to lookup */
1572 ctx = rot->rotCtx[0][0];
1573 WARN_ON(ctx == NULL);
1574 complete_all(&ctx->rot_comp);
1575
1576 spin_lock(&rot->rotisr_lock);
1577 SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
1578 ROT_DONE_CLEAR);
1579 spin_unlock(&rot->rotisr_lock);
1580 ret = IRQ_HANDLED;
1581 }
1582
1583 return ret;
1584}
1585
1586/*
1587 * sde_hw_rotator_regdmairq_handler - regdma interrupt handler
1588 * @irq: Interrupt number
1589 * @ptr: Pointer to private handle provided during registration
1590 *
1591 * This function services rotator interrupt, decoding the source of
1592 * events (high/low priority queue), and wakes up all waiting clients
1593 * with pending rotation requests already submitted to h/w.
1594 */
1595static irqreturn_t sde_hw_rotator_regdmairq_handler(int irq, void *ptr)
1596{
1597 struct sde_hw_rotator *rot = ptr;
1598 struct sde_hw_rotator_context *ctx;
1599 irqreturn_t ret = IRQ_NONE;
1600 u32 isr;
1601 u32 ts;
1602 u32 q_id;
1603
1604 isr = SDE_ROTREG_READ(rot->mdss_base, REGDMA_CSR_REGDMA_INT_STATUS);
Alan Kwong818b7fc2016-07-24 22:07:41 -04001605 /* acknowledge interrupt before reading latest timestamp */
1606 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_INT_CLEAR, isr);
Alan Kwong9487de22016-01-16 22:06:36 -05001607 ts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);
1608
1609 SDEROT_DBG("intr_status = %8.8x, sw_TS:%X\n", isr, ts);
1610
1611 /* Any REGDMA status, including error and watchdog timer, should
1612 * trigger and wake up waiting thread
1613 */
1614 if (isr & (REGDMA_INT_HIGH_MASK | REGDMA_INT_LOW_MASK)) {
1615 spin_lock(&rot->rotisr_lock);
1616
1617 /*
1618 * Obtain rotator context based on timestamp from regdma
1619 * and low/high interrupt status
1620 */
1621 if (isr & REGDMA_INT_HIGH_MASK) {
1622 q_id = ROT_QUEUE_HIGH_PRIORITY;
1623 ts = ts & SDE_REGDMA_SWTS_MASK;
1624 } else if (isr & REGDMA_INT_LOW_MASK) {
1625 q_id = ROT_QUEUE_LOW_PRIORITY;
1626 ts = (ts >> SDE_REGDMA_SWTS_SHIFT) &
1627 SDE_REGDMA_SWTS_MASK;
1628 }
1629
1630 ctx = rot->rotCtx[q_id][ts & SDE_HW_ROT_REGDMA_SEG_MASK];
Alan Kwong9487de22016-01-16 22:06:36 -05001631
1632 /*
1633 * Wake up all waiting context from the current and previous
1634 * SW Timestamp.
1635 */
Alan Kwong818b7fc2016-07-24 22:07:41 -04001636 while (ctx &&
1637 sde_hw_rotator_elapsed_swts(ctx->timestamp, ts) >= 0) {
Alan Kwong9487de22016-01-16 22:06:36 -05001638 ctx->last_regdma_isr_status = isr;
1639 ctx->last_regdma_timestamp = ts;
1640 SDEROT_DBG(
Alan Kwongf987ea32016-07-06 12:11:44 -04001641 "regdma complete: ctx:%p, ts:%X\n", ctx, ts);
Alan Kwong818b7fc2016-07-24 22:07:41 -04001642 wake_up_all(&ctx->regdma_waitq);
Alan Kwong9487de22016-01-16 22:06:36 -05001643
1644 ts = (ts - 1) & SDE_REGDMA_SWTS_MASK;
1645 ctx = rot->rotCtx[q_id]
1646 [ts & SDE_HW_ROT_REGDMA_SEG_MASK];
Alan Kwong818b7fc2016-07-24 22:07:41 -04001647 };
Alan Kwong9487de22016-01-16 22:06:36 -05001648
1649 spin_unlock(&rot->rotisr_lock);
1650 ret = IRQ_HANDLED;
1651 } else if (isr & REGDMA_INT_ERR_MASK) {
1652 /*
1653 * For REGDMA Err, we save the isr info and wake up
1654 * all waiting contexts
1655 */
1656 int i, j;
1657
1658 SDEROT_ERR(
1659 "regdma err isr:%X, wake up all waiting contexts\n",
1660 isr);
1661
1662 spin_lock(&rot->rotisr_lock);
1663
1664 for (i = 0; i < ROT_QUEUE_MAX; i++) {
1665 for (j = 0; j < SDE_HW_ROT_REGDMA_TOTAL_CTX; j++) {
1666 ctx = rot->rotCtx[i][j];
1667 if (ctx && ctx->last_regdma_isr_status == 0) {
1668 ctx->last_regdma_isr_status = isr;
1669 ctx->last_regdma_timestamp = ts;
Alan Kwong818b7fc2016-07-24 22:07:41 -04001670 wake_up_all(&ctx->regdma_waitq);
Alan Kwong9487de22016-01-16 22:06:36 -05001671 SDEROT_DBG("Wakeup rotctx[%d][%d]:%p\n",
1672 i, j, ctx);
1673 }
1674 }
1675 }
1676
Alan Kwong9487de22016-01-16 22:06:36 -05001677 spin_unlock(&rot->rotisr_lock);
1678 ret = IRQ_HANDLED;
1679 }
1680
1681 return ret;
1682}
1683
1684/*
1685 * sde_hw_rotator_validate_entry - validate rotation entry
1686 * @mgr: Pointer to rotator manager
1687 * @entry: Pointer to rotation entry
1688 *
1689 * This function validates the given rotation entry and provides possible
1690 * fixup (future improvement) if available. This function returns 0 if
1691 * the entry is valid, and returns error code otherwise.
1692 */
1693static int sde_hw_rotator_validate_entry(struct sde_rot_mgr *mgr,
1694 struct sde_rot_entry *entry)
1695{
Benjamin Chanfb6faa32016-08-16 17:21:01 -04001696 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
Alan Kwong9487de22016-01-16 22:06:36 -05001697 int ret = 0;
1698 u16 src_w, src_h, dst_w, dst_h;
1699 struct sde_rotation_item *item = &entry->item;
1700 struct sde_mdp_format_params *fmt;
1701
1702 src_w = item->src_rect.w;
1703 src_h = item->src_rect.h;
1704
1705 if (item->flags & SDE_ROTATION_90) {
1706 dst_w = item->dst_rect.h;
1707 dst_h = item->dst_rect.w;
1708 } else {
1709 dst_w = item->dst_rect.w;
1710 dst_h = item->dst_rect.h;
1711 }
1712
1713 entry->dnsc_factor_w = 0;
1714 entry->dnsc_factor_h = 0;
1715
1716 if ((src_w != dst_w) || (src_h != dst_h)) {
1717 if ((src_w % dst_w) || (src_h % dst_h)) {
1718 SDEROT_DBG("non integral scale not support\n");
1719 ret = -EINVAL;
Benjamin Chanfb6faa32016-08-16 17:21:01 -04001720 goto dnsc_1p5_check;
Alan Kwong9487de22016-01-16 22:06:36 -05001721 }
1722 entry->dnsc_factor_w = src_w / dst_w;
1723 if ((entry->dnsc_factor_w & (entry->dnsc_factor_w - 1)) ||
1724 (entry->dnsc_factor_w > 64)) {
1725 SDEROT_DBG("non power-of-2 w_scale not support\n");
1726 ret = -EINVAL;
1727 goto dnsc_err;
1728 }
1729 entry->dnsc_factor_h = src_h / dst_h;
1730 if ((entry->dnsc_factor_h & (entry->dnsc_factor_h - 1)) ||
1731 (entry->dnsc_factor_h > 64)) {
1732 SDEROT_DBG("non power-of-2 h_scale not support\n");
1733 ret = -EINVAL;
1734 goto dnsc_err;
1735 }
1736 }
1737
1738 fmt = sde_get_format_params(item->output.format);
1739 /* Tiled format downscale support not applied to AYUV tiled */
1740 if (sde_mdp_is_tilea5x_format(fmt) && (entry->dnsc_factor_h > 4)) {
1741 SDEROT_DBG("max downscale for tiled format is 4\n");
1742 ret = -EINVAL;
1743 goto dnsc_err;
1744 }
1745 if (sde_mdp_is_ubwc_format(fmt) && (entry->dnsc_factor_h > 2)) {
1746 SDEROT_DBG("downscale with ubwc cannot be more than 2\n");
1747 ret = -EINVAL;
1748 }
Benjamin Chanfb6faa32016-08-16 17:21:01 -04001749 goto dnsc_err;
1750
1751dnsc_1p5_check:
1752 /* Check for 1.5 downscale that only applies to V2 HW */
1753 if (test_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map)) {
1754 entry->dnsc_factor_w = src_w / dst_w;
1755 if ((entry->dnsc_factor_w != 1) ||
1756 ((dst_w * 3) != (src_w * 2))) {
1757 SDEROT_DBG(
1758 "No supporting non 1.5 downscale width ratio, src_w:%d, dst_w:%d\n",
1759 src_w, dst_w);
1760 ret = -EINVAL;
1761 goto dnsc_err;
1762 }
1763
1764 entry->dnsc_factor_h = src_h / dst_h;
1765 if ((entry->dnsc_factor_h != 1) ||
1766 ((dst_h * 3) != (src_h * 2))) {
1767 SDEROT_DBG(
1768 "Not supporting non 1.5 downscale height ratio, src_h:%d, dst_h:%d\n",
1769 src_h, dst_h);
1770 ret = -EINVAL;
1771 goto dnsc_err;
1772 }
1773 ret = 0;
1774 }
Alan Kwong9487de22016-01-16 22:06:36 -05001775
1776dnsc_err:
1777 /* Downscaler does not support asymmetrical dnsc */
1778 if (entry->dnsc_factor_w != entry->dnsc_factor_h) {
1779 SDEROT_DBG("asymmetric downscale not support\n");
1780 ret = -EINVAL;
1781 }
1782
1783 if (ret) {
1784 entry->dnsc_factor_w = 0;
1785 entry->dnsc_factor_h = 0;
1786 }
1787 return ret;
1788}
1789
1790/*
1791 * sde_hw_rotator_show_caps - output capability info to sysfs 'caps' file
1792 * @mgr: Pointer to rotator manager
1793 * @attr: Pointer to device attribute interface
1794 * @buf: Pointer to output buffer
1795 * @len: Length of output buffer
1796 */
1797static ssize_t sde_hw_rotator_show_caps(struct sde_rot_mgr *mgr,
1798 struct device_attribute *attr, char *buf, ssize_t len)
1799{
1800 struct sde_hw_rotator *hw_data;
1801 int cnt = 0;
1802
1803 if (!mgr || !buf)
1804 return 0;
1805
1806 hw_data = mgr->hw_data;
1807
1808#define SPRINT(fmt, ...) \
1809 (cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
1810
1811 /* insert capabilities here */
1812
1813#undef SPRINT
1814 return cnt;
1815}
1816
1817/*
1818 * sde_hw_rotator_show_state - output state info to sysfs 'state' file
1819 * @mgr: Pointer to rotator manager
1820 * @attr: Pointer to device attribute interface
1821 * @buf: Pointer to output buffer
1822 * @len: Length of output buffer
1823 */
1824static ssize_t sde_hw_rotator_show_state(struct sde_rot_mgr *mgr,
1825 struct device_attribute *attr, char *buf, ssize_t len)
1826{
1827 struct sde_hw_rotator *rot;
1828 struct sde_hw_rotator_context *ctx;
1829 int cnt = 0;
1830 int num_active = 0;
1831 int i, j;
1832
1833 if (!mgr || !buf) {
1834 SDEROT_ERR("null parameters\n");
1835 return 0;
1836 }
1837
1838 rot = mgr->hw_data;
1839
1840#define SPRINT(fmt, ...) \
1841 (cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
1842
1843 if (rot) {
1844 SPRINT("rot_mode=%d\n", rot->mode);
1845 SPRINT("irq_num=%d\n", rot->irq_num);
1846
1847 if (rot->mode == ROT_REGDMA_OFF) {
1848 SPRINT("max_active=1\n");
1849 SPRINT("num_active=%d\n", rot->rotCtx[0][0] ? 1 : 0);
1850 } else {
1851 for (i = 0; i < ROT_QUEUE_MAX; i++) {
1852 for (j = 0; j < SDE_HW_ROT_REGDMA_TOTAL_CTX;
1853 j++) {
1854 ctx = rot->rotCtx[i][j];
1855
1856 if (ctx) {
1857 SPRINT(
1858 "rotCtx[%d][%d]:%p\n",
1859 i, j, ctx);
1860 ++num_active;
1861 }
1862 }
1863 }
1864
1865 SPRINT("max_active=%d\n", SDE_HW_ROT_REGDMA_TOTAL_CTX);
1866 SPRINT("num_active=%d\n", num_active);
1867 }
1868 }
1869
1870#undef SPRINT
1871 return cnt;
1872}
1873
1874/*
1875 * sde_hw_rotator_parse_dt - parse r3 specific device tree settings
1876 * @hw_data: Pointer to rotator hw
1877 * @dev: Pointer to platform device
1878 */
1879static int sde_hw_rotator_parse_dt(struct sde_hw_rotator *hw_data,
1880 struct platform_device *dev)
1881{
1882 int ret = 0;
1883 u32 data;
1884
1885 if (!hw_data || !dev)
1886 return -EINVAL;
1887
1888 ret = of_property_read_u32(dev->dev.of_node, "qcom,mdss-rot-mode",
1889 &data);
1890 if (ret) {
1891 SDEROT_DBG("default to regdma off\n");
1892 ret = 0;
1893 hw_data->mode = ROT_REGDMA_OFF;
1894 } else if (data < ROT_REGDMA_MAX) {
1895 SDEROT_DBG("set to regdma mode %d\n", data);
1896 hw_data->mode = data;
1897 } else {
1898 SDEROT_ERR("regdma mode out of range. default to regdma off\n");
1899 hw_data->mode = ROT_REGDMA_OFF;
1900 }
1901
1902 ret = of_property_read_u32(dev->dev.of_node,
1903 "qcom,mdss-highest-bank-bit", &data);
1904 if (ret) {
1905 SDEROT_DBG("default to A5X bank\n");
1906 ret = 0;
1907 hw_data->highest_bank = 2;
1908 } else {
1909 SDEROT_DBG("set highest bank bit to %d\n", data);
1910 hw_data->highest_bank = data;
1911 }
1912
1913 return ret;
1914}
1915
1916/*
1917 * sde_rotator_r3_init - initialize the r3 module
1918 * @mgr: Pointer to rotator manager
1919 *
1920 * This function setup r3 callback functions, parses r3 specific
1921 * device tree settings, installs r3 specific interrupt handler,
1922 * as well as initializes r3 internal data structure.
1923 */
1924int sde_rotator_r3_init(struct sde_rot_mgr *mgr)
1925{
1926 struct sde_hw_rotator *rot;
1927 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
1928 int i;
1929 int ret;
1930
1931 rot = devm_kzalloc(&mgr->pdev->dev, sizeof(*rot), GFP_KERNEL);
1932 if (!rot)
1933 return -ENOMEM;
1934
1935 mgr->hw_data = rot;
1936 mgr->queue_count = ROT_QUEUE_MAX;
1937
1938 rot->mdss_base = mdata->sde_io.base;
1939 rot->pdev = mgr->pdev;
1940
1941 /* Assign ops */
1942 mgr->ops_hw_destroy = sde_hw_rotator_destroy;
1943 mgr->ops_hw_alloc = sde_hw_rotator_alloc_ext;
1944 mgr->ops_hw_free = sde_hw_rotator_free_ext;
1945 mgr->ops_config_hw = sde_hw_rotator_config;
1946 mgr->ops_kickoff_entry = sde_hw_rotator_kickoff;
1947 mgr->ops_wait_for_entry = sde_hw_rotator_wait4done;
1948 mgr->ops_hw_validate_entry = sde_hw_rotator_validate_entry;
1949 mgr->ops_hw_show_caps = sde_hw_rotator_show_caps;
1950 mgr->ops_hw_show_state = sde_hw_rotator_show_state;
1951 mgr->ops_hw_create_debugfs = sde_rotator_r3_create_debugfs;
1952
1953 ret = sde_hw_rotator_parse_dt(mgr->hw_data, mgr->pdev);
1954 if (ret)
1955 goto error_parse_dt;
1956
1957 rot->irq_num = platform_get_irq(mgr->pdev, 0);
1958 if (rot->irq_num < 0) {
1959 SDEROT_ERR("fail to get rotator irq\n");
1960 } else {
1961 if (rot->mode == ROT_REGDMA_OFF)
1962 ret = devm_request_threaded_irq(&mgr->pdev->dev,
1963 rot->irq_num,
1964 sde_hw_rotator_rotirq_handler,
1965 NULL, 0, "sde_rotator_r3", rot);
1966 else
1967 ret = devm_request_threaded_irq(&mgr->pdev->dev,
1968 rot->irq_num,
1969 sde_hw_rotator_regdmairq_handler,
1970 NULL, 0, "sde_rotator_r3", rot);
1971 if (ret) {
1972 SDEROT_ERR("fail to request irq r:%d\n", ret);
1973 rot->irq_num = -1;
1974 } else {
1975 disable_irq(rot->irq_num);
1976 }
1977 }
Alan Kwong818b7fc2016-07-24 22:07:41 -04001978 atomic_set(&rot->irq_enabled, 0);
Alan Kwong9487de22016-01-16 22:06:36 -05001979
1980 setup_rotator_ops(&rot->ops, rot->mode);
1981
1982 spin_lock_init(&rot->rotctx_lock);
1983 spin_lock_init(&rot->rotisr_lock);
1984
1985 /* REGDMA initialization */
1986 if (rot->mode == ROT_REGDMA_OFF) {
1987 for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
1988 rot->cmd_wr_ptr[0][i] = &rot->cmd_queue[
1989 SDE_HW_ROT_REGDMA_SEG_SIZE * i];
1990 } else {
1991 for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
1992 rot->cmd_wr_ptr[ROT_QUEUE_HIGH_PRIORITY][i] =
1993 (u32 *)(rot->mdss_base +
1994 REGDMA_RAM_REGDMA_CMD_RAM +
1995 SDE_HW_ROT_REGDMA_SEG_SIZE * 4 * i);
1996
1997 for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
1998 rot->cmd_wr_ptr[ROT_QUEUE_LOW_PRIORITY][i] =
1999 (u32 *)(rot->mdss_base +
2000 REGDMA_RAM_REGDMA_CMD_RAM +
2001 SDE_HW_ROT_REGDMA_SEG_SIZE * 4 *
2002 (i + SDE_HW_ROT_REGDMA_TOTAL_CTX));
2003 }
2004
2005 atomic_set(&rot->timestamp[0], 0);
2006 atomic_set(&rot->timestamp[1], 0);
Alan Kwong9487de22016-01-16 22:06:36 -05002007
2008 ret = sde_rotator_hw_rev_init(rot);
2009 if (ret)
2010 goto error_hw_rev_init;
2011
Alan Kwong315cd772016-08-03 22:29:42 -04002012 /* set rotator CBCR to shutoff memory/periphery on clock off.*/
2013 clk_set_flags(mgr->rot_clk[mgr->core_clk_idx].clk,
2014 CLKFLAG_NORETAIN_MEM);
2015 clk_set_flags(mgr->rot_clk[mgr->core_clk_idx].clk,
2016 CLKFLAG_NORETAIN_PERIPH);
2017
Alan Kwong9487de22016-01-16 22:06:36 -05002018 return 0;
2019error_hw_rev_init:
2020 if (rot->irq_num >= 0)
2021 devm_free_irq(&mgr->pdev->dev, rot->irq_num, mdata);
2022 devm_kfree(&mgr->pdev->dev, mgr->hw_data);
2023error_parse_dt:
2024 return ret;
2025}