/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/delay.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dma-buf.h>
#include <linux/msm_ion.h>
#include <linux/clk.h>
#include <linux/clk/qcom.h>

#include "sde_rotator_core.h"
#include "sde_rotator_util.h"
#include "sde_rotator_smmu.h"
#include "sde_rotator_r3.h"
#include "sde_rotator_r3_internal.h"
#include "sde_rotator_r3_hwio.h"
#include "sde_rotator_r3_debug.h"
#include "sde_rotator_trace.h"
#include "sde_rotator_debug.h"

#define RES_UHD (3840*2160)

/* traffic shaping clock ticks = finish_time x 19.2MHz */
#define TRAFFIC_SHAPE_CLKTICK_14MS 268800
#define TRAFFIC_SHAPE_CLKTICK_12MS 230400
#define TRAFFIC_SHAPE_VSYNC_CLK 19200000
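/*
 * For reference (derived from the comment above): each tick count is the
 * target finish time multiplied by the 19.2 MHz tick rate, e.g.
 * 12 ms x 19,200,000 Hz = 230,400 ticks and 14 ms x 19.2 MHz = 268,800 ticks.
 */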

/* XIN mapping */
#define XIN_SSPP 0
#define XIN_WRITEBACK 1

/* wait for at most 2 vsync for lowest refresh rate (24hz) */
#define KOFF_TIMEOUT (42 * 32)

/* default stream buffer headroom in lines */
#define DEFAULT_SBUF_HEADROOM 20
#define DEFAULT_UBWC_MALSIZE 0
#define DEFAULT_UBWC_SWIZZLE 0

#define DEFAULT_MAXLINEWIDTH 4096

/* stride alignment requirement for avoiding partial writes */
#define PARTIAL_WRITE_ALIGNMENT 0x1F
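/*
 * Note: 0x1F checks the low five bits of each destination plane stride,
 * i.e. strides must be 32-byte aligned before partial writes are disabled
 * in sde_hw_rotator_setup_wbengine() below.
 */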

/* Macro for constructing the REGDMA command */
#define SDE_REGDMA_WRITE(p, off, data) \
	do { \
		SDEROT_DBG("SDEREG.W:[%s:0x%X] <= 0x%X\n", #off, (off),\
				(u32)(data));\
		*p++ = REGDMA_OP_REGWRITE | \
			((off) & REGDMA_ADDR_OFFSET_MASK); \
		*p++ = (data); \
	} while (0)

#define SDE_REGDMA_MODIFY(p, off, mask, data) \
	do { \
		SDEROT_DBG("SDEREG.M:[%s:0x%X] <= 0x%X\n", #off, (off),\
				(u32)(data));\
		*p++ = REGDMA_OP_REGMODIFY | \
			((off) & REGDMA_ADDR_OFFSET_MASK); \
		*p++ = (mask); \
		*p++ = (data); \
	} while (0)

#define SDE_REGDMA_BLKWRITE_INC(p, off, len) \
	do { \
		SDEROT_DBG("SDEREG.B:[%s:0x%X:0x%X]\n", #off, (off),\
				(u32)(len));\
		*p++ = REGDMA_OP_BLKWRITE_INC | \
			((off) & REGDMA_ADDR_OFFSET_MASK); \
		*p++ = (len); \
	} while (0)

#define SDE_REGDMA_BLKWRITE_DATA(p, data) \
	do { \
		SDEROT_DBG("SDEREG.I:[:] <= 0x%X\n", (u32)(data));\
		*(p) = (data); \
		(p)++; \
	} while (0)

/* Macro for directly accessing mapped registers */
#define SDE_ROTREG_WRITE(base, off, data) \
	do { \
		SDEROT_DBG("SDEREG.D:[%s:0x%X] <= 0x%X\n", #off, (off)\
				, (u32)(data));\
		writel_relaxed(data, (base + (off))); \
	} while (0)

#define SDE_ROTREG_READ(base, off) \
	readl_relaxed(base + (off))
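
/*
 * Usage sketch (illustrative only): the REGDMA macros above append opcodes
 * to a command segment through a write pointer, in the same way the setup
 * routines later in this file do, e.g.:
 *
 *	u32 *wrptr = sde_hw_rotator_get_regdma_segment(ctx);
 *
 *	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_SIZE, 2);
 *	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x00010001);
 *	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
 *	SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, 1);
 *
 *	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
 */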

static u32 sde_hw_rotator_v3_inpixfmts[] = {
	SDE_PIX_FMT_XRGB_8888,
	SDE_PIX_FMT_ARGB_8888,
	SDE_PIX_FMT_ABGR_8888,
	SDE_PIX_FMT_RGBA_8888,
	SDE_PIX_FMT_BGRA_8888,
	SDE_PIX_FMT_RGBX_8888,
	SDE_PIX_FMT_BGRX_8888,
	SDE_PIX_FMT_XBGR_8888,
	SDE_PIX_FMT_RGBA_5551,
	SDE_PIX_FMT_ARGB_1555,
	SDE_PIX_FMT_ABGR_1555,
	SDE_PIX_FMT_BGRA_5551,
	SDE_PIX_FMT_BGRX_5551,
	SDE_PIX_FMT_RGBX_5551,
	SDE_PIX_FMT_XBGR_1555,
	SDE_PIX_FMT_XRGB_1555,
	SDE_PIX_FMT_ARGB_4444,
	SDE_PIX_FMT_RGBA_4444,
	SDE_PIX_FMT_BGRA_4444,
	SDE_PIX_FMT_ABGR_4444,
	SDE_PIX_FMT_RGBX_4444,
	SDE_PIX_FMT_XRGB_4444,
	SDE_PIX_FMT_BGRX_4444,
	SDE_PIX_FMT_XBGR_4444,
	SDE_PIX_FMT_RGB_888,
	SDE_PIX_FMT_BGR_888,
	SDE_PIX_FMT_RGB_565,
	SDE_PIX_FMT_BGR_565,
	SDE_PIX_FMT_Y_CB_CR_H2V2,
	SDE_PIX_FMT_Y_CR_CB_H2V2,
	SDE_PIX_FMT_Y_CR_CB_GH2V2,
	SDE_PIX_FMT_Y_CBCR_H2V2,
	SDE_PIX_FMT_Y_CRCB_H2V2,
	SDE_PIX_FMT_Y_CBCR_H1V2,
	SDE_PIX_FMT_Y_CRCB_H1V2,
	SDE_PIX_FMT_Y_CBCR_H2V1,
	SDE_PIX_FMT_Y_CRCB_H2V1,
	SDE_PIX_FMT_YCBYCR_H2V1,
	SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
	SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
	SDE_PIX_FMT_RGBA_8888_UBWC,
	SDE_PIX_FMT_RGBX_8888_UBWC,
	SDE_PIX_FMT_RGB_565_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
	SDE_PIX_FMT_RGBA_1010102,
	SDE_PIX_FMT_RGBX_1010102,
	SDE_PIX_FMT_ARGB_2101010,
	SDE_PIX_FMT_XRGB_2101010,
	SDE_PIX_FMT_BGRA_1010102,
	SDE_PIX_FMT_BGRX_1010102,
	SDE_PIX_FMT_ABGR_2101010,
	SDE_PIX_FMT_XBGR_2101010,
	SDE_PIX_FMT_RGBA_1010102_UBWC,
	SDE_PIX_FMT_RGBX_1010102_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_P010,
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
};

static u32 sde_hw_rotator_v3_outpixfmts[] = {
	SDE_PIX_FMT_XRGB_8888,
	SDE_PIX_FMT_ARGB_8888,
	SDE_PIX_FMT_ABGR_8888,
	SDE_PIX_FMT_RGBA_8888,
	SDE_PIX_FMT_BGRA_8888,
	SDE_PIX_FMT_RGBX_8888,
	SDE_PIX_FMT_BGRX_8888,
	SDE_PIX_FMT_XBGR_8888,
	SDE_PIX_FMT_RGBA_5551,
	SDE_PIX_FMT_ARGB_1555,
	SDE_PIX_FMT_ABGR_1555,
	SDE_PIX_FMT_BGRA_5551,
	SDE_PIX_FMT_BGRX_5551,
	SDE_PIX_FMT_RGBX_5551,
	SDE_PIX_FMT_XBGR_1555,
	SDE_PIX_FMT_XRGB_1555,
	SDE_PIX_FMT_ARGB_4444,
	SDE_PIX_FMT_RGBA_4444,
	SDE_PIX_FMT_BGRA_4444,
	SDE_PIX_FMT_ABGR_4444,
	SDE_PIX_FMT_RGBX_4444,
	SDE_PIX_FMT_XRGB_4444,
	SDE_PIX_FMT_BGRX_4444,
	SDE_PIX_FMT_XBGR_4444,
	SDE_PIX_FMT_RGB_888,
	SDE_PIX_FMT_BGR_888,
	SDE_PIX_FMT_RGB_565,
	SDE_PIX_FMT_BGR_565,
	/* SDE_PIX_FMT_Y_CB_CR_H2V2 */
	/* SDE_PIX_FMT_Y_CR_CB_H2V2 */
	/* SDE_PIX_FMT_Y_CR_CB_GH2V2 */
	SDE_PIX_FMT_Y_CBCR_H2V2,
	SDE_PIX_FMT_Y_CRCB_H2V2,
	SDE_PIX_FMT_Y_CBCR_H1V2,
	SDE_PIX_FMT_Y_CRCB_H1V2,
	SDE_PIX_FMT_Y_CBCR_H2V1,
	SDE_PIX_FMT_Y_CRCB_H2V1,
	/* SDE_PIX_FMT_YCBYCR_H2V1 */
	SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
	SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
	SDE_PIX_FMT_RGBA_8888_UBWC,
	SDE_PIX_FMT_RGBX_8888_UBWC,
	SDE_PIX_FMT_RGB_565_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
	SDE_PIX_FMT_RGBA_1010102,
	SDE_PIX_FMT_RGBX_1010102,
	/* SDE_PIX_FMT_ARGB_2101010 */
	/* SDE_PIX_FMT_XRGB_2101010 */
	SDE_PIX_FMT_BGRA_1010102,
	SDE_PIX_FMT_BGRX_1010102,
	/* SDE_PIX_FMT_ABGR_2101010 */
	/* SDE_PIX_FMT_XBGR_2101010 */
	SDE_PIX_FMT_RGBA_1010102_UBWC,
	SDE_PIX_FMT_RGBX_1010102_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_P010,
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
};

static u32 sde_hw_rotator_v4_inpixfmts[] = {
	SDE_PIX_FMT_XRGB_8888,
	SDE_PIX_FMT_ARGB_8888,
	SDE_PIX_FMT_ABGR_8888,
	SDE_PIX_FMT_RGBA_8888,
	SDE_PIX_FMT_BGRA_8888,
	SDE_PIX_FMT_RGBX_8888,
	SDE_PIX_FMT_BGRX_8888,
	SDE_PIX_FMT_XBGR_8888,
	SDE_PIX_FMT_RGBA_5551,
	SDE_PIX_FMT_ARGB_1555,
	SDE_PIX_FMT_ABGR_1555,
	SDE_PIX_FMT_BGRA_5551,
	SDE_PIX_FMT_BGRX_5551,
	SDE_PIX_FMT_RGBX_5551,
	SDE_PIX_FMT_XBGR_1555,
	SDE_PIX_FMT_XRGB_1555,
	SDE_PIX_FMT_ARGB_4444,
	SDE_PIX_FMT_RGBA_4444,
	SDE_PIX_FMT_BGRA_4444,
	SDE_PIX_FMT_ABGR_4444,
	SDE_PIX_FMT_RGBX_4444,
	SDE_PIX_FMT_XRGB_4444,
	SDE_PIX_FMT_BGRX_4444,
	SDE_PIX_FMT_XBGR_4444,
	SDE_PIX_FMT_RGB_888,
	SDE_PIX_FMT_BGR_888,
	SDE_PIX_FMT_RGB_565,
	SDE_PIX_FMT_BGR_565,
	SDE_PIX_FMT_Y_CB_CR_H2V2,
	SDE_PIX_FMT_Y_CR_CB_H2V2,
	SDE_PIX_FMT_Y_CR_CB_GH2V2,
	SDE_PIX_FMT_Y_CBCR_H2V2,
	SDE_PIX_FMT_Y_CRCB_H2V2,
	SDE_PIX_FMT_Y_CBCR_H1V2,
	SDE_PIX_FMT_Y_CRCB_H1V2,
	SDE_PIX_FMT_Y_CBCR_H2V1,
	SDE_PIX_FMT_Y_CRCB_H2V1,
	SDE_PIX_FMT_YCBYCR_H2V1,
	SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
	SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
	SDE_PIX_FMT_RGBA_8888_UBWC,
	SDE_PIX_FMT_RGBX_8888_UBWC,
	SDE_PIX_FMT_RGB_565_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
	SDE_PIX_FMT_RGBA_1010102,
	SDE_PIX_FMT_RGBX_1010102,
	SDE_PIX_FMT_ARGB_2101010,
	SDE_PIX_FMT_XRGB_2101010,
	SDE_PIX_FMT_BGRA_1010102,
	SDE_PIX_FMT_BGRX_1010102,
	SDE_PIX_FMT_ABGR_2101010,
	SDE_PIX_FMT_XBGR_2101010,
	SDE_PIX_FMT_RGBA_1010102_UBWC,
	SDE_PIX_FMT_RGBX_1010102_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_P010,
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE,
	SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
	SDE_PIX_FMT_Y_CRCB_H2V2_TILE,
	SDE_PIX_FMT_XRGB_8888_TILE,
	SDE_PIX_FMT_ARGB_8888_TILE,
	SDE_PIX_FMT_ABGR_8888_TILE,
	SDE_PIX_FMT_XBGR_8888_TILE,
	SDE_PIX_FMT_RGBA_8888_TILE,
	SDE_PIX_FMT_BGRA_8888_TILE,
	SDE_PIX_FMT_RGBX_8888_TILE,
	SDE_PIX_FMT_BGRX_8888_TILE,
	SDE_PIX_FMT_RGBA_1010102_TILE,
	SDE_PIX_FMT_RGBX_1010102_TILE,
	SDE_PIX_FMT_ARGB_2101010_TILE,
	SDE_PIX_FMT_XRGB_2101010_TILE,
	SDE_PIX_FMT_BGRA_1010102_TILE,
	SDE_PIX_FMT_BGRX_1010102_TILE,
	SDE_PIX_FMT_ABGR_2101010_TILE,
	SDE_PIX_FMT_XBGR_2101010_TILE,
};

static u32 sde_hw_rotator_v4_outpixfmts[] = {
	SDE_PIX_FMT_XRGB_8888,
	SDE_PIX_FMT_ARGB_8888,
	SDE_PIX_FMT_ABGR_8888,
	SDE_PIX_FMT_RGBA_8888,
	SDE_PIX_FMT_BGRA_8888,
	SDE_PIX_FMT_RGBX_8888,
	SDE_PIX_FMT_BGRX_8888,
	SDE_PIX_FMT_XBGR_8888,
	SDE_PIX_FMT_RGBA_5551,
	SDE_PIX_FMT_ARGB_1555,
	SDE_PIX_FMT_ABGR_1555,
	SDE_PIX_FMT_BGRA_5551,
	SDE_PIX_FMT_BGRX_5551,
	SDE_PIX_FMT_RGBX_5551,
	SDE_PIX_FMT_XBGR_1555,
	SDE_PIX_FMT_XRGB_1555,
	SDE_PIX_FMT_ARGB_4444,
	SDE_PIX_FMT_RGBA_4444,
	SDE_PIX_FMT_BGRA_4444,
	SDE_PIX_FMT_ABGR_4444,
	SDE_PIX_FMT_RGBX_4444,
	SDE_PIX_FMT_XRGB_4444,
	SDE_PIX_FMT_BGRX_4444,
	SDE_PIX_FMT_XBGR_4444,
	SDE_PIX_FMT_RGB_888,
	SDE_PIX_FMT_BGR_888,
	SDE_PIX_FMT_RGB_565,
	SDE_PIX_FMT_BGR_565,
	/* SDE_PIX_FMT_Y_CB_CR_H2V2 */
	/* SDE_PIX_FMT_Y_CR_CB_H2V2 */
	/* SDE_PIX_FMT_Y_CR_CB_GH2V2 */
	SDE_PIX_FMT_Y_CBCR_H2V2,
	SDE_PIX_FMT_Y_CRCB_H2V2,
	SDE_PIX_FMT_Y_CBCR_H1V2,
	SDE_PIX_FMT_Y_CRCB_H1V2,
	SDE_PIX_FMT_Y_CBCR_H2V1,
	SDE_PIX_FMT_Y_CRCB_H2V1,
	/* SDE_PIX_FMT_YCBYCR_H2V1 */
	SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
	SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
	SDE_PIX_FMT_RGBA_8888_UBWC,
	SDE_PIX_FMT_RGBX_8888_UBWC,
	SDE_PIX_FMT_RGB_565_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
	SDE_PIX_FMT_RGBA_1010102,
	SDE_PIX_FMT_RGBX_1010102,
	SDE_PIX_FMT_ARGB_2101010,
	SDE_PIX_FMT_XRGB_2101010,
	SDE_PIX_FMT_BGRA_1010102,
	SDE_PIX_FMT_BGRX_1010102,
	SDE_PIX_FMT_ABGR_2101010,
	SDE_PIX_FMT_XBGR_2101010,
	SDE_PIX_FMT_RGBA_1010102_UBWC,
	SDE_PIX_FMT_RGBX_1010102_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_P010,
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE,
	SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
	SDE_PIX_FMT_Y_CRCB_H2V2_TILE,
	SDE_PIX_FMT_XRGB_8888_TILE,
	SDE_PIX_FMT_ARGB_8888_TILE,
	SDE_PIX_FMT_ABGR_8888_TILE,
	SDE_PIX_FMT_XBGR_8888_TILE,
	SDE_PIX_FMT_RGBA_8888_TILE,
	SDE_PIX_FMT_BGRA_8888_TILE,
	SDE_PIX_FMT_RGBX_8888_TILE,
	SDE_PIX_FMT_BGRX_8888_TILE,
	SDE_PIX_FMT_RGBA_1010102_TILE,
	SDE_PIX_FMT_RGBX_1010102_TILE,
	SDE_PIX_FMT_ARGB_2101010_TILE,
	SDE_PIX_FMT_XRGB_2101010_TILE,
	SDE_PIX_FMT_BGRA_1010102_TILE,
	SDE_PIX_FMT_BGRX_1010102_TILE,
	SDE_PIX_FMT_ABGR_2101010_TILE,
	SDE_PIX_FMT_XBGR_2101010_TILE,
};

static struct sde_rot_vbif_debug_bus nrt_vbif_dbg_bus_r3[] = {
	{0x214, 0x21c, 16, 1, 0x200}, /* arb clients main */
	{0x214, 0x21c, 0, 12, 0x13}, /* xin blocks - axi side */
	{0x21c, 0x214, 0, 12, 0xc}, /* xin blocks - clock side */
};

static struct sde_rot_debug_bus rot_dbgbus_r3[] = {
	/*
	 * rottop - 0xA8850
	 */
	/* REGDMA */
	{ 0XA8850, 0, 0 },
	{ 0XA8850, 0, 1 },
	{ 0XA8850, 0, 2 },
	{ 0XA8850, 0, 3 },
	{ 0XA8850, 0, 4 },

	/* ROT_WB */
	{ 0XA8850, 1, 0 },
	{ 0XA8850, 1, 1 },
	{ 0XA8850, 1, 2 },
	{ 0XA8850, 1, 3 },
	{ 0XA8850, 1, 4 },
	{ 0XA8850, 1, 5 },
	{ 0XA8850, 1, 6 },
	{ 0XA8850, 1, 7 },

	/* UBWC_DEC */
	{ 0XA8850, 2, 0 },

	/* UBWC_ENC */
	{ 0XA8850, 3, 0 },

	/* ROT_FETCH_0 */
	{ 0XA8850, 4, 0 },
	{ 0XA8850, 4, 1 },
	{ 0XA8850, 4, 2 },
	{ 0XA8850, 4, 3 },
	{ 0XA8850, 4, 4 },
	{ 0XA8850, 4, 5 },
	{ 0XA8850, 4, 6 },
	{ 0XA8850, 4, 7 },

	/* ROT_FETCH_1 */
	{ 0XA8850, 5, 0 },
	{ 0XA8850, 5, 1 },
	{ 0XA8850, 5, 2 },
	{ 0XA8850, 5, 3 },
	{ 0XA8850, 5, 4 },
	{ 0XA8850, 5, 5 },
	{ 0XA8850, 5, 6 },
	{ 0XA8850, 5, 7 },

	/* ROT_FETCH_2 */
	{ 0XA8850, 6, 0 },
	{ 0XA8850, 6, 1 },
	{ 0XA8850, 6, 2 },
	{ 0XA8850, 6, 3 },
	{ 0XA8850, 6, 4 },
	{ 0XA8850, 6, 5 },
	{ 0XA8850, 6, 6 },
	{ 0XA8850, 6, 7 },

	/* ROT_FETCH_3 */
	{ 0XA8850, 7, 0 },
	{ 0XA8850, 7, 1 },
	{ 0XA8850, 7, 2 },
	{ 0XA8850, 7, 3 },
	{ 0XA8850, 7, 4 },
	{ 0XA8850, 7, 5 },
	{ 0XA8850, 7, 6 },
	{ 0XA8850, 7, 7 },

	/* ROT_FETCH_4 */
	{ 0XA8850, 8, 0 },
	{ 0XA8850, 8, 1 },
	{ 0XA8850, 8, 2 },
	{ 0XA8850, 8, 3 },
	{ 0XA8850, 8, 4 },
	{ 0XA8850, 8, 5 },
	{ 0XA8850, 8, 6 },
	{ 0XA8850, 8, 7 },

	/* ROT_UNPACK_0 */
	{ 0XA8850, 9, 0 },
	{ 0XA8850, 9, 1 },
	{ 0XA8850, 9, 2 },
	{ 0XA8850, 9, 3 },
};

static struct sde_rot_regdump sde_rot_r3_regdump[] = {
	{ "SDEROT_ROTTOP", SDE_ROT_ROTTOP_OFFSET, 0x100, SDE_ROT_REGDUMP_READ },
	{ "SDEROT_SSPP", SDE_ROT_SSPP_OFFSET, 0x200, SDE_ROT_REGDUMP_READ },
	{ "SDEROT_WB", SDE_ROT_WB_OFFSET, 0x300, SDE_ROT_REGDUMP_READ },
	{ "SDEROT_REGDMA_CSR", SDE_ROT_REGDMA_OFFSET, 0x100,
		SDE_ROT_REGDUMP_READ },
	/*
	 * Need to perform a SW reset to REGDMA in order to access the
	 * REGDMA RAM, especially if REGDMA is waiting for Rotator IDLE.
	 * REGDMA RAM should be dumped last.
	 */
	{ "SDEROT_REGDMA_RESET", ROTTOP_SW_RESET_OVERRIDE, 1,
		SDE_ROT_REGDUMP_WRITE },
	{ "SDEROT_REGDMA_RAM", SDE_ROT_REGDMA_RAM_OFFSET, 0x2000,
		SDE_ROT_REGDUMP_READ },
	{ "SDEROT_VBIF_NRT", SDE_ROT_VBIF_NRT_OFFSET, 0x590,
		SDE_ROT_REGDUMP_VBIF },
};

struct sde_rot_cdp_params {
	bool enable;
	struct sde_mdp_format_params *fmt;
	u32 offset;
};

/* Invalid software timestamp value for initialization */
#define SDE_REGDMA_SWTS_INVALID (~0)

/**
 * sde_hw_rotator_elapsed_swts - Find difference of 2 software timestamps
 * @ts_curr: current software timestamp
 * @ts_prev: previous software timestamp
 * @return: the amount ts_curr is ahead of ts_prev
 */
static int sde_hw_rotator_elapsed_swts(u32 ts_curr, u32 ts_prev)
{
	u32 diff = (ts_curr - ts_prev) & SDE_REGDMA_SWTS_MASK;

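	/*
	 * Note: the timestamp lives in a small modular counter field, so the
	 * masked difference is sign-extended below to keep the result
	 * meaningful across counter wrap-around (a wrapped ts_curr still
	 * reports a small positive distance from ts_prev).
	 */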
	return sign_extend32(diff, (SDE_REGDMA_SWTS_SHIFT - 1));
}

/**
 * sde_hw_rotator_pending_swts - Check if the given context is still pending
 * @rot: Pointer to hw rotator
 * @ctx: Pointer to rotator context
 * @pswts: Pointer to returned reference software timestamp, optional
 * @return: true if context has pending requests
 */
static int sde_hw_rotator_pending_swts(struct sde_hw_rotator *rot,
		struct sde_hw_rotator_context *ctx, u32 *pswts)
{
	u32 swts;
	int ts_diff;
	bool pending;

	if (ctx->last_regdma_timestamp == SDE_REGDMA_SWTS_INVALID)
		swts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);
	else
		swts = ctx->last_regdma_timestamp;

	if (ctx->q_id == ROT_QUEUE_LOW_PRIORITY)
		swts >>= SDE_REGDMA_SWTS_SHIFT;

	swts &= SDE_REGDMA_SWTS_MASK;

	ts_diff = sde_hw_rotator_elapsed_swts(ctx->timestamp, swts);

	if (pswts)
		*pswts = swts;

	pending = (ts_diff > 0) ? true : false;

	SDEROT_DBG("ts:0x%x, queue_id:%d, swts:0x%x, pending:%d\n",
		ctx->timestamp, ctx->q_id, swts, pending);
	SDEROT_EVTLOG(ctx->timestamp, swts, ctx->q_id, ts_diff);
	return pending;
}

/**
 * sde_hw_rotator_update_swts - update software timestamp with given value
 * @rot: Pointer to hw rotator
 * @ctx: Pointer to rotator context
 * @swts: new software timestamp
 * @return: new combined swts
 */
static u32 sde_hw_rotator_update_swts(struct sde_hw_rotator *rot,
		struct sde_hw_rotator_context *ctx, u32 swts)
{
	u32 mask = SDE_REGDMA_SWTS_MASK;

	swts &= SDE_REGDMA_SWTS_MASK;
	if (ctx->q_id == ROT_QUEUE_LOW_PRIORITY) {
		swts <<= SDE_REGDMA_SWTS_SHIFT;
		mask <<= SDE_REGDMA_SWTS_SHIFT;
	}

	swts |= (SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG) & ~mask);
	SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG, swts);

	return swts;
}

/**
 * sde_hw_rotator_enable_irq - Enable hw rotator interrupt with ref. count
 * Also, clear rotator/regdma irq status.
 * @rot: Pointer to hw rotator
 */
static void sde_hw_rotator_enable_irq(struct sde_hw_rotator *rot)
{
	SDEROT_DBG("irq_num:%d enabled:%d\n", rot->irq_num,
		atomic_read(&rot->irq_enabled));

	if (!atomic_read(&rot->irq_enabled)) {
		if (rot->mode == ROT_REGDMA_OFF)
			SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
				ROT_DONE_MASK);
		else
			SDE_ROTREG_WRITE(rot->mdss_base,
				REGDMA_CSR_REGDMA_INT_CLEAR, REGDMA_INT_MASK);

		enable_irq(rot->irq_num);
	}
	atomic_inc(&rot->irq_enabled);
}

/**
 * sde_hw_rotator_disable_irq - Disable hw rotator interrupt with ref. count
 * Also, clear rotator/regdma irq enable masks.
 * @rot: Pointer to hw rotator
 */
static void sde_hw_rotator_disable_irq(struct sde_hw_rotator *rot)
{
	SDEROT_DBG("irq_num:%d enabled:%d\n", rot->irq_num,
		atomic_read(&rot->irq_enabled));

	if (!atomic_read(&rot->irq_enabled)) {
		SDEROT_ERR("irq %d is already disabled\n", rot->irq_num);
		return;
	}

	if (!atomic_dec_return(&rot->irq_enabled)) {
		if (rot->mode == ROT_REGDMA_OFF)
			SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_EN, 0);
		else
			SDE_ROTREG_WRITE(rot->mdss_base,
				REGDMA_CSR_REGDMA_INT_EN, 0);
		/* disable irq after last pending irq is handled, if any */
		synchronize_irq(rot->irq_num);
		disable_irq_nosync(rot->irq_num);
	}
}

/**
 * sde_hw_rotator_dump_status - Dump hw rotator status on error
 * @rot: Pointer to hw rotator
 */
static void sde_hw_rotator_dump_status(struct sde_hw_rotator *rot)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();

	SDEROT_ERR(
		"op_mode = %x, int_en = %x, int_status = %x\n",
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_OP_MODE),
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_INT_EN),
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_INT_STATUS));

	SDEROT_ERR(
		"ts = %x, q0_status = %x, q1_status = %x, block_status = %x\n",
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_TIMESTAMP_REG),
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_QUEUE_0_STATUS),
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_QUEUE_1_STATUS),
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_BLOCK_STATUS));

	SDEROT_ERR(
		"invalid_cmd_offset = %x, fsm_state = %x\n",
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_INVALID_CMD_RAM_OFFSET),
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_FSM_STATE));

	SDEROT_ERR(
		"UBWC decode status = %x, UBWC encode status = %x\n",
		SDE_ROTREG_READ(rot->mdss_base, ROT_SSPP_UBWC_ERROR_STATUS),
		SDE_ROTREG_READ(rot->mdss_base, ROT_WB_UBWC_ERROR_STATUS));

	SDEROT_ERR("VBIF XIN HALT status = %x VBIF AXI HALT status = %x\n",
		SDE_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL1),
		SDE_VBIF_READ(mdata, MMSS_VBIF_AXI_HALT_CTRL1));

	SDEROT_ERR(
		"sbuf_status_plane0 = %x, sbuf_status_plane1 = %x\n",
		SDE_ROTREG_READ(rot->mdss_base,
			ROT_WB_SBUF_STATUS_PLANE0),
		SDE_ROTREG_READ(rot->mdss_base,
			ROT_WB_SBUF_STATUS_PLANE1));
}

/**
 * sde_hw_rotator_get_ctx(): Retrieve rotator context from rotator HW based
 * on provided session_id. Each rotator has a different session_id.
 * @rot: Pointer to rotator hw
 * @session_id: Identifier for rotator session
 * @sequence_id: Identifier for rotation request within the session
 * @q_id: Rotator queue identifier
 */
static struct sde_hw_rotator_context *sde_hw_rotator_get_ctx(
		struct sde_hw_rotator *rot, u32 session_id, u32 sequence_id,
		enum sde_rot_queue_prio q_id)
{
	int i;
	struct sde_hw_rotator_context *ctx = NULL;

	for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++) {
		ctx = rot->rotCtx[q_id][i];

		if (ctx && (ctx->session_id == session_id) &&
				(ctx->sequence_id == sequence_id)) {
			SDEROT_DBG(
				"rotCtx sloti[%d][%d] ==> ctx:%p | session-id:%d | sequence-id:%d\n",
				q_id, i, ctx, ctx->session_id,
				ctx->sequence_id);
			return ctx;
		}
	}

	return NULL;
}

/*
 * sde_hw_rotator_map_vaddr - map the debug buffer to kernel space
 * @dbgbuf: Pointer to debug buffer
 * @buf: Pointer to layer buffer structure
 * @data: Pointer to h/w mapped buffer structure
 */
static void sde_hw_rotator_map_vaddr(struct sde_dbg_buf *dbgbuf,
		struct sde_layer_buffer *buf, struct sde_mdp_data *data)
{
	dbgbuf->dmabuf = data->p[0].srcp_dma_buf;
	dbgbuf->buflen = data->p[0].srcp_dma_buf->size;

	dbgbuf->vaddr = NULL;
	dbgbuf->width = buf->width;
	dbgbuf->height = buf->height;

	if (dbgbuf->dmabuf && (dbgbuf->buflen > 0)) {
		dma_buf_begin_cpu_access(dbgbuf->dmabuf, DMA_FROM_DEVICE);
		dbgbuf->vaddr = dma_buf_kmap(dbgbuf->dmabuf, 0);
		SDEROT_DBG("vaddr mapping: 0x%p/%ld w:%d/h:%d\n",
				dbgbuf->vaddr, dbgbuf->buflen,
				dbgbuf->width, dbgbuf->height);
	}
}

/*
 * sde_hw_rotator_unmap_vaddr - unmap the debug buffer from kernel space
 * @dbgbuf: Pointer to debug buffer
 */
static void sde_hw_rotator_unmap_vaddr(struct sde_dbg_buf *dbgbuf)
{
	if (dbgbuf->vaddr) {
		dma_buf_kunmap(dbgbuf->dmabuf, 0, dbgbuf->vaddr);
		dma_buf_end_cpu_access(dbgbuf->dmabuf, DMA_FROM_DEVICE);
	}

	dbgbuf->vaddr = NULL;
	dbgbuf->dmabuf = NULL;
	dbgbuf->buflen = 0;
	dbgbuf->width = 0;
	dbgbuf->height = 0;
}

/*
 * sde_hw_rotator_vbif_setting - helper function to set vbif QoS remapper
 * levels, enable write gather and, for debug purposes, disable clock
 * gating.
 *
 * @rot: Pointer to rotator hw
 */
static void sde_hw_rotator_vbif_setting(struct sde_hw_rotator *rot)
{
	u32 i, mask, vbif_qos, reg_val = 0;
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();

	/* VBIF_ROT QoS remapper setting */
	switch (mdata->npriority_lvl) {

	case SDE_MDP_VBIF_4_LEVEL_REMAPPER:
		for (i = 0; i < mdata->npriority_lvl; i++) {
			reg_val = SDE_VBIF_READ(mdata,
					MMSS_VBIF_NRT_VBIF_QOS_REMAP_00 + i*4);
			mask = 0x3 << (XIN_SSPP * 2);
			vbif_qos = mdata->vbif_nrt_qos[i];
			reg_val |= vbif_qos << (XIN_SSPP * 2);
			/* ensure write is issued after the read operation */
			mb();
			SDE_VBIF_WRITE(mdata,
					MMSS_VBIF_NRT_VBIF_QOS_REMAP_00 + i*4,
					reg_val);
		}
		break;

	case SDE_MDP_VBIF_8_LEVEL_REMAPPER:
		mask = mdata->npriority_lvl - 1;
		for (i = 0; i < mdata->npriority_lvl; i++) {
			/* RD and WR client */
			reg_val |= (mdata->vbif_nrt_qos[i] & mask)
					<< (XIN_SSPP * 4);
			reg_val |= (mdata->vbif_nrt_qos[i] & mask)
					<< (XIN_WRITEBACK * 4);

			SDE_VBIF_WRITE(mdata,
					MMSS_VBIF_NRT_VBIF_QOS_RP_REMAP_000 + i*8,
					reg_val);
			SDE_VBIF_WRITE(mdata,
					MMSS_VBIF_NRT_VBIF_QOS_LVL_REMAP_000 + i*8,
					reg_val);
		}
		break;

	default:
		SDEROT_DBG("invalid vbif remapper levels\n");
	}

	/* Enable write gather for writeback to remove write gaps, which
	 * may hang AXI/BIMC/SDE.
	 */
	SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_WRITE_GATHTER_EN,
			BIT(XIN_WRITEBACK));

	/*
	 * For debug purposes, disable clock gating, i.e. clocks always on
	 */
	if (mdata->clk_always_on) {
		SDE_VBIF_WRITE(mdata, MMSS_VBIF_CLKON, 0x3);
		SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0, 0x3);
		SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL1,
				0xFFFF);
		SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_CLK_CTRL, 1);
	}
}

/*
 * sde_hw_rotator_setup_timestamp_packet - setup timestamp writeback command
 * @ctx: Pointer to rotator context
 * @mask: Bit mask location of the timestamp
 * @swts: Software timestamp
 */
static void sde_hw_rotator_setup_timestamp_packet(
		struct sde_hw_rotator_context *ctx, u32 mask, u32 swts)
{
	u32 *wrptr;

	wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	/*
	 * Create a dummy packet write out to 1 location for timestamp
	 * generation.
	 */
	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_SIZE, 6);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x00010001);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x00010001);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, ctx->ts_addr);
	SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_YSTRIDE0, 4);
	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_FORMAT, 4);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x004037FF);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x03020100);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x80000000);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, ctx->timestamp);
	/*
	 * Must clear secure buffer setting for SW timestamp because
	 * the SW timestamp buffer is always allocated from a non-secure
	 * region.
	 */
	if (ctx->is_secure) {
		SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_ADDR_SW_STATUS, 0);
		SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ADDR_SW_STATUS, 0);
	}
	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_WB_DST_FORMAT, 4);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x000037FF);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x03020100);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, ctx->ts_addr);
	SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_YSTRIDE0, 4);
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_SIZE, 0x00010001);
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_IMG_SIZE, 0x00010001);
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_XY, 0);
	SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_WRITE_CONFIG,
			(ctx->rot->highest_bank & 0x3) << 8);
	SDE_REGDMA_WRITE(wrptr, ROTTOP_DNSC, 0);
	SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, 1);
	SDE_REGDMA_MODIFY(wrptr, REGDMA_TIMESTAMP_REG, mask, swts);
	SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, 1);

	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
}

/*
 * sde_hw_rotator_cdp_configs - configures the CDP registers
 * @ctx: Pointer to rotator context
 * @params: Pointer to parameters needed for CDP configs
 */
static void sde_hw_rotator_cdp_configs(struct sde_hw_rotator_context *ctx,
		struct sde_rot_cdp_params *params)
{
	int reg_val;
	u32 *wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	if (!params->enable) {
		SDE_REGDMA_WRITE(wrptr, params->offset, 0x0);
		goto end;
	}

	reg_val = BIT(0); /* enable cdp */

	if (sde_mdp_is_ubwc_format(params->fmt))
		reg_val |= BIT(1); /* enable UBWC meta cdp */

	if (sde_mdp_is_ubwc_format(params->fmt)
			|| sde_mdp_is_tilea4x_format(params->fmt)
			|| sde_mdp_is_tilea5x_format(params->fmt))
		reg_val |= BIT(2); /* enable tile amortize */

	reg_val |= BIT(3); /* enable preload addr ahead cnt 64 */

	SDE_REGDMA_WRITE(wrptr, params->offset, reg_val);

end:
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
}

/*
 * sde_hw_rotator_setup_qos_lut_wr - Set QoS LUT/Danger LUT/Safe LUT configs
 * for the WRITEBACK rotator for inline and offline rotation.
 *
 * @ctx: Pointer to rotator context
 */
static void sde_hw_rotator_setup_qos_lut_wr(struct sde_hw_rotator_context *ctx)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	u32 *wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	/* Offline rotation setting */
	if (!ctx->sbuf_mode) {
		/* QOS LUT WR setting */
		if (test_bit(SDE_QOS_LUT, mdata->sde_qos_map)) {
			SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_0,
					mdata->lut_cfg[SDE_ROT_WR].creq_lut_0);
			SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_1,
					mdata->lut_cfg[SDE_ROT_WR].creq_lut_1);
		}

		/* Danger LUT WR setting */
		if (test_bit(SDE_QOS_DANGER_LUT, mdata->sde_qos_map))
			SDE_REGDMA_WRITE(wrptr, ROT_WB_DANGER_LUT,
					mdata->lut_cfg[SDE_ROT_WR].danger_lut);

		/* Safe LUT WR setting */
		if (test_bit(SDE_QOS_SAFE_LUT, mdata->sde_qos_map))
			SDE_REGDMA_WRITE(wrptr, ROT_WB_SAFE_LUT,
					mdata->lut_cfg[SDE_ROT_WR].safe_lut);

	/* Inline rotation setting */
	} else {
		/* QOS LUT WR setting */
		if (test_bit(SDE_INLINE_QOS_LUT, mdata->sde_inline_qos_map)) {
			SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_0,
					mdata->inline_lut_cfg[SDE_ROT_WR].creq_lut_0);
			SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_1,
					mdata->inline_lut_cfg[SDE_ROT_WR].creq_lut_1);
		}

		/* Danger LUT WR setting */
		if (test_bit(SDE_INLINE_QOS_DANGER_LUT,
				mdata->sde_inline_qos_map))
			SDE_REGDMA_WRITE(wrptr, ROT_WB_DANGER_LUT,
					mdata->inline_lut_cfg[SDE_ROT_WR].danger_lut);

		/* Safe LUT WR setting */
		if (test_bit(SDE_INLINE_QOS_SAFE_LUT,
				mdata->sde_inline_qos_map))
			SDE_REGDMA_WRITE(wrptr, ROT_WB_SAFE_LUT,
					mdata->inline_lut_cfg[SDE_ROT_WR].safe_lut);
	}

	/* Update command queue write ptr */
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
}

/*
 * sde_hw_rotator_setup_qos_lut_rd - Set QoS LUT/Danger LUT/Safe LUT configs
 * for the SSPP rotator for inline and offline rotation.
 *
 * @ctx: Pointer to rotator context
 */
static void sde_hw_rotator_setup_qos_lut_rd(struct sde_hw_rotator_context *ctx)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	u32 *wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	/* Offline rotation setting */
	if (!ctx->sbuf_mode) {
		/* QOS LUT RD setting */
		if (test_bit(SDE_QOS_LUT, mdata->sde_qos_map)) {
			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_0,
					mdata->lut_cfg[SDE_ROT_RD].creq_lut_0);
			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_1,
					mdata->lut_cfg[SDE_ROT_RD].creq_lut_1);
		}

		/* Danger LUT RD setting */
		if (test_bit(SDE_QOS_DANGER_LUT, mdata->sde_qos_map))
			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_DANGER_LUT,
					mdata->lut_cfg[SDE_ROT_RD].danger_lut);

		/* Safe LUT RD setting */
		if (test_bit(SDE_QOS_SAFE_LUT, mdata->sde_qos_map))
			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SAFE_LUT,
					mdata->lut_cfg[SDE_ROT_RD].safe_lut);

	/* inline rotation setting */
	} else {
		/* QOS LUT RD setting */
		if (test_bit(SDE_INLINE_QOS_LUT, mdata->sde_inline_qos_map)) {
			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_0,
					mdata->inline_lut_cfg[SDE_ROT_RD].creq_lut_0);
			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_1,
					mdata->inline_lut_cfg[SDE_ROT_RD].creq_lut_1);
		}

		/* Danger LUT RD setting */
		if (test_bit(SDE_INLINE_QOS_DANGER_LUT,
				mdata->sde_inline_qos_map))
			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_DANGER_LUT,
					mdata->inline_lut_cfg[SDE_ROT_RD].danger_lut);

		/* Safe LUT RD setting */
		if (test_bit(SDE_INLINE_QOS_SAFE_LUT,
				mdata->sde_inline_qos_map))
			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SAFE_LUT,
					mdata->inline_lut_cfg[SDE_ROT_RD].safe_lut);
	}

	/* Update command queue write ptr */
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
}

/*
 * sde_hw_rotator_setup_fetchengine - setup fetch engine
 * @ctx: Pointer to rotator context
 * @queue_id: Priority queue identifier
 * @cfg: Fetch configuration
 * @danger_lut: real-time QoS LUT for danger setting (not used)
 * @safe_lut: real-time QoS LUT for safe setting (not used)
 * @dnsc_factor_w: downscale factor for width
 * @dnsc_factor_h: downscale factor for height
 * @flags: Control flag
 */
static void sde_hw_rotator_setup_fetchengine(struct sde_hw_rotator_context *ctx,
		enum sde_rot_queue_prio queue_id,
		struct sde_hw_rot_sspp_cfg *cfg, u32 danger_lut, u32 safe_lut,
		u32 dnsc_factor_w, u32 dnsc_factor_h, u32 flags)
{
	struct sde_hw_rotator *rot = ctx->rot;
	struct sde_mdp_format_params *fmt;
	struct sde_mdp_data *data;
	struct sde_rot_cdp_params cdp_params = {0};
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	u32 *wrptr;
	u32 opmode = 0;
	u32 chroma_samp = 0;
	u32 src_format = 0;
	u32 unpack = 0;
	u32 width = cfg->img_width;
	u32 height = cfg->img_height;
	u32 fetch_blocksize = 0;
	int i;

	if (ctx->rot->mode == ROT_REGDMA_ON) {
		SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_INT_EN,
				REGDMA_INT_MASK);
		SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_OP_MODE,
				REGDMA_EN);
	}

	wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	/*
	 * initialize start control trigger selection first
	 */
	if (test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map)) {
		if (ctx->sbuf_mode)
			SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL,
					ctx->start_ctrl);
		else
			SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, 0);
	}

	/* source image setup */
	if ((flags & SDE_ROT_FLAG_DEINTERLACE)
			&& !(flags & SDE_ROT_FLAG_SOURCE_ROTATED_90)) {
		for (i = 0; i < cfg->src_plane.num_planes; i++)
			cfg->src_plane.ystride[i] *= 2;
		width *= 2;
		height /= 2;
	}

	/*
	 * REGDMA BLK write from SRC_SIZE to OP_MODE, total 15 registers
	 */
	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_SIZE, 15);

	/* SRC_SIZE, SRC_IMG_SIZE, SRC_XY, OUT_SIZE, OUT_XY */
	SDE_REGDMA_BLKWRITE_DATA(wrptr,
			cfg->src_rect->w | (cfg->src_rect->h << 16));
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0); /* SRC_IMG_SIZE unused */
	SDE_REGDMA_BLKWRITE_DATA(wrptr,
			cfg->src_rect->x | (cfg->src_rect->y << 16));
	SDE_REGDMA_BLKWRITE_DATA(wrptr,
			cfg->src_rect->w | (cfg->src_rect->h << 16));
	SDE_REGDMA_BLKWRITE_DATA(wrptr,
			cfg->src_rect->x | (cfg->src_rect->y << 16));

	/* SRC_ADDR [0-3], SRC_YSTRIDE [0-1] */
	data = cfg->data;
	for (i = 0; i < SDE_ROT_MAX_PLANES; i++)
		SDE_REGDMA_BLKWRITE_DATA(wrptr, data->p[i].addr);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->src_plane.ystride[0] |
			(cfg->src_plane.ystride[1] << 16));
	SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->src_plane.ystride[2] |
			(cfg->src_plane.ystride[3] << 16));

	/* UNUSED, write 0 */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);

	/* setup source format */
	fmt = cfg->fmt;

	chroma_samp = fmt->chroma_sample;
	if (flags & SDE_ROT_FLAG_SOURCE_ROTATED_90) {
		if (chroma_samp == SDE_MDP_CHROMA_H2V1)
			chroma_samp = SDE_MDP_CHROMA_H1V2;
		else if (chroma_samp == SDE_MDP_CHROMA_H1V2)
			chroma_samp = SDE_MDP_CHROMA_H2V1;
	}

	src_format = (chroma_samp << 23) |
		(fmt->fetch_planes << 19) |
		(fmt->bits[C3_ALPHA] << 6) |
		(fmt->bits[C2_R_Cr] << 4) |
		(fmt->bits[C1_B_Cb] << 2) |
		(fmt->bits[C0_G_Y] << 0);

	if (fmt->alpha_enable &&
			(fmt->fetch_planes == SDE_MDP_PLANE_INTERLEAVED))
		src_format |= BIT(8); /* SRCC3_EN */

	src_format |= ((fmt->unpack_count - 1) << 12) |
			(fmt->unpack_tight << 17) |
			(fmt->unpack_align_msb << 18) |
			((fmt->bpp - 1) << 9) |
			((fmt->frame_format & 3) << 30);

	if (flags & SDE_ROT_FLAG_ROT_90)
		src_format |= BIT(11); /* ROT90 */

	if (sde_mdp_is_ubwc_format(fmt))
		opmode |= BIT(0); /* BWC_DEC_EN */

	/* if this is YUV pixel format, enable CSC */
	if (sde_mdp_is_yuv_format(fmt))
		src_format |= BIT(15); /* SRC_COLOR_SPACE */

	if (fmt->pixel_mode == SDE_MDP_PIXEL_10BIT)
		src_format |= BIT(14); /* UNPACK_DX_FORMAT */

	if (rot->solid_fill)
		src_format |= BIT(22); /* SOLID_FILL */

	/* SRC_FORMAT */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, src_format);

	/* setup source unpack pattern */
	unpack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
			(fmt->element[1] << 8) | (fmt->element[0] << 0);

	/* SRC_UNPACK_PATTERN */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, unpack);

	/* setup source op mode */
	if (flags & SDE_ROT_FLAG_FLIP_LR)
		opmode |= BIT(13); /* FLIP_MODE L/R horizontal flip */
	if (flags & SDE_ROT_FLAG_FLIP_UD)
		opmode |= BIT(14); /* FLIP_MODE U/D vertical flip */
	opmode |= BIT(31); /* MDSS_MDP_OP_PE_OVERRIDE */

	/* SRC_OP_MODE */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, opmode);

	/* setup source fetch config, TP10 uses different block size */
	if (test_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map) &&
			(dnsc_factor_w == 1) && (dnsc_factor_h == 1)) {
		if (sde_mdp_is_tp10_format(fmt))
			fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_144_EXT;
		else
			fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_192_EXT;
	} else {
		if (sde_mdp_is_tp10_format(fmt))
			fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_96;
		else
			fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_128;
	}

	if (rot->solid_fill)
		SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_CONSTANT_COLOR,
				rot->constant_color);

	SDE_REGDMA_WRITE(wrptr, ROT_SSPP_FETCH_CONFIG,
			fetch_blocksize |
			SDE_ROT_SSPP_FETCH_CONFIG_RESET_VALUE |
			((rot->highest_bank & 0x3) << 18));

	if (test_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map))
		SDE_REGDMA_WRITE(wrptr, ROT_SSPP_UBWC_STATIC_CTRL, BIT(31) |
				((ctx->rot->ubwc_malsize & 0x3) << 8) |
				((ctx->rot->highest_bank & 0x3) << 4) |
				((ctx->rot->ubwc_swizzle & 0x1) << 0));

	/* setup source buffer plane security status */
	if (flags & (SDE_ROT_FLAG_SECURE_OVERLAY_SESSION |
			SDE_ROT_FLAG_SECURE_CAMERA_SESSION)) {
		SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_ADDR_SW_STATUS, 0xF);
		ctx->is_secure = true;
	} else {
		SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_ADDR_SW_STATUS, 0);
		ctx->is_secure = false;
	}

	/* Update command queue write ptr */
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);

	/* CDP register RD setting */
	cdp_params.enable = test_bit(SDE_QOS_CDP, mdata->sde_qos_map) ?
			 mdata->enable_cdp[SDE_ROT_RD] : false;
	cdp_params.fmt = fmt;
	cdp_params.offset = ROT_SSPP_CDP_CNTL;
	sde_hw_rotator_cdp_configs(ctx, &cdp_params);

	/* QOS LUT/ Danger LUT/ Safe LUT RD setting */
	sde_hw_rotator_setup_qos_lut_rd(ctx);

	wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	/*
	 * Determine if traffic shaping is required. Only enable traffic
	 * shaping when content is 4k@30fps. The actual traffic shaping
	 * bandwidth calculation is done in output setup.
	 */
	if (((!ctx->sbuf_mode)
			&& (cfg->src_rect->w * cfg->src_rect->h) >= RES_UHD)
			&& (cfg->fps <= 30)) {
		SDEROT_DBG("Enable Traffic Shaper\n");
		ctx->is_traffic_shaping = true;
	} else {
		SDEROT_DBG("Disable Traffic Shaper\n");
		ctx->is_traffic_shaping = false;
	}

	/* Update command queue write ptr */
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
}

/*
 * sde_hw_rotator_setup_wbengine - setup writeback engine
 * @ctx: Pointer to rotator context
 * @queue_id: Priority queue identifier
 * @cfg: Writeback configuration
 * @flags: Control flag
 */
static void sde_hw_rotator_setup_wbengine(struct sde_hw_rotator_context *ctx,
		enum sde_rot_queue_prio queue_id,
		struct sde_hw_rot_wb_cfg *cfg,
		u32 flags)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	struct sde_mdp_format_params *fmt;
	struct sde_rot_cdp_params cdp_params = {0};
	u32 *wrptr;
	u32 pack = 0;
	u32 dst_format = 0;
	u32 no_partial_writes = 0;
	int i;

	wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	fmt = cfg->fmt;

	/* setup WB DST format */
	dst_format |= (fmt->chroma_sample << 23) |
			(fmt->fetch_planes << 19) |
			(fmt->bits[C3_ALPHA] << 6) |
			(fmt->bits[C2_R_Cr] << 4) |
			(fmt->bits[C1_B_Cb] << 2) |
			(fmt->bits[C0_G_Y] << 0);

	/* alpha control */
	if (fmt->bits[C3_ALPHA] || fmt->alpha_enable) {
		dst_format |= BIT(8);
		if (!fmt->alpha_enable) {
			dst_format |= BIT(14);
			SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ALPHA_X_VALUE, 0);
		}
	}

	dst_format |= ((fmt->unpack_count - 1) << 12) |
			(fmt->unpack_tight << 17) |
			(fmt->unpack_align_msb << 18) |
			((fmt->bpp - 1) << 9) |
			((fmt->frame_format & 3) << 30);

	if (sde_mdp_is_yuv_format(fmt))
		dst_format |= BIT(15);

	if (fmt->pixel_mode == SDE_MDP_PIXEL_10BIT)
		dst_format |= BIT(21); /* PACK_DX_FORMAT */

	/*
	 * REGDMA BLK write, from DST_FORMAT to DST_YSTRIDE 1, total 9 regs
	 */
	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_WB_DST_FORMAT, 9);

	/* DST_FORMAT */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, dst_format);

	/* DST_OP_MODE */
	if (sde_mdp_is_ubwc_format(fmt))
		SDE_REGDMA_BLKWRITE_DATA(wrptr, BIT(0));
	else
		SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);

	/* DST_PACK_PATTERN */
	pack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
			(fmt->element[1] << 8) | (fmt->element[0] << 0);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, pack);

	/* DST_ADDR [0-3], DST_YSTRIDE [0-1] */
	for (i = 0; i < SDE_ROT_MAX_PLANES; i++)
		SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->data->p[i].addr);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->dst_plane.ystride[0] |
			(cfg->dst_plane.ystride[1] << 16));
	SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->dst_plane.ystride[2] |
			(cfg->dst_plane.ystride[3] << 16));

	/* setup WB out image size and ROI */
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_IMG_SIZE,
			cfg->img_width | (cfg->img_height << 16));
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_SIZE,
			cfg->dst_rect->w | (cfg->dst_rect->h << 16));
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_XY,
			cfg->dst_rect->x | (cfg->dst_rect->y << 16));

	if (flags & (SDE_ROT_FLAG_SECURE_OVERLAY_SESSION |
			SDE_ROT_FLAG_SECURE_CAMERA_SESSION))
		SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ADDR_SW_STATUS, 0x1);
	else
		SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ADDR_SW_STATUS, 0);

	/*
	 * setup Downscale factor
	 */
	SDE_REGDMA_WRITE(wrptr, ROTTOP_DNSC,
			cfg->v_downscale_factor |
			(cfg->h_downscale_factor << 16));

	/* partial write check */
	if (test_bit(SDE_CAPS_PARTIALWR, mdata->sde_caps_map)) {
		no_partial_writes = BIT(10);

		/*
		 * For simplicity, don't disable partial writes if
		 * the ROI does not span the entire width of the
		 * output image, and require the total stride to
		 * also be properly aligned.
		 *
		 * This avoids having to determine the memory access
		 * alignment of the actual horizontal ROI on a per
		 * color format basis.
		 */
		if (sde_mdp_is_ubwc_format(fmt)) {
			no_partial_writes = 0x0;
		} else if (cfg->dst_rect->x ||
				cfg->dst_rect->w != cfg->img_width) {
			no_partial_writes = 0x0;
		} else {
			for (i = 0; i < SDE_ROT_MAX_PLANES; i++)
				if (cfg->dst_plane.ystride[i] &
						PARTIAL_WRITE_ALIGNMENT)
					no_partial_writes = 0x0;
		}
	}

	/* write config setup for bank configuration */
	SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_WRITE_CONFIG, no_partial_writes |
			(ctx->rot->highest_bank & 0x3) << 8);

	if (test_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map))
		SDE_REGDMA_WRITE(wrptr, ROT_WB_UBWC_STATIC_CTRL,
				((ctx->rot->ubwc_malsize & 0x3) << 8) |
				((ctx->rot->highest_bank & 0x3) << 4) |
				((ctx->rot->ubwc_swizzle & 0x1) << 0));

	if (test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map))
		SDE_REGDMA_WRITE(wrptr, ROT_WB_SYS_CACHE_MODE,
				ctx->sys_cache_mode);

	SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, ctx->op_mode |
			(flags & SDE_ROT_FLAG_ROT_90 ? BIT(1) : 0) | BIT(0));

	sde_hw_rotator_put_regdma_segment(ctx, wrptr);

	/* CDP register WR setting */
	cdp_params.enable = test_bit(SDE_QOS_CDP, mdata->sde_qos_map) ?
			mdata->enable_cdp[SDE_ROT_WR] : false;
	cdp_params.fmt = fmt;
	cdp_params.offset = ROT_WB_CDP_CNTL;
	sde_hw_rotator_cdp_configs(ctx, &cdp_params);

	/* QOS LUT/ Danger LUT/ Safe LUT WR setting */
	sde_hw_rotator_setup_qos_lut_wr(ctx);

	wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	/* setup traffic shaper for 4k 30fps content or if prefill_bw is set */
	if (!ctx->sbuf_mode &&
			(ctx->is_traffic_shaping || cfg->prefill_bw)) {
		u32 bw;

		/*
		 * Target to finish in 12ms, and we need to set number of bytes
		 * per clock tick for traffic shaping.
		 * Each clock tick runs @ 19.2MHz, so we need to know the total
		 * number of clock ticks in 12ms, i.e. 12ms/(1/19.2MHz) ==> 230400
		 * Finally, calculate the byte count per clock tick based on
		 * resolution, bpp and compression ratio.
		 */
		bw = cfg->dst_rect->w * cfg->dst_rect->h;

		if (fmt->chroma_sample == SDE_MDP_CHROMA_420)
			bw = (bw * 3) / 2;
		else
			bw *= fmt->bpp;

		bw /= TRAFFIC_SHAPE_CLKTICK_12MS;

		/* use prefill bandwidth instead if specified */
		if (cfg->prefill_bw)
			bw = DIV_ROUND_UP(cfg->prefill_bw,
					TRAFFIC_SHAPE_VSYNC_CLK);

		if (bw > 0xFF)
			bw = 0xFF;
		SDE_REGDMA_WRITE(wrptr, ROT_WB_TRAFFIC_SHAPER_WR_CLIENT,
				BIT(31) | (cfg->prefill_bw ? BIT(27) : 0) | bw);
		SDEROT_DBG("Enable ROT_WB Traffic Shaper:%d\n", bw);
	} else {
		SDE_REGDMA_WRITE(wrptr, ROT_WB_TRAFFIC_SHAPER_WR_CLIENT, 0);
		SDEROT_DBG("Disable ROT_WB Traffic Shaper\n");
	}

	/* Update command queue write ptr */
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
}

/*
 * sde_hw_rotator_start_no_regdma - start non-regdma operation
 * @ctx: Pointer to rotator context
 * @queue_id: Priority queue identifier
 */
static u32 sde_hw_rotator_start_no_regdma(struct sde_hw_rotator_context *ctx,
		enum sde_rot_queue_prio queue_id)
{
	struct sde_hw_rotator *rot = ctx->rot;
	u32 *wrptr;
	u32 *rdptr;
	u8 *addr;
	u32 mask;
	u32 blksize;

	rdptr = sde_hw_rotator_get_regdma_segment_base(ctx);
	wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	if (rot->irq_num >= 0) {
		SDE_REGDMA_WRITE(wrptr, ROTTOP_INTR_EN, 1);
		SDE_REGDMA_WRITE(wrptr, ROTTOP_INTR_CLEAR, 1);
		reinit_completion(&ctx->rot_comp);
		sde_hw_rotator_enable_irq(rot);
	}

	SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, ctx->start_ctrl);

	/* Update command queue write ptr */
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);

	SDEROT_DBG("BEGIN %d\n", ctx->timestamp);
	/* Write all command stream to Rotator blocks */
	/* Rotator will start right away after command stream finish writing */
	while (rdptr < wrptr) {
		u32 op = REGDMA_OP_MASK & *rdptr;

		switch (op) {
		case REGDMA_OP_NOP:
			SDEROT_DBG("NOP\n");
			rdptr++;
			break;
		case REGDMA_OP_REGWRITE:
			SDEROT_DBG("REGW %6.6x %8.8x\n",
					rdptr[0] & REGDMA_ADDR_OFFSET_MASK,
					rdptr[1]);
			addr = rot->mdss_base +
					(*rdptr++ & REGDMA_ADDR_OFFSET_MASK);
			writel_relaxed(*rdptr++, addr);
			break;
		case REGDMA_OP_REGMODIFY:
			SDEROT_DBG("REGM %6.6x %8.8x %8.8x\n",
					rdptr[0] & REGDMA_ADDR_OFFSET_MASK,
					rdptr[1], rdptr[2]);
			addr = rot->mdss_base +
					(*rdptr++ & REGDMA_ADDR_OFFSET_MASK);
			mask = *rdptr++;
			writel_relaxed((readl_relaxed(addr) & mask) | *rdptr++,
					addr);
			break;
		case REGDMA_OP_BLKWRITE_SINGLE:
			SDEROT_DBG("BLKWS %6.6x %6.6x\n",
					rdptr[0] & REGDMA_ADDR_OFFSET_MASK,
					rdptr[1]);
			addr = rot->mdss_base +
					(*rdptr++ & REGDMA_ADDR_OFFSET_MASK);
			blksize = *rdptr++;
			while (blksize--) {
				SDEROT_DBG("DATA %8.8x\n", rdptr[0]);
				writel_relaxed(*rdptr++, addr);
			}
			break;
		case REGDMA_OP_BLKWRITE_INC:
			SDEROT_DBG("BLKWI %6.6x %6.6x\n",
					rdptr[0] & REGDMA_ADDR_OFFSET_MASK,
					rdptr[1]);
			addr = rot->mdss_base +
					(*rdptr++ & REGDMA_ADDR_OFFSET_MASK);
			blksize = *rdptr++;
			while (blksize--) {
				SDEROT_DBG("DATA %8.8x\n", rdptr[0]);
				writel_relaxed(*rdptr++, addr);
				addr += 4;
			}
			break;
		default:
			/* Other OP modes are not supported.
			 * Skip data for now for unrecognized OP mode.
			 */
			SDEROT_DBG("UNDEFINED\n");
			rdptr++;
			break;
		}
	}
	SDEROT_DBG("END %d\n", ctx->timestamp);

	return ctx->timestamp;
}

1559/*
1560 * sde_hw_rotator_start_regdma - start regdma operation
1561 * @ctx: Pointer to rotator context
1562 * @queue_id: Priority queue identifier
1563 */
1564static u32 sde_hw_rotator_start_regdma(struct sde_hw_rotator_context *ctx,
1565 enum sde_rot_queue_prio queue_id)
1566{
1567 struct sde_hw_rotator *rot = ctx->rot;
1568 u32 *wrptr;
1569 u32 regdmaSlot;
1570 u32 offset;
1571 long length;
1572 long ts_length;
1573 u32 enableInt;
1574 u32 swts = 0;
1575 u32 mask = 0;
Alan Kwong6bc64622017-02-04 17:36:03 -08001576 u32 trig_sel;
Alan Kwong9487de22016-01-16 22:06:36 -05001577
1578 wrptr = sde_hw_rotator_get_regdma_segment(ctx);
1579
Alan Kwong9487de22016-01-16 22:06:36 -05001580 /*
1581 * Last ROT command must be ROT_START before REGDMA start
1582 */
Alan Kwong6bc64622017-02-04 17:36:03 -08001583 SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, ctx->start_ctrl);
1584
Alan Kwong9487de22016-01-16 22:06:36 -05001585 sde_hw_rotator_put_regdma_segment(ctx, wrptr);
1586
1587 /*
1588 * Start REGDMA with command offset and size
1589 */
1590 regdmaSlot = sde_hw_rotator_get_regdma_ctxidx(ctx);
1591 length = ((long)wrptr - (long)ctx->regdma_base) / 4;
1592 offset = (u32)(ctx->regdma_base - (u32 *)(rot->mdss_base +
1593 REGDMA_RAM_REGDMA_CMD_RAM));
1594 enableInt = ((ctx->timestamp & 1) + 1) << 30;
Alan Kwong6bc64622017-02-04 17:36:03 -08001595 trig_sel = ctx->sbuf_mode ? REGDMA_CMD_TRIG_SEL_MDP_FLUSH :
1596 REGDMA_CMD_TRIG_SEL_SW_START;
Alan Kwong9487de22016-01-16 22:06:36 -05001597
1598 SDEROT_DBG(
1599 "regdma(%d)[%d] <== INT:0x%X|length:%ld|offset:0x%X, ts:%X\n",
1600 queue_id, regdmaSlot, enableInt, length, offset,
1601 ctx->timestamp);
1602
1603 /* ensure the command packet is issued before the submit command */
1604 wmb();
1605
1606 /* REGDMA submission for current context */
1607 if (queue_id == ROT_QUEUE_HIGH_PRIORITY) {
1608 SDE_ROTREG_WRITE(rot->mdss_base,
1609 REGDMA_CSR_REGDMA_QUEUE_0_SUBMIT,
Alan Kwong6bc64622017-02-04 17:36:03 -08001610 (ctx->sbuf_mode ? enableInt : 0) | trig_sel |
1611 ((length & 0x3ff) << 14) | offset);
Alan Kwong9487de22016-01-16 22:06:36 -05001612 swts = ctx->timestamp;
1613 mask = ~SDE_REGDMA_SWTS_MASK;
1614 } else {
1615 SDE_ROTREG_WRITE(rot->mdss_base,
1616 REGDMA_CSR_REGDMA_QUEUE_1_SUBMIT,
Alan Kwong6bc64622017-02-04 17:36:03 -08001617 (ctx->sbuf_mode ? enableInt : 0) | trig_sel |
1618 ((length & 0x3ff) << 14) | offset);
Alan Kwong9487de22016-01-16 22:06:36 -05001619 swts = ctx->timestamp << SDE_REGDMA_SWTS_SHIFT;
1620 mask = ~(SDE_REGDMA_SWTS_MASK << SDE_REGDMA_SWTS_SHIFT);
1621 }
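	/*
	 * The shared software timestamp word holds both queues side by side:
	 * the high priority queue timestamp occupies the low
	 * SDE_REGDMA_SWTS_MASK bits and the low priority queue timestamp sits
	 * SDE_REGDMA_SWTS_SHIFT bits above it. The mask computed above keeps
	 * the other queue's half intact when this context updates its own.
	 */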
1622
Alan Kwong6bc64622017-02-04 17:36:03 -08001623 /* timestamp update can only be used in offline multi-context mode */
1624 if (!ctx->sbuf_mode) {
1625 /* Write timestamp after previous rotator job finished */
1626 sde_hw_rotator_setup_timestamp_packet(ctx, mask, swts);
1627 offset += length;
1628 ts_length = sde_hw_rotator_get_regdma_segment(ctx) - wrptr;
1629 WARN_ON((length + ts_length) > SDE_HW_ROT_REGDMA_SEG_SIZE);
Alan Kwong9487de22016-01-16 22:06:36 -05001630
Alan Kwong6bc64622017-02-04 17:36:03 -08001631		/* ensure the command packet is issued before the submit command */
1632 wmb();
Alan Kwong9487de22016-01-16 22:06:36 -05001633
Alan Kwong6bc64622017-02-04 17:36:03 -08001634 if (queue_id == ROT_QUEUE_HIGH_PRIORITY) {
1635 SDE_ROTREG_WRITE(rot->mdss_base,
1636 REGDMA_CSR_REGDMA_QUEUE_0_SUBMIT,
1637 enableInt | (ts_length << 14) | offset);
1638 } else {
1639 SDE_ROTREG_WRITE(rot->mdss_base,
1640 REGDMA_CSR_REGDMA_QUEUE_1_SUBMIT,
1641 enableInt | (ts_length << 14) | offset);
1642 }
Alan Kwong9487de22016-01-16 22:06:36 -05001643 }
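	/*
	 * In stream buffer (inline) mode no timestamp packet is queued here;
	 * the software timestamp is instead updated from the regdma interrupt
	 * handler once the corresponding done interrupt fires.
	 */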
1644
Alan Kwong9487de22016-01-16 22:06:36 -05001645 /* Update command queue write ptr */
1646 sde_hw_rotator_put_regdma_segment(ctx, wrptr);
1647
1648 return ctx->timestamp;
1649}
1650
1651/*
1652 * sde_hw_rotator_wait_done_no_regdma - wait for non-regdma completion
1653 * @ctx: Pointer to rotator context
1654 * @queue_id: Priority queue identifier
1655 * @flags: Option flag
1656 */
1657static u32 sde_hw_rotator_wait_done_no_regdma(
1658 struct sde_hw_rotator_context *ctx,
1659 enum sde_rot_queue_prio queue_id, u32 flag)
1660{
1661 struct sde_hw_rotator *rot = ctx->rot;
1662 int rc = 0;
1663 u32 sts = 0;
1664 u32 status;
1665 unsigned long flags;
1666
1667 if (rot->irq_num >= 0) {
1668 SDEROT_DBG("Wait for Rotator completion\n");
1669 rc = wait_for_completion_timeout(&ctx->rot_comp,
Alan Kwong6bc64622017-02-04 17:36:03 -08001670 msecs_to_jiffies(rot->koff_timeout));
Alan Kwong9487de22016-01-16 22:06:36 -05001671
1672 spin_lock_irqsave(&rot->rotisr_lock, flags);
1673 status = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
1674 if (rc == 0) {
1675 /*
 1676			 * Timeout; there might be an error,
 1677			 * or the rotator is still busy
1678 */
1679 if (status & ROT_BUSY_BIT)
1680 SDEROT_ERR(
1681 "Timeout waiting for rotator done\n");
1682 else if (status & ROT_ERROR_BIT)
1683 SDEROT_ERR(
 1684					"Rotator reported error status\n");
1685 else
1686 SDEROT_WARN(
1687 "Timeout waiting, but rotator job is done!!\n");
1688
Alan Kwong818b7fc2016-07-24 22:07:41 -04001689 sde_hw_rotator_disable_irq(rot);
Alan Kwong9487de22016-01-16 22:06:36 -05001690 }
1691 spin_unlock_irqrestore(&rot->rotisr_lock, flags);
1692 } else {
1693 int cnt = 200;
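		/* no IRQ available: poll up to 200 x 500us (~100ms) for idle */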
1694
1695 do {
1696 udelay(500);
1697 status = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
1698 cnt--;
1699 } while ((cnt > 0) && (status & ROT_BUSY_BIT)
1700 && ((status & ROT_ERROR_BIT) == 0));
1701
1702 if (status & ROT_ERROR_BIT)
1703 SDEROT_ERR("Rotator error\n");
1704 else if (status & ROT_BUSY_BIT)
1705 SDEROT_ERR("Rotator busy\n");
1706
1707 SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
1708 ROT_DONE_CLEAR);
1709 }
1710
1711 sts = (status & ROT_ERROR_BIT) ? -ENODEV : 0;
1712
1713 return sts;
1714}
1715
1716/*
1717 * sde_hw_rotator_wait_done_regdma - wait for regdma completion
1718 * @ctx: Pointer to rotator context
1719 * @queue_id: Priority queue identifier
1720 * @flags: Option flag
1721 */
1722static u32 sde_hw_rotator_wait_done_regdma(
1723 struct sde_hw_rotator_context *ctx,
1724 enum sde_rot_queue_prio queue_id, u32 flag)
1725{
1726 struct sde_hw_rotator *rot = ctx->rot;
1727 int rc = 0;
1728 u32 status;
1729 u32 last_isr;
1730 u32 last_ts;
1731 u32 int_id;
Alan Kwong818b7fc2016-07-24 22:07:41 -04001732 u32 swts;
Alan Kwong9487de22016-01-16 22:06:36 -05001733 u32 sts = 0;
Alan Kwong9487de22016-01-16 22:06:36 -05001734 unsigned long flags;
1735
1736 if (rot->irq_num >= 0) {
1737 SDEROT_DBG("Wait for REGDMA completion, ctx:%p, ts:%X\n",
1738 ctx, ctx->timestamp);
Alan Kwong818b7fc2016-07-24 22:07:41 -04001739 rc = wait_event_timeout(ctx->regdma_waitq,
1740 !sde_hw_rotator_pending_swts(rot, ctx, &swts),
Alan Kwong6bc64622017-02-04 17:36:03 -08001741 msecs_to_jiffies(rot->koff_timeout));
Alan Kwong9487de22016-01-16 22:06:36 -05001742
Benjamin Chane7ca72e2016-12-22 18:42:34 -05001743 ATRACE_INT("sde_rot_done", 0);
Alan Kwong9487de22016-01-16 22:06:36 -05001744 spin_lock_irqsave(&rot->rotisr_lock, flags);
1745
1746 last_isr = ctx->last_regdma_isr_status;
1747 last_ts = ctx->last_regdma_timestamp;
1748 status = last_isr & REGDMA_INT_MASK;
1749 int_id = last_ts & 1;
1750 SDEROT_DBG("INT status:0x%X, INT id:%d, timestamp:0x%X\n",
1751 status, int_id, last_ts);
1752
1753 if (rc == 0 || (status & REGDMA_INT_ERR_MASK)) {
Alan Kwong818b7fc2016-07-24 22:07:41 -04001754 bool pending;
1755
1756 pending = sde_hw_rotator_pending_swts(rot, ctx, &swts);
Alan Kwong9487de22016-01-16 22:06:36 -05001757 SDEROT_ERR(
Alan Kwong818b7fc2016-07-24 22:07:41 -04001758				"Timeout waiting for regdma interrupt status, ts:0x%X/0x%X pending:%d\n",
1759 ctx->timestamp, swts, pending);
Alan Kwong9487de22016-01-16 22:06:36 -05001760
1761 if (status & REGDMA_WATCHDOG_INT)
1762 SDEROT_ERR("REGDMA watchdog interrupt\n");
1763 else if (status & REGDMA_INVALID_DESCRIPTOR)
1764 SDEROT_ERR("REGDMA invalid descriptor\n");
1765 else if (status & REGDMA_INCOMPLETE_CMD)
1766 SDEROT_ERR("REGDMA incomplete command\n");
1767 else if (status & REGDMA_INVALID_CMD)
1768 SDEROT_ERR("REGDMA invalid command\n");
1769
Alan Kwong818b7fc2016-07-24 22:07:41 -04001770 sde_hw_rotator_dump_status(rot);
Alan Kwong9487de22016-01-16 22:06:36 -05001771 status = ROT_ERROR_BIT;
Alan Kwong818b7fc2016-07-24 22:07:41 -04001772 } else {
1773 if (rc == 1)
1774 SDEROT_WARN(
1775 "REGDMA done but no irq, ts:0x%X/0x%X\n",
1776 ctx->timestamp, swts);
Alan Kwong9487de22016-01-16 22:06:36 -05001777 status = 0;
1778 }
1779
Alan Kwong9487de22016-01-16 22:06:36 -05001780 spin_unlock_irqrestore(&rot->rotisr_lock, flags);
1781 } else {
1782 int cnt = 200;
Alan Kwongb0679602016-11-27 17:04:13 -08001783 bool pending;
Alan Kwong9487de22016-01-16 22:06:36 -05001784
1785 do {
1786 udelay(500);
Alan Kwongb0679602016-11-27 17:04:13 -08001787 last_isr = SDE_ROTREG_READ(rot->mdss_base,
1788 REGDMA_CSR_REGDMA_INT_STATUS);
1789 pending = sde_hw_rotator_pending_swts(rot, ctx, &swts);
Alan Kwong9487de22016-01-16 22:06:36 -05001790 cnt--;
Alan Kwongb0679602016-11-27 17:04:13 -08001791 } while ((cnt > 0) && pending &&
1792 ((last_isr & REGDMA_INT_ERR_MASK) == 0));
Alan Kwong9487de22016-01-16 22:06:36 -05001793
Alan Kwongb0679602016-11-27 17:04:13 -08001794 if (last_isr & REGDMA_INT_ERR_MASK) {
1795 SDEROT_ERR("Rotator error, ts:0x%X/0x%X status:%x\n",
1796 ctx->timestamp, swts, last_isr);
1797 sde_hw_rotator_dump_status(rot);
1798 status = ROT_ERROR_BIT;
1799 } else if (pending) {
1800 SDEROT_ERR("Rotator timeout, ts:0x%X/0x%X status:%x\n",
1801 ctx->timestamp, swts, last_isr);
1802 sde_hw_rotator_dump_status(rot);
1803 status = ROT_ERROR_BIT;
1804 } else {
1805 status = 0;
1806 }
Alan Kwong9487de22016-01-16 22:06:36 -05001807
1808 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_INT_CLEAR,
Alan Kwongb0679602016-11-27 17:04:13 -08001809 last_isr);
Alan Kwong9487de22016-01-16 22:06:36 -05001810 }
1811
1812 sts = (status & ROT_ERROR_BIT) ? -ENODEV : 0;
1813
Benjamin Chan4ec1f1d2016-09-15 22:49:49 -04001814 if (status & ROT_ERROR_BIT)
Benjamin Chan2d6411a2017-03-28 18:01:53 -04001815 SDEROT_EVTLOG_TOUT_HANDLER("rot", "rot_dbg_bus",
1816 "vbif_dbg_bus", "panic");
Benjamin Chan4ec1f1d2016-09-15 22:49:49 -04001817
Alan Kwong9487de22016-01-16 22:06:36 -05001818 return sts;
1819}
1820
1821/*
1822 * setup_rotator_ops - setup callback functions for the low-level HAL
1823 * @ops: Pointer to low-level ops callback
1824 * @mode: Operation mode (non-regdma or regdma)
1825 */
1826static void setup_rotator_ops(struct sde_hw_rotator_ops *ops,
1827 enum sde_rotator_regdma_mode mode)
1828{
1829 ops->setup_rotator_fetchengine = sde_hw_rotator_setup_fetchengine;
1830 ops->setup_rotator_wbengine = sde_hw_rotator_setup_wbengine;
1831 if (mode == ROT_REGDMA_ON) {
1832 ops->start_rotator = sde_hw_rotator_start_regdma;
1833 ops->wait_rotator_done = sde_hw_rotator_wait_done_regdma;
1834 } else {
1835 ops->start_rotator = sde_hw_rotator_start_no_regdma;
1836 ops->wait_rotator_done = sde_hw_rotator_wait_done_no_regdma;
1837 }
1838}
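/*
 * A minimal usage sketch of the ops table above (illustrative only; sspp_cfg,
 * wb_cfg, the luts, downscale factors and flags stand for values prepared by
 * the caller, and ctx for an allocated rotator context):
 *
 *	setup_rotator_ops(&rot->ops, rot->mode);
 *	rot->ops.setup_rotator_fetchengine(ctx, ctx->q_id, &sspp_cfg,
 *			danger_lut, safe_lut, dnsc_w, dnsc_h, flags);
 *	rot->ops.setup_rotator_wbengine(ctx, ctx->q_id, &wb_cfg, flags);
 *	rot->ops.start_rotator(ctx, ctx->q_id);
 *	rot->ops.wait_rotator_done(ctx, ctx->q_id, 0);
 */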
1839
1840/*
1841 * sde_hw_rotator_swts_create - create software timestamp buffer
1842 * @rot: Pointer to rotator hw
1843 *
1844 * This buffer is used by regdma to keep track of last completed command.
1845 */
1846static int sde_hw_rotator_swts_create(struct sde_hw_rotator *rot)
1847{
1848 int rc = 0;
1849 struct ion_handle *handle;
1850 struct sde_mdp_img_data *data;
Abhijit Kulkarni298c8232016-09-26 22:32:10 -07001851 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
Alan Kwong9487de22016-01-16 22:06:36 -05001852 u32 bufsize = sizeof(int) * SDE_HW_ROT_REGDMA_TOTAL_CTX * 2;
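	/* one 32-bit slot per regdma context for each of the two queues */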
1853
Abhijit Kulkarni298c8232016-09-26 22:32:10 -07001854 rot->iclient = mdata->iclient;
Alan Kwong9487de22016-01-16 22:06:36 -05001855
1856 handle = ion_alloc(rot->iclient, bufsize, SZ_4K,
1857 ION_HEAP(ION_SYSTEM_HEAP_ID), 0);
1858 if (IS_ERR_OR_NULL(handle)) {
1859 SDEROT_ERR("ion memory allocation failed\n");
1860 return -ENOMEM;
1861 }
1862
1863 data = &rot->swts_buf;
1864 data->len = bufsize;
1865 data->srcp_dma_buf = ion_share_dma_buf(rot->iclient, handle);
1866 if (IS_ERR(data->srcp_dma_buf)) {
1867 SDEROT_ERR("ion_dma_buf setup failed\n");
1868 rc = -ENOMEM;
1869 goto imap_err;
1870 }
1871
1872 sde_smmu_ctrl(1);
1873
1874 data->srcp_attachment = sde_smmu_dma_buf_attach(data->srcp_dma_buf,
1875 &rot->pdev->dev, SDE_IOMMU_DOMAIN_ROT_UNSECURE);
1876 if (IS_ERR_OR_NULL(data->srcp_attachment)) {
1877 SDEROT_ERR("sde_smmu_dma_buf_attach error\n");
1878 rc = -ENOMEM;
1879 goto err_put;
1880 }
1881
1882 data->srcp_table = dma_buf_map_attachment(data->srcp_attachment,
1883 DMA_BIDIRECTIONAL);
1884 if (IS_ERR_OR_NULL(data->srcp_table)) {
1885 SDEROT_ERR("dma_buf_map_attachment error\n");
1886 rc = -ENOMEM;
1887 goto err_detach;
1888 }
1889
1890 rc = sde_smmu_map_dma_buf(data->srcp_dma_buf, data->srcp_table,
1891 SDE_IOMMU_DOMAIN_ROT_UNSECURE, &data->addr,
1892 &data->len, DMA_BIDIRECTIONAL);
Alan Kwong6ce448d2016-11-24 18:45:20 -08001893 if (rc < 0) {
Alan Kwong9487de22016-01-16 22:06:36 -05001894 SDEROT_ERR("smmu_map_dma_buf failed: (%d)\n", rc);
1895 goto err_unmap;
1896 }
1897
Alan Kwong6ce448d2016-11-24 18:45:20 -08001898 dma_buf_begin_cpu_access(data->srcp_dma_buf, DMA_FROM_DEVICE);
Alan Kwong9487de22016-01-16 22:06:36 -05001899 rot->swts_buffer = dma_buf_kmap(data->srcp_dma_buf, 0);
1900 if (IS_ERR_OR_NULL(rot->swts_buffer)) {
1901 SDEROT_ERR("ion kernel memory mapping failed\n");
1902 rc = IS_ERR(rot->swts_buffer);
1903 goto kmap_err;
1904 }
1905
1906 data->mapped = true;
1907 SDEROT_DBG("swts buffer mapped: %pad/%lx va:%p\n", &data->addr,
1908 data->len, rot->swts_buffer);
1909
1910 ion_free(rot->iclient, handle);
1911
1912 sde_smmu_ctrl(0);
1913
1914 return rc;
1915kmap_err:
1916 sde_smmu_unmap_dma_buf(data->srcp_table, SDE_IOMMU_DOMAIN_ROT_UNSECURE,
1917 DMA_FROM_DEVICE, data->srcp_dma_buf);
1918err_unmap:
1919 dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table,
1920 DMA_FROM_DEVICE);
1921err_detach:
1922 dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment);
1923err_put:
1924 dma_buf_put(data->srcp_dma_buf);
1925 data->srcp_dma_buf = NULL;
1926imap_err:
1927 ion_free(rot->iclient, handle);
1928
1929 return rc;
1930}
1931
1932/*
1933 * sde_hw_rotator_swtc_destroy - destroy software timestamp buffer
1934 * @rot: Pointer to rotator hw
1935 */
1936static void sde_hw_rotator_swtc_destroy(struct sde_hw_rotator *rot)
1937{
1938 struct sde_mdp_img_data *data;
1939
1940 data = &rot->swts_buf;
1941
Alan Kwong6ce448d2016-11-24 18:45:20 -08001942 dma_buf_end_cpu_access(data->srcp_dma_buf, DMA_FROM_DEVICE);
Alan Kwong9487de22016-01-16 22:06:36 -05001943 dma_buf_kunmap(data->srcp_dma_buf, 0, rot->swts_buffer);
1944
1945 sde_smmu_unmap_dma_buf(data->srcp_table, SDE_IOMMU_DOMAIN_ROT_UNSECURE,
1946 DMA_FROM_DEVICE, data->srcp_dma_buf);
1947 dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table,
1948 DMA_FROM_DEVICE);
1949 dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment);
1950 dma_buf_put(data->srcp_dma_buf);
1951 data->srcp_dma_buf = NULL;
1952}
1953
1954/*
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04001955 * sde_hw_rotator_pre_pmevent - SDE rotator core will call this before a
1956 * PM event occurs
1957 * @mgr: Pointer to rotator manager
1958 * @pmon: Boolean indicate an on/off power event
1959 */
1960void sde_hw_rotator_pre_pmevent(struct sde_rot_mgr *mgr, bool pmon)
1961{
1962 struct sde_hw_rotator *rot;
1963 u32 l_ts, h_ts, swts, hwts;
1964 u32 rotsts, regdmasts;
1965
1966 /*
1967 * Check last HW timestamp with SW timestamp before power off event.
 1968	 * If there is a mismatch, it is quite possible that the rotator HW
 1969	 * is either hung or has not finished the last submitted job. In that
 1970	 * case, it is best to trigger a timeout eventlog to capture useful
 1971	 * event log data for analysis.
1972 */
1973 if (!pmon && mgr && mgr->hw_data) {
1974 rot = mgr->hw_data;
1975 h_ts = atomic_read(&rot->timestamp[ROT_QUEUE_HIGH_PRIORITY]);
1976 l_ts = atomic_read(&rot->timestamp[ROT_QUEUE_LOW_PRIORITY]);
1977
 1978		/* construct the combined timestamp */
1979 swts = (h_ts & SDE_REGDMA_SWTS_MASK) |
1980 ((l_ts & SDE_REGDMA_SWTS_MASK) <<
1981 SDE_REGDMA_SWTS_SHIFT);
1982
1983 /* Need to turn on clock to access rotator register */
1984 sde_rotator_clk_ctrl(mgr, true);
1985 hwts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);
1986 regdmasts = SDE_ROTREG_READ(rot->mdss_base,
1987 REGDMA_CSR_REGDMA_BLOCK_STATUS);
1988 rotsts = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
1989
1990 SDEROT_DBG(
1991 "swts:0x%x, hwts:0x%x, regdma-sts:0x%x, rottop-sts:0x%x\n",
1992 swts, hwts, regdmasts, rotsts);
1993 SDEROT_EVTLOG(swts, hwts, regdmasts, rotsts);
1994
1995 if ((swts != hwts) && ((regdmasts & REGDMA_BUSY) ||
1996 (rotsts & ROT_STATUS_MASK))) {
1997 SDEROT_ERR(
1998 "Mismatch SWTS with HWTS: swts:0x%x, hwts:0x%x, regdma-sts:0x%x, rottop-sts:0x%x\n",
1999 swts, hwts, regdmasts, rotsts);
Benjamin Chan2d6411a2017-03-28 18:01:53 -04002000 SDEROT_EVTLOG_TOUT_HANDLER("rot", "rot_dbg_bus",
2001 "vbif_dbg_bus", "panic");
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04002002 }
2003
2004 /* Turn off rotator clock after checking rotator registers */
2005 sde_rotator_clk_ctrl(mgr, false);
2006 }
2007}
2008
2009/*
2010 * sde_hw_rotator_post_pmevent - SDE rotator core will call this after a
2011 * PM event occurs
2012 * @mgr: Pointer to rotator manager
2013 * @pmon: Boolean indicate an on/off power event
2014 */
2015void sde_hw_rotator_post_pmevent(struct sde_rot_mgr *mgr, bool pmon)
2016{
2017 struct sde_hw_rotator *rot;
2018 u32 l_ts, h_ts, swts;
2019
2020 /*
 2021	 * After a power on event, the rotator HW is reset to its default settings.
2022 * It is necessary to synchronize the SW timestamp with the HW.
2023 */
2024 if (pmon && mgr && mgr->hw_data) {
2025 rot = mgr->hw_data;
2026 h_ts = atomic_read(&rot->timestamp[ROT_QUEUE_HIGH_PRIORITY]);
2027 l_ts = atomic_read(&rot->timestamp[ROT_QUEUE_LOW_PRIORITY]);
2028
 2029		/* construct the combined timestamp */
2030 swts = (h_ts & SDE_REGDMA_SWTS_MASK) |
2031 ((l_ts & SDE_REGDMA_SWTS_MASK) <<
2032 SDE_REGDMA_SWTS_SHIFT);
2033
2034 SDEROT_DBG("swts:0x%x, h_ts:0x%x, l_ts;0x%x\n",
2035 swts, h_ts, l_ts);
2036 SDEROT_EVTLOG(swts, h_ts, l_ts);
2037 rot->reset_hw_ts = true;
2038 rot->last_hw_ts = swts;
2039 }
2040}
2041
2042/*
Alan Kwong9487de22016-01-16 22:06:36 -05002043 * sde_hw_rotator_destroy - Destroy hw rotator and free allocated resources
2044 * @mgr: Pointer to rotator manager
2045 */
2046static void sde_hw_rotator_destroy(struct sde_rot_mgr *mgr)
2047{
2048 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
2049 struct sde_hw_rotator *rot;
2050
2051 if (!mgr || !mgr->pdev || !mgr->hw_data) {
2052 SDEROT_ERR("null parameters\n");
2053 return;
2054 }
2055
2056 rot = mgr->hw_data;
2057 if (rot->irq_num >= 0)
2058 devm_free_irq(&mgr->pdev->dev, rot->irq_num, mdata);
2059
2060 if (rot->mode == ROT_REGDMA_ON)
2061 sde_hw_rotator_swtc_destroy(rot);
2062
2063 devm_kfree(&mgr->pdev->dev, mgr->hw_data);
2064 mgr->hw_data = NULL;
2065}
2066
2067/*
2068 * sde_hw_rotator_alloc_ext - allocate rotator resource from rotator hw
2069 * @mgr: Pointer to rotator manager
2070 * @pipe_id: pipe identifier (not used)
2071 * @wb_id: writeback identifier/priority queue identifier
2072 *
2073 * This function allocates a new hw rotator resource for the given priority.
2074 */
2075static struct sde_rot_hw_resource *sde_hw_rotator_alloc_ext(
2076 struct sde_rot_mgr *mgr, u32 pipe_id, u32 wb_id)
2077{
2078 struct sde_hw_rotator_resource_info *resinfo;
2079
2080 if (!mgr || !mgr->hw_data) {
2081 SDEROT_ERR("null parameters\n");
2082 return NULL;
2083 }
2084
2085 /*
2086 * Allocate rotator resource info. Each allocation is per
2087 * HW priority queue
2088 */
2089 resinfo = devm_kzalloc(&mgr->pdev->dev, sizeof(*resinfo), GFP_KERNEL);
2090 if (!resinfo) {
 2091		SDEROT_ERR("Failed to allocate HW rotator resource info\n");
2092 return NULL;
2093 }
2094
2095 resinfo->rot = mgr->hw_data;
2096 resinfo->hw.wb_id = wb_id;
2097 atomic_set(&resinfo->hw.num_active, 0);
2098 init_waitqueue_head(&resinfo->hw.wait_queue);
2099
2100 /* For non-regdma, only support one active session */
2101 if (resinfo->rot->mode == ROT_REGDMA_OFF)
2102 resinfo->hw.max_active = 1;
2103 else {
2104 resinfo->hw.max_active = SDE_HW_ROT_REGDMA_TOTAL_CTX - 1;
2105
2106 if (resinfo->rot->iclient == NULL)
2107 sde_hw_rotator_swts_create(resinfo->rot);
2108 }
2109
Alan Kwongf987ea32016-07-06 12:11:44 -04002110 if (resinfo->rot->irq_num >= 0)
Alan Kwong818b7fc2016-07-24 22:07:41 -04002111 sde_hw_rotator_enable_irq(resinfo->rot);
Alan Kwongf987ea32016-07-06 12:11:44 -04002112
Alan Kwong9487de22016-01-16 22:06:36 -05002113 SDEROT_DBG("New rotator resource:%p, priority:%d\n",
2114 resinfo, wb_id);
2115
2116 return &resinfo->hw;
2117}
2118
2119/*
2120 * sde_hw_rotator_free_ext - free the given rotator resource
2121 * @mgr: Pointer to rotator manager
2122 * @hw: Pointer to rotator resource
2123 */
2124static void sde_hw_rotator_free_ext(struct sde_rot_mgr *mgr,
2125 struct sde_rot_hw_resource *hw)
2126{
2127 struct sde_hw_rotator_resource_info *resinfo;
2128
2129 if (!mgr || !mgr->hw_data)
2130 return;
2131
2132 resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
2133
2134 SDEROT_DBG(
2135 "Free rotator resource:%p, priority:%d, active:%d, pending:%d\n",
2136 resinfo, hw->wb_id, atomic_read(&hw->num_active),
2137 hw->pending_count);
2138
Alan Kwongf987ea32016-07-06 12:11:44 -04002139 if (resinfo->rot->irq_num >= 0)
Alan Kwong818b7fc2016-07-24 22:07:41 -04002140 sde_hw_rotator_disable_irq(resinfo->rot);
Alan Kwongf987ea32016-07-06 12:11:44 -04002141
Alan Kwong9487de22016-01-16 22:06:36 -05002142 devm_kfree(&mgr->pdev->dev, resinfo);
2143}
2144
2145/*
2146 * sde_hw_rotator_alloc_rotctx - allocate rotator context
2147 * @rot: Pointer to rotator hw
2148 * @hw: Pointer to rotator resource
2149 * @session_id: Session identifier of this context
Clarence Ip9e6c3302017-06-02 11:02:57 -04002150 * @sequence_id: Sequence identifier of this request
Alan Kwong6bc64622017-02-04 17:36:03 -08002151 * @sbuf_mode: true if stream buffer is requested
Alan Kwong9487de22016-01-16 22:06:36 -05002152 *
2153 * This function allocates a new rotator context for the given session id.
2154 */
2155static struct sde_hw_rotator_context *sde_hw_rotator_alloc_rotctx(
2156 struct sde_hw_rotator *rot,
2157 struct sde_rot_hw_resource *hw,
Alan Kwong6bc64622017-02-04 17:36:03 -08002158 u32 session_id,
Clarence Ip9e6c3302017-06-02 11:02:57 -04002159 u32 sequence_id,
Alan Kwong6bc64622017-02-04 17:36:03 -08002160 bool sbuf_mode)
Alan Kwong9487de22016-01-16 22:06:36 -05002161{
2162 struct sde_hw_rotator_context *ctx;
2163
2164 /* Allocate rotator context */
2165 ctx = devm_kzalloc(&rot->pdev->dev, sizeof(*ctx), GFP_KERNEL);
2166 if (!ctx) {
 2167		SDEROT_ERR("Failed to allocate HW rotator context\n");
2168 return NULL;
2169 }
2170
2171 ctx->rot = rot;
2172 ctx->q_id = hw->wb_id;
2173 ctx->session_id = session_id;
Clarence Ip9e6c3302017-06-02 11:02:57 -04002174 ctx->sequence_id = sequence_id;
Alan Kwong9487de22016-01-16 22:06:36 -05002175 ctx->hwres = hw;
2176 ctx->timestamp = atomic_add_return(1, &rot->timestamp[ctx->q_id]);
2177 ctx->timestamp &= SDE_REGDMA_SWTS_MASK;
2178 ctx->is_secure = false;
Alan Kwong6bc64622017-02-04 17:36:03 -08002179 ctx->sbuf_mode = sbuf_mode;
2180 INIT_LIST_HEAD(&ctx->list);
Alan Kwong9487de22016-01-16 22:06:36 -05002181
2182 ctx->regdma_base = rot->cmd_wr_ptr[ctx->q_id]
2183 [sde_hw_rotator_get_regdma_ctxidx(ctx)];
2184 ctx->regdma_wrptr = ctx->regdma_base;
2185 ctx->ts_addr = (dma_addr_t)((u32 *)rot->swts_buf.addr +
2186 ctx->q_id * SDE_HW_ROT_REGDMA_TOTAL_CTX +
2187 sde_hw_rotator_get_regdma_ctxidx(ctx));
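	/*
	 * Each priority queue owns SDE_HW_ROT_REGDMA_TOTAL_CTX consecutive
	 * 32-bit slots in the shared swts buffer; ts_addr points at the slot
	 * reserved for this context.
	 */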
2188
Alan Kwong818b7fc2016-07-24 22:07:41 -04002189 ctx->last_regdma_timestamp = SDE_REGDMA_SWTS_INVALID;
2190
Alan Kwong9487de22016-01-16 22:06:36 -05002191 init_completion(&ctx->rot_comp);
Alan Kwong818b7fc2016-07-24 22:07:41 -04002192 init_waitqueue_head(&ctx->regdma_waitq);
Alan Kwong9487de22016-01-16 22:06:36 -05002193
2194 /* Store rotator context for lookup purpose */
2195 sde_hw_rotator_put_ctx(ctx);
2196
2197 SDEROT_DBG(
Alan Kwong6bc64622017-02-04 17:36:03 -08002198 "New rot CTX:%p, ctxidx:%d, session-id:%d, prio:%d, timestamp:%X, active:%d sbuf:%d\n",
Alan Kwong9487de22016-01-16 22:06:36 -05002199 ctx, sde_hw_rotator_get_regdma_ctxidx(ctx), ctx->session_id,
2200 ctx->q_id, ctx->timestamp,
Alan Kwong6bc64622017-02-04 17:36:03 -08002201 atomic_read(&ctx->hwres->num_active),
2202 ctx->sbuf_mode);
Alan Kwong9487de22016-01-16 22:06:36 -05002203
2204 return ctx;
2205}
2206
2207/*
2208 * sde_hw_rotator_free_rotctx - free the given rotator context
2209 * @rot: Pointer to rotator hw
2210 * @ctx: Pointer to rotator context
2211 */
2212static void sde_hw_rotator_free_rotctx(struct sde_hw_rotator *rot,
2213 struct sde_hw_rotator_context *ctx)
2214{
2215 if (!rot || !ctx)
2216 return;
2217
2218 SDEROT_DBG(
Alan Kwong6bc64622017-02-04 17:36:03 -08002219 "Free rot CTX:%p, ctxidx:%d, session-id:%d, prio:%d, timestamp:%X, active:%d sbuf:%d\n",
Alan Kwong9487de22016-01-16 22:06:36 -05002220 ctx, sde_hw_rotator_get_regdma_ctxidx(ctx), ctx->session_id,
2221 ctx->q_id, ctx->timestamp,
Alan Kwong6bc64622017-02-04 17:36:03 -08002222 atomic_read(&ctx->hwres->num_active),
2223 ctx->sbuf_mode);
Alan Kwong9487de22016-01-16 22:06:36 -05002224
Benjamin Chanc3e185f2016-11-08 21:48:21 -05002225	/* Remove rotator context from the lookup table */
2226 sde_hw_rotator_clr_ctx(ctx);
Alan Kwong9487de22016-01-16 22:06:36 -05002227
2228 devm_kfree(&rot->pdev->dev, ctx);
2229}
2230
2231/*
2232 * sde_hw_rotator_config - configure hw for the given rotation entry
2233 * @hw: Pointer to rotator resource
2234 * @entry: Pointer to rotation entry
2235 *
2236 * This function setup the fetch/writeback/rotator blocks, as well as VBIF
2237 * based on the given rotation entry.
2238 */
2239static int sde_hw_rotator_config(struct sde_rot_hw_resource *hw,
2240 struct sde_rot_entry *entry)
2241{
2242 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
2243 struct sde_hw_rotator *rot;
2244 struct sde_hw_rotator_resource_info *resinfo;
2245 struct sde_hw_rotator_context *ctx;
2246 struct sde_hw_rot_sspp_cfg sspp_cfg;
2247 struct sde_hw_rot_wb_cfg wb_cfg;
2248 u32 danger_lut = 0; /* applicable for realtime client only */
2249 u32 safe_lut = 0; /* applicable for realtime client only */
2250 u32 flags = 0;
Benjamin Chana9dd3052017-02-14 17:39:32 -05002251 u32 rststs = 0;
Alan Kwong9487de22016-01-16 22:06:36 -05002252 struct sde_rotation_item *item;
Alan Kwong6bc64622017-02-04 17:36:03 -08002253 int ret;
Alan Kwong9487de22016-01-16 22:06:36 -05002254
2255 if (!hw || !entry) {
2256 SDEROT_ERR("null hw resource/entry\n");
2257 return -EINVAL;
2258 }
2259
2260 resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
2261 rot = resinfo->rot;
2262 item = &entry->item;
2263
Alan Kwong6bc64622017-02-04 17:36:03 -08002264 ctx = sde_hw_rotator_alloc_rotctx(rot, hw, item->session_id,
Clarence Ip9e6c3302017-06-02 11:02:57 -04002265 item->sequence_id, item->output.sbuf);
Alan Kwong9487de22016-01-16 22:06:36 -05002266 if (!ctx) {
2267 SDEROT_ERR("Failed allocating rotator context!!\n");
2268 return -EINVAL;
2269 }
2270
Alan Kwong6bc64622017-02-04 17:36:03 -08002271 /* save entry for debugging purposes */
2272 ctx->last_entry = entry;
2273
2274 if (test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map)) {
2275 if (entry->dst_buf.sbuf) {
2276 u32 op_mode;
2277
2278 if (entry->item.trigger ==
2279 SDE_ROTATOR_TRIGGER_COMMAND)
2280 ctx->start_ctrl = (rot->cmd_trigger << 4);
2281 else if (entry->item.trigger ==
2282 SDE_ROTATOR_TRIGGER_VIDEO)
2283 ctx->start_ctrl = (rot->vid_trigger << 4);
2284 else
2285 ctx->start_ctrl = 0;
2286
2287 ctx->sys_cache_mode = BIT(15) |
2288 ((item->output.scid & 0x1f) << 8) |
2289 (item->output.writeback ? 0x5 : 0);
2290
2291 ctx->op_mode = BIT(4) |
2292 ((ctx->rot->sbuf_headroom & 0xff) << 8);
2293
2294 /* detect transition to inline mode */
2295 op_mode = (SDE_ROTREG_READ(rot->mdss_base,
2296 ROTTOP_OP_MODE) >> 4) & 0x3;
2297 if (!op_mode) {
2298 u32 status;
2299
2300 status = SDE_ROTREG_READ(rot->mdss_base,
2301 ROTTOP_STATUS);
2302 if (status & BIT(0)) {
2303 SDEROT_ERR("rotator busy 0x%x\n",
2304 status);
2305 sde_hw_rotator_dump_status(rot);
2306 SDEROT_EVTLOG_TOUT_HANDLER("rot",
2307 "vbif_dbg_bus",
2308 "panic");
2309 }
2310 }
2311
2312 } else {
2313 ctx->start_ctrl = BIT(0);
2314 ctx->sys_cache_mode = 0;
2315 ctx->op_mode = 0;
2316 }
2317 } else {
2318 ctx->start_ctrl = BIT(0);
2319 }
2320
2321 SDEROT_EVTLOG(ctx->start_ctrl, ctx->sys_cache_mode, ctx->op_mode);
2322
Benjamin Chana9dd3052017-02-14 17:39:32 -05002323 /*
 2324	 * If the rotator HW was reset but the PM event notification was
 2325	 * missed, we need to initialize the SW timestamp automatically.
2326 */
2327 rststs = SDE_ROTREG_READ(rot->mdss_base, REGDMA_RESET_STATUS_REG);
2328 if (!rot->reset_hw_ts && rststs) {
2329 u32 l_ts, h_ts, swts;
2330
2331 swts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);
2332 h_ts = atomic_read(&rot->timestamp[ROT_QUEUE_HIGH_PRIORITY]);
2333 l_ts = atomic_read(&rot->timestamp[ROT_QUEUE_LOW_PRIORITY]);
2334 SDEROT_EVTLOG(0xbad0, rststs, swts, h_ts, l_ts);
2335
2336 if (ctx->q_id == ROT_QUEUE_HIGH_PRIORITY)
2337 h_ts = (h_ts - 1) & SDE_REGDMA_SWTS_MASK;
2338 else
2339 l_ts = (l_ts - 1) & SDE_REGDMA_SWTS_MASK;
2340
 2341		/* construct the combined timestamp */
2342 swts = (h_ts & SDE_REGDMA_SWTS_MASK) |
2343 ((l_ts & SDE_REGDMA_SWTS_MASK) <<
2344 SDE_REGDMA_SWTS_SHIFT);
2345
2346 SDEROT_DBG("swts:0x%x, h_ts:0x%x, l_ts;0x%x\n",
2347 swts, h_ts, l_ts);
2348 SDEROT_EVTLOG(0x900d, swts, h_ts, l_ts);
2349 rot->last_hw_ts = swts;
2350
2351 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG,
2352 rot->last_hw_ts);
2353 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_RESET_STATUS_REG, 0);
2354 /* ensure write is issued to the rotator HW */
2355 wmb();
2356 }
2357
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04002358 if (rot->reset_hw_ts) {
2359 SDEROT_EVTLOG(rot->last_hw_ts);
2360 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG,
2361 rot->last_hw_ts);
Benjamin Chana9dd3052017-02-14 17:39:32 -05002362 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_RESET_STATUS_REG, 0);
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04002363 /* ensure write is issued to the rotator HW */
2364 wmb();
2365 rot->reset_hw_ts = false;
2366 }
2367
Alan Kwong9487de22016-01-16 22:06:36 -05002368 flags = (item->flags & SDE_ROTATION_FLIP_LR) ?
2369 SDE_ROT_FLAG_FLIP_LR : 0;
2370 flags |= (item->flags & SDE_ROTATION_FLIP_UD) ?
2371 SDE_ROT_FLAG_FLIP_UD : 0;
2372 flags |= (item->flags & SDE_ROTATION_90) ?
2373 SDE_ROT_FLAG_ROT_90 : 0;
2374 flags |= (item->flags & SDE_ROTATION_DEINTERLACE) ?
2375 SDE_ROT_FLAG_DEINTERLACE : 0;
2376 flags |= (item->flags & SDE_ROTATION_SECURE) ?
2377 SDE_ROT_FLAG_SECURE_OVERLAY_SESSION : 0;
Abhijit Kulkarni298c8232016-09-26 22:32:10 -07002378 flags |= (item->flags & SDE_ROTATION_SECURE_CAMERA) ?
2379 SDE_ROT_FLAG_SECURE_CAMERA_SESSION : 0;
2380
Alan Kwong9487de22016-01-16 22:06:36 -05002381
2382 sspp_cfg.img_width = item->input.width;
2383 sspp_cfg.img_height = item->input.height;
Benjamin Chan99eb63b2016-12-21 15:45:26 -05002384 sspp_cfg.fps = entry->perf->config.frame_rate;
2385 sspp_cfg.bw = entry->perf->bw;
Alan Kwong9487de22016-01-16 22:06:36 -05002386 sspp_cfg.fmt = sde_get_format_params(item->input.format);
2387 if (!sspp_cfg.fmt) {
2388 SDEROT_ERR("null format\n");
Alan Kwong6bc64622017-02-04 17:36:03 -08002389 ret = -EINVAL;
2390 goto error;
Alan Kwong9487de22016-01-16 22:06:36 -05002391 }
2392 sspp_cfg.src_rect = &item->src_rect;
2393 sspp_cfg.data = &entry->src_buf;
2394 sde_mdp_get_plane_sizes(sspp_cfg.fmt, item->input.width,
2395 item->input.height, &sspp_cfg.src_plane,
2396 0, /* No bwc_mode */
2397 (flags & SDE_ROT_FLAG_SOURCE_ROTATED_90) ?
2398 true : false);
2399
2400 rot->ops.setup_rotator_fetchengine(ctx, ctx->q_id,
Benjamin Chanfb6faa32016-08-16 17:21:01 -04002401 &sspp_cfg, danger_lut, safe_lut,
2402 entry->dnsc_factor_w, entry->dnsc_factor_h, flags);
Alan Kwong9487de22016-01-16 22:06:36 -05002403
2404 wb_cfg.img_width = item->output.width;
2405 wb_cfg.img_height = item->output.height;
Benjamin Chan99eb63b2016-12-21 15:45:26 -05002406 wb_cfg.fps = entry->perf->config.frame_rate;
2407 wb_cfg.bw = entry->perf->bw;
Alan Kwong9487de22016-01-16 22:06:36 -05002408 wb_cfg.fmt = sde_get_format_params(item->output.format);
2409 wb_cfg.dst_rect = &item->dst_rect;
2410 wb_cfg.data = &entry->dst_buf;
2411 sde_mdp_get_plane_sizes(wb_cfg.fmt, item->output.width,
2412 item->output.height, &wb_cfg.dst_plane,
2413 0, /* No bwc_mode */
2414 (flags & SDE_ROT_FLAG_ROT_90) ? true : false);
2415
2416 wb_cfg.v_downscale_factor = entry->dnsc_factor_h;
2417 wb_cfg.h_downscale_factor = entry->dnsc_factor_w;
Alan Kwong498d59f2017-02-11 18:56:34 -08002418 wb_cfg.prefill_bw = item->prefill_bw;
Alan Kwong9487de22016-01-16 22:06:36 -05002419
2420 rot->ops.setup_rotator_wbengine(ctx, ctx->q_id, &wb_cfg, flags);
2421
2422 /* setup VA mapping for debugfs */
2423 if (rot->dbgmem) {
2424 sde_hw_rotator_map_vaddr(&ctx->src_dbgbuf,
2425 &item->input,
2426 &entry->src_buf);
2427
2428 sde_hw_rotator_map_vaddr(&ctx->dst_dbgbuf,
2429 &item->output,
2430 &entry->dst_buf);
2431 }
2432
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04002433 SDEROT_EVTLOG(ctx->timestamp, flags,
2434 item->input.width, item->input.height,
Benjamin Chan53e3bce2016-08-31 14:43:29 -04002435 item->output.width, item->output.height,
Benjamin Chan59a06052017-01-12 18:06:03 -05002436 entry->src_buf.p[0].addr, entry->dst_buf.p[0].addr,
Benjamin Chan1b94f952017-01-23 17:42:30 -05002437 item->input.format, item->output.format,
2438 entry->perf->config.frame_rate);
Benjamin Chan53e3bce2016-08-31 14:43:29 -04002439
Veera Sundaram Sankaran3e539fe2017-05-10 17:03:32 -07002440 if (!ctx->sbuf_mode && mdata->default_ot_rd_limit) {
Alan Kwong9487de22016-01-16 22:06:36 -05002441 struct sde_mdp_set_ot_params ot_params;
2442
2443 memset(&ot_params, 0, sizeof(struct sde_mdp_set_ot_params));
2444 ot_params.xin_id = XIN_SSPP;
2445 ot_params.num = 0; /* not used */
Alan Kwongeffb5ee2016-03-12 19:47:45 -05002446 ot_params.width = entry->perf->config.input.width;
2447 ot_params.height = entry->perf->config.input.height;
2448 ot_params.fps = entry->perf->config.frame_rate;
Alan Kwong9487de22016-01-16 22:06:36 -05002449 ot_params.reg_off_vbif_lim_conf = MMSS_VBIF_RD_LIM_CONF;
2450 ot_params.reg_off_mdp_clk_ctrl =
2451 MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0;
2452 ot_params.bit_off_mdp_clk_ctrl =
2453 MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN0;
Benjamin Chan99eb63b2016-12-21 15:45:26 -05002454 ot_params.fmt = ctx->is_traffic_shaping ?
2455 SDE_PIX_FMT_ABGR_8888 :
2456 entry->perf->config.input.format;
Benjamin Chan1b94f952017-01-23 17:42:30 -05002457 ot_params.rotsts_base = rot->mdss_base + ROTTOP_STATUS;
2458 ot_params.rotsts_busy_mask = ROT_BUSY_BIT;
Alan Kwong9487de22016-01-16 22:06:36 -05002459 sde_mdp_set_ot_limit(&ot_params);
2460 }
2461
Veera Sundaram Sankaran3e539fe2017-05-10 17:03:32 -07002462 if (!ctx->sbuf_mode && mdata->default_ot_wr_limit) {
Alan Kwong9487de22016-01-16 22:06:36 -05002463 struct sde_mdp_set_ot_params ot_params;
2464
2465 memset(&ot_params, 0, sizeof(struct sde_mdp_set_ot_params));
2466 ot_params.xin_id = XIN_WRITEBACK;
2467 ot_params.num = 0; /* not used */
Alan Kwongeffb5ee2016-03-12 19:47:45 -05002468 ot_params.width = entry->perf->config.input.width;
2469 ot_params.height = entry->perf->config.input.height;
2470 ot_params.fps = entry->perf->config.frame_rate;
Alan Kwong9487de22016-01-16 22:06:36 -05002471 ot_params.reg_off_vbif_lim_conf = MMSS_VBIF_WR_LIM_CONF;
2472 ot_params.reg_off_mdp_clk_ctrl =
2473 MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0;
2474 ot_params.bit_off_mdp_clk_ctrl =
2475 MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN1;
Benjamin Chan99eb63b2016-12-21 15:45:26 -05002476 ot_params.fmt = ctx->is_traffic_shaping ?
2477 SDE_PIX_FMT_ABGR_8888 :
2478 entry->perf->config.input.format;
Benjamin Chan1b94f952017-01-23 17:42:30 -05002479 ot_params.rotsts_base = rot->mdss_base + ROTTOP_STATUS;
2480 ot_params.rotsts_busy_mask = ROT_BUSY_BIT;
Alan Kwong9487de22016-01-16 22:06:36 -05002481 sde_mdp_set_ot_limit(&ot_params);
2482 }
2483
2484 if (test_bit(SDE_QOS_PER_PIPE_LUT, mdata->sde_qos_map)) {
2485 u32 qos_lut = 0; /* low priority for nrt read client */
2486
2487 trace_rot_perf_set_qos_luts(XIN_SSPP, sspp_cfg.fmt->format,
2488 qos_lut, sde_mdp_is_linear_format(sspp_cfg.fmt));
2489
2490 SDE_ROTREG_WRITE(rot->mdss_base, ROT_SSPP_CREQ_LUT, qos_lut);
2491 }
2492
Veera Sundaram Sankarane15dd222017-04-20 08:13:08 -07002493 /* VBIF QoS and other settings */
Veera Sundaram Sankaran3e539fe2017-05-10 17:03:32 -07002494 if (!ctx->sbuf_mode)
2495 sde_hw_rotator_vbif_setting(rot);
Benjamin Chan2d6411a2017-03-28 18:01:53 -04002496
Alan Kwong9487de22016-01-16 22:06:36 -05002497 return 0;
Alan Kwong6bc64622017-02-04 17:36:03 -08002498
2499error:
2500 sde_hw_rotator_free_rotctx(rot, ctx);
2501 return ret;
Alan Kwong9487de22016-01-16 22:06:36 -05002502}
2503
2504/*
2505 * sde_hw_rotator_kickoff - kickoff processing on the given entry
2506 * @hw: Pointer to rotator resource
2507 * @entry: Pointer to rotation entry
2508 */
2509static int sde_hw_rotator_kickoff(struct sde_rot_hw_resource *hw,
2510 struct sde_rot_entry *entry)
2511{
2512 struct sde_hw_rotator *rot;
2513 struct sde_hw_rotator_resource_info *resinfo;
2514 struct sde_hw_rotator_context *ctx;
Alan Kwong9487de22016-01-16 22:06:36 -05002515
2516 if (!hw || !entry) {
2517 SDEROT_ERR("null hw resource/entry\n");
2518 return -EINVAL;
2519 }
2520
2521 resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
2522 rot = resinfo->rot;
2523
2524 /* Lookup rotator context from session-id */
Clarence Ip9e6c3302017-06-02 11:02:57 -04002525 ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id,
2526 entry->item.sequence_id, hw->wb_id);
Alan Kwong9487de22016-01-16 22:06:36 -05002527 if (!ctx) {
 2528		SDEROT_ERR("Cannot locate rotator ctx from session id:%d\n",
2529 entry->item.session_id);
Benjamin Chan62b94ed2016-08-18 23:55:21 -04002530 return -EINVAL;
Alan Kwong9487de22016-01-16 22:06:36 -05002531 }
Alan Kwong9487de22016-01-16 22:06:36 -05002532
Alan Kwong9487de22016-01-16 22:06:36 -05002533 rot->ops.start_rotator(ctx, ctx->q_id);
2534
2535 return 0;
2536}
2537
2538/*
2539 * sde_hw_rotator_wait4done - wait for completion notification
2540 * @hw: Pointer to rotator resource
2541 * @entry: Pointer to rotation entry
2542 *
2543 * This function blocks until the given entry is complete, error
2544 * is detected, or timeout.
2545 */
2546static int sde_hw_rotator_wait4done(struct sde_rot_hw_resource *hw,
2547 struct sde_rot_entry *entry)
2548{
2549 struct sde_hw_rotator *rot;
2550 struct sde_hw_rotator_resource_info *resinfo;
2551 struct sde_hw_rotator_context *ctx;
2552 int ret;
2553
2554 if (!hw || !entry) {
2555 SDEROT_ERR("null hw resource/entry\n");
2556 return -EINVAL;
2557 }
2558
2559 resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
2560 rot = resinfo->rot;
2561
2562 /* Lookup rotator context from session-id */
Clarence Ip9e6c3302017-06-02 11:02:57 -04002563 ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id,
2564 entry->item.sequence_id, hw->wb_id);
Alan Kwong9487de22016-01-16 22:06:36 -05002565 if (!ctx) {
 2566		SDEROT_ERR("Cannot locate rotator ctx from session id:%d\n",
2567 entry->item.session_id);
Benjamin Chan62b94ed2016-08-18 23:55:21 -04002568 return -EINVAL;
Alan Kwong9487de22016-01-16 22:06:36 -05002569 }
Alan Kwong9487de22016-01-16 22:06:36 -05002570
2571 ret = rot->ops.wait_rotator_done(ctx, ctx->q_id, 0);
2572
Alan Kwong9487de22016-01-16 22:06:36 -05002573 if (rot->dbgmem) {
2574 sde_hw_rotator_unmap_vaddr(&ctx->src_dbgbuf);
2575 sde_hw_rotator_unmap_vaddr(&ctx->dst_dbgbuf);
2576 }
2577
 2578	/* Current rotator context job is finished, time to free up */
2579 sde_hw_rotator_free_rotctx(rot, ctx);
2580
2581 return ret;
2582}
2583
2584/*
2585 * sde_rotator_hw_rev_init - setup feature and/or capability bitmask
2586 * @rot: Pointer to hw rotator
2587 *
2588 * This function initializes feature and/or capability bitmask based on
2589 * h/w version read from the device.
2590 */
2591static int sde_rotator_hw_rev_init(struct sde_hw_rotator *rot)
2592{
2593 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
2594 u32 hw_version;
2595
2596 if (!mdata) {
2597 SDEROT_ERR("null rotator data\n");
2598 return -EINVAL;
2599 }
2600
2601 hw_version = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_HW_VERSION);
2602 SDEROT_DBG("hw version %8.8x\n", hw_version);
2603
2604 clear_bit(SDE_QOS_PER_PIPE_IB, mdata->sde_qos_map);
2605 set_bit(SDE_QOS_OVERHEAD_FACTOR, mdata->sde_qos_map);
Alan Kwong9487de22016-01-16 22:06:36 -05002606 set_bit(SDE_QOS_OTLIM, mdata->sde_qos_map);
2607 set_bit(SDE_QOS_PER_PIPE_LUT, mdata->sde_qos_map);
2608 clear_bit(SDE_QOS_SIMPLIFIED_PREFILL, mdata->sde_qos_map);
2609
2610 set_bit(SDE_CAPS_R3_WB, mdata->sde_caps_map);
2611
Alan Kwong6bc64622017-02-04 17:36:03 -08002612 /* features exposed via rotator top h/w version */
Benjamin Chanfb6faa32016-08-16 17:21:01 -04002613 if (hw_version != SDE_ROT_TYPE_V1_0) {
2614 SDEROT_DBG("Supporting 1.5 downscale for SDE Rotator\n");
2615 set_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map);
2616 }
2617
Abhijit Kulkarni298c8232016-09-26 22:32:10 -07002618 set_bit(SDE_CAPS_SEC_ATTACH_DETACH_SMMU, mdata->sde_caps_map);
2619
Benjamin Chan53e3bce2016-08-31 14:43:29 -04002620 mdata->nrt_vbif_dbg_bus = nrt_vbif_dbg_bus_r3;
2621 mdata->nrt_vbif_dbg_bus_size =
2622 ARRAY_SIZE(nrt_vbif_dbg_bus_r3);
2623
Benjamin Chan2d6411a2017-03-28 18:01:53 -04002624 mdata->rot_dbg_bus = rot_dbgbus_r3;
2625 mdata->rot_dbg_bus_size = ARRAY_SIZE(rot_dbgbus_r3);
2626
Benjamin Chan53e3bce2016-08-31 14:43:29 -04002627 mdata->regdump = sde_rot_r3_regdump;
2628 mdata->regdump_size = ARRAY_SIZE(sde_rot_r3_regdump);
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04002629 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG, 0);
Alan Kwong6bc64622017-02-04 17:36:03 -08002630
2631 /* features exposed via mdss h/w version */
2632 if (IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version, SDE_MDP_HW_REV_400)) {
2633 SDEROT_DBG("Supporting sys cache inline rotation\n");
2634 set_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map);
Alan Kwongfb8eeb22017-02-06 15:00:03 -08002635 set_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map);
Clarence Ip22fed4c2017-05-16 15:30:51 -04002636 set_bit(SDE_CAPS_PARTIALWR, mdata->sde_caps_map);
Alan Kwong6bc64622017-02-04 17:36:03 -08002637 rot->inpixfmts = sde_hw_rotator_v4_inpixfmts;
2638 rot->num_inpixfmt = ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts);
2639 rot->outpixfmts = sde_hw_rotator_v4_outpixfmts;
2640 rot->num_outpixfmt = ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts);
2641 rot->downscale_caps =
2642 "LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2";
2643 } else {
2644 rot->inpixfmts = sde_hw_rotator_v3_inpixfmts;
2645 rot->num_inpixfmt = ARRAY_SIZE(sde_hw_rotator_v3_inpixfmts);
2646 rot->outpixfmts = sde_hw_rotator_v3_outpixfmts;
2647 rot->num_outpixfmt = ARRAY_SIZE(sde_hw_rotator_v3_outpixfmts);
2648 rot->downscale_caps = (hw_version == SDE_ROT_TYPE_V1_0) ?
2649 "LINEAR/2/4/8/16/32/64 TILE/2/4 TP10/2" :
2650 "LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2";
2651 }
2652
Alan Kwong9487de22016-01-16 22:06:36 -05002653 return 0;
2654}
2655
2656/*
2657 * sde_hw_rotator_rotirq_handler - non-regdma interrupt handler
2658 * @irq: Interrupt number
2659 * @ptr: Pointer to private handle provided during registration
2660 *
2661 * This function services rotator interrupt and wakes up waiting client
2662 * with pending rotation requests already submitted to h/w.
2663 */
2664static irqreturn_t sde_hw_rotator_rotirq_handler(int irq, void *ptr)
2665{
2666 struct sde_hw_rotator *rot = ptr;
2667 struct sde_hw_rotator_context *ctx;
2668 irqreturn_t ret = IRQ_NONE;
2669 u32 isr;
2670
2671 isr = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_INTR_STATUS);
2672
2673 SDEROT_DBG("intr_status = %8.8x\n", isr);
2674
2675 if (isr & ROT_DONE_MASK) {
2676 if (rot->irq_num >= 0)
Alan Kwong818b7fc2016-07-24 22:07:41 -04002677 sde_hw_rotator_disable_irq(rot);
Alan Kwong9487de22016-01-16 22:06:36 -05002678 SDEROT_DBG("Notify rotator complete\n");
2679
2680 /* Normal rotator only 1 session, no need to lookup */
2681 ctx = rot->rotCtx[0][0];
2682 WARN_ON(ctx == NULL);
2683 complete_all(&ctx->rot_comp);
2684
2685 spin_lock(&rot->rotisr_lock);
2686 SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
2687 ROT_DONE_CLEAR);
2688 spin_unlock(&rot->rotisr_lock);
2689 ret = IRQ_HANDLED;
2690 }
2691
2692 return ret;
2693}
2694
2695/*
2696 * sde_hw_rotator_regdmairq_handler - regdma interrupt handler
2697 * @irq: Interrupt number
2698 * @ptr: Pointer to private handle provided during registration
2699 *
2700 * This function services rotator interrupt, decoding the source of
2701 * events (high/low priority queue), and wakes up all waiting clients
2702 * with pending rotation requests already submitted to h/w.
2703 */
2704static irqreturn_t sde_hw_rotator_regdmairq_handler(int irq, void *ptr)
2705{
2706 struct sde_hw_rotator *rot = ptr;
Clarence Ip96854c2db12017-06-12 14:32:26 -04002707 struct sde_hw_rotator_context *ctx, *tmp;
Alan Kwong9487de22016-01-16 22:06:36 -05002708 irqreturn_t ret = IRQ_NONE;
Clarence Ip96854c2db12017-06-12 14:32:26 -04002709 u32 isr, isr_tmp;
Alan Kwong9487de22016-01-16 22:06:36 -05002710 u32 ts;
2711 u32 q_id;
2712
2713 isr = SDE_ROTREG_READ(rot->mdss_base, REGDMA_CSR_REGDMA_INT_STATUS);
Alan Kwong818b7fc2016-07-24 22:07:41 -04002714 /* acknowledge interrupt before reading latest timestamp */
2715 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_INT_CLEAR, isr);
Alan Kwong9487de22016-01-16 22:06:36 -05002716 ts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);
2717
2718 SDEROT_DBG("intr_status = %8.8x, sw_TS:%X\n", isr, ts);
2719
2720 /* Any REGDMA status, including error and watchdog timer, should
 2721	 * trigger and wake up the waiting thread
2722 */
2723 if (isr & (REGDMA_INT_HIGH_MASK | REGDMA_INT_LOW_MASK)) {
2724 spin_lock(&rot->rotisr_lock);
2725
2726 /*
2727 * Obtain rotator context based on timestamp from regdma
2728 * and low/high interrupt status
2729 */
2730 if (isr & REGDMA_INT_HIGH_MASK) {
2731 q_id = ROT_QUEUE_HIGH_PRIORITY;
2732 ts = ts & SDE_REGDMA_SWTS_MASK;
2733 } else if (isr & REGDMA_INT_LOW_MASK) {
2734 q_id = ROT_QUEUE_LOW_PRIORITY;
2735 ts = (ts >> SDE_REGDMA_SWTS_SHIFT) &
2736 SDE_REGDMA_SWTS_MASK;
Benjamin Chan62b94ed2016-08-18 23:55:21 -04002737 } else {
2738 SDEROT_ERR("unknown ISR status: isr=0x%X\n", isr);
2739 goto done_isr_handle;
Alan Kwong9487de22016-01-16 22:06:36 -05002740 }
Alan Kwong6bc64622017-02-04 17:36:03 -08002741
2742 /*
2743 * Timestamp packet is not available in sbuf mode.
2744 * Simulate timestamp update in the handler instead.
2745 */
Clarence Ip96854c2db12017-06-12 14:32:26 -04002746 if (list_empty(&rot->sbuf_ctx[q_id]))
2747 goto skip_sbuf;
2748
2749 ctx = NULL;
2750 isr_tmp = isr;
2751 list_for_each_entry(tmp, &rot->sbuf_ctx[q_id], list) {
2752 u32 mask;
2753
2754 mask = tmp->timestamp & 0x1 ? REGDMA_INT_1_MASK :
2755 REGDMA_INT_0_MASK;
2756 if (isr_tmp & mask) {
2757 isr_tmp &= ~mask;
2758 ctx = tmp;
Alan Kwong6bc64622017-02-04 17:36:03 -08002759 ts = ctx->timestamp;
2760 sde_hw_rotator_update_swts(rot, ctx, ts);
2761 SDEROT_DBG("update swts:0x%X\n", ts);
Alan Kwong6bc64622017-02-04 17:36:03 -08002762 }
Clarence Ip96854c2db12017-06-12 14:32:26 -04002763 SDEROT_EVTLOG(isr, tmp->timestamp);
Alan Kwong6bc64622017-02-04 17:36:03 -08002764 }
Clarence Ip96854c2db12017-06-12 14:32:26 -04002765 if (ctx == NULL)
2766 SDEROT_ERR("invalid swts ctx\n");
2767skip_sbuf:
Alan Kwong9487de22016-01-16 22:06:36 -05002768 ctx = rot->rotCtx[q_id][ts & SDE_HW_ROT_REGDMA_SEG_MASK];
Alan Kwong9487de22016-01-16 22:06:36 -05002769
2770 /*
2771 * Wake up all waiting context from the current and previous
2772 * SW Timestamp.
2773 */
Alan Kwong818b7fc2016-07-24 22:07:41 -04002774 while (ctx &&
2775 sde_hw_rotator_elapsed_swts(ctx->timestamp, ts) >= 0) {
Alan Kwong9487de22016-01-16 22:06:36 -05002776 ctx->last_regdma_isr_status = isr;
2777 ctx->last_regdma_timestamp = ts;
2778 SDEROT_DBG(
Alan Kwongf987ea32016-07-06 12:11:44 -04002779 "regdma complete: ctx:%p, ts:%X\n", ctx, ts);
Alan Kwong818b7fc2016-07-24 22:07:41 -04002780 wake_up_all(&ctx->regdma_waitq);
Alan Kwong9487de22016-01-16 22:06:36 -05002781
2782 ts = (ts - 1) & SDE_REGDMA_SWTS_MASK;
2783 ctx = rot->rotCtx[q_id]
2784 [ts & SDE_HW_ROT_REGDMA_SEG_MASK];
Alan Kwong818b7fc2016-07-24 22:07:41 -04002785		}
Alan Kwong9487de22016-01-16 22:06:36 -05002786
Benjamin Chan62b94ed2016-08-18 23:55:21 -04002787done_isr_handle:
Alan Kwong9487de22016-01-16 22:06:36 -05002788 spin_unlock(&rot->rotisr_lock);
2789 ret = IRQ_HANDLED;
2790 } else if (isr & REGDMA_INT_ERR_MASK) {
2791 /*
2792 * For REGDMA Err, we save the isr info and wake up
2793 * all waiting contexts
2794 */
2795 int i, j;
2796
2797 SDEROT_ERR(
2798 "regdma err isr:%X, wake up all waiting contexts\n",
2799 isr);
2800
2801 spin_lock(&rot->rotisr_lock);
2802
2803 for (i = 0; i < ROT_QUEUE_MAX; i++) {
2804 for (j = 0; j < SDE_HW_ROT_REGDMA_TOTAL_CTX; j++) {
2805 ctx = rot->rotCtx[i][j];
2806 if (ctx && ctx->last_regdma_isr_status == 0) {
2807 ctx->last_regdma_isr_status = isr;
2808 ctx->last_regdma_timestamp = ts;
Alan Kwong818b7fc2016-07-24 22:07:41 -04002809 wake_up_all(&ctx->regdma_waitq);
Alan Kwong9487de22016-01-16 22:06:36 -05002810 SDEROT_DBG("Wakeup rotctx[%d][%d]:%p\n",
2811 i, j, ctx);
2812 }
2813 }
2814 }
2815
Alan Kwong9487de22016-01-16 22:06:36 -05002816 spin_unlock(&rot->rotisr_lock);
2817 ret = IRQ_HANDLED;
2818 }
2819
2820 return ret;
2821}
2822
2823/*
2824 * sde_hw_rotator_validate_entry - validate rotation entry
2825 * @mgr: Pointer to rotator manager
2826 * @entry: Pointer to rotation entry
2827 *
2828 * This function validates the given rotation entry and provides possible
2829 * fixup (future improvement) if available. This function returns 0 if
2830 * the entry is valid, and returns error code otherwise.
2831 */
2832static int sde_hw_rotator_validate_entry(struct sde_rot_mgr *mgr,
2833 struct sde_rot_entry *entry)
2834{
Benjamin Chanfb6faa32016-08-16 17:21:01 -04002835 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
Alan Kwongb6c049c2017-03-31 12:50:27 -07002836 struct sde_hw_rotator *hw_data;
Alan Kwong9487de22016-01-16 22:06:36 -05002837 int ret = 0;
2838 u16 src_w, src_h, dst_w, dst_h;
2839 struct sde_rotation_item *item = &entry->item;
2840 struct sde_mdp_format_params *fmt;
2841
Alan Kwongb6c049c2017-03-31 12:50:27 -07002842 if (!mgr || !entry || !mgr->hw_data) {
2843 SDEROT_ERR("invalid parameters\n");
2844 return -EINVAL;
2845 }
2846
2847 hw_data = mgr->hw_data;
2848
2849 if (hw_data->maxlinewidth < item->src_rect.w) {
2850 SDEROT_ERR("invalid src width %u\n", item->src_rect.w);
2851 return -EINVAL;
2852 }
2853
Alan Kwong9487de22016-01-16 22:06:36 -05002854 src_w = item->src_rect.w;
2855 src_h = item->src_rect.h;
2856
2857 if (item->flags & SDE_ROTATION_90) {
2858 dst_w = item->dst_rect.h;
2859 dst_h = item->dst_rect.w;
2860 } else {
2861 dst_w = item->dst_rect.w;
2862 dst_h = item->dst_rect.h;
2863 }
2864
2865 entry->dnsc_factor_w = 0;
2866 entry->dnsc_factor_h = 0;
2867
Alan Kwong6bc64622017-02-04 17:36:03 -08002868 if (item->output.sbuf &&
2869 !test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map)) {
2870 SDEROT_ERR("stream buffer not supported\n");
2871 return -EINVAL;
2872 }
2873
Alan Kwong9487de22016-01-16 22:06:36 -05002874 if ((src_w != dst_w) || (src_h != dst_h)) {
Clarence Ip4db1ea82017-05-01 12:18:55 -07002875 if (!dst_w || !dst_h) {
 2876			SDEROT_DBG("zero output width/height not supported\n");
2877 ret = -EINVAL;
2878 goto dnsc_err;
2879 }
Alan Kwong9487de22016-01-16 22:06:36 -05002880 if ((src_w % dst_w) || (src_h % dst_h)) {
 2881			SDEROT_DBG("non-integral scale not supported\n");
2882 ret = -EINVAL;
Benjamin Chanfb6faa32016-08-16 17:21:01 -04002883 goto dnsc_1p5_check;
Alan Kwong9487de22016-01-16 22:06:36 -05002884 }
2885 entry->dnsc_factor_w = src_w / dst_w;
2886 if ((entry->dnsc_factor_w & (entry->dnsc_factor_w - 1)) ||
2887 (entry->dnsc_factor_w > 64)) {
 2888			SDEROT_DBG("non power-of-2 w_scale not supported\n");
2889 ret = -EINVAL;
2890 goto dnsc_err;
2891 }
2892 entry->dnsc_factor_h = src_h / dst_h;
2893 if ((entry->dnsc_factor_h & (entry->dnsc_factor_h - 1)) ||
2894 (entry->dnsc_factor_h > 64)) {
 2895			SDEROT_DBG("non power-of-2 h_scale not supported\n");
2896 ret = -EINVAL;
2897 goto dnsc_err;
2898 }
2899 }
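	/*
	 * Example: a 3840x2160 source written out as 960x540 yields
	 * dnsc_factor_w = dnsc_factor_h = 4, which passes the power-of-2 and
	 * symmetry checks; 3840x2160 to 2560x1440 falls through to the 1.5x
	 * case handled under dnsc_1p5_check on capable hardware.
	 */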
2900
Benjamin Chan0e96afd2017-01-17 16:49:12 -05002901 fmt = sde_get_format_params(item->output.format);
Benjamin Chan886ff672016-11-07 15:23:17 -05002902 /*
 2903	 * Rotator downscale supports at most 4x for UBWC formats and
 2904	 * at most 2x for TP10/TP10_UBWC formats
2905 */
2906 if (sde_mdp_is_ubwc_format(fmt) && (entry->dnsc_factor_h > 4)) {
2907 SDEROT_DBG("max downscale for UBWC format is 4\n");
Alan Kwong9487de22016-01-16 22:06:36 -05002908 ret = -EINVAL;
2909 goto dnsc_err;
2910 }
Benjamin Chan886ff672016-11-07 15:23:17 -05002911 if (sde_mdp_is_tp10_format(fmt) && (entry->dnsc_factor_h > 2)) {
2912 SDEROT_DBG("downscale with TP10 cannot be more than 2\n");
Alan Kwong9487de22016-01-16 22:06:36 -05002913 ret = -EINVAL;
2914 }
Benjamin Chanfb6faa32016-08-16 17:21:01 -04002915 goto dnsc_err;
2916
2917dnsc_1p5_check:
2918 /* Check for 1.5 downscale that only applies to V2 HW */
2919 if (test_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map)) {
2920 entry->dnsc_factor_w = src_w / dst_w;
2921 if ((entry->dnsc_factor_w != 1) ||
2922 ((dst_w * 3) != (src_w * 2))) {
2923 SDEROT_DBG(
 2924				"Not supporting non-1.5 downscale width ratio, src_w:%d, dst_w:%d\n",
2925 src_w, dst_w);
2926 ret = -EINVAL;
2927 goto dnsc_err;
2928 }
2929
2930 entry->dnsc_factor_h = src_h / dst_h;
2931 if ((entry->dnsc_factor_h != 1) ||
2932 ((dst_h * 3) != (src_h * 2))) {
2933 SDEROT_DBG(
 2934				"Not supporting non-1.5 downscale height ratio, src_h:%d, dst_h:%d\n",
2935 src_h, dst_h);
2936 ret = -EINVAL;
2937 goto dnsc_err;
2938 }
2939 ret = 0;
2940 }
Alan Kwong9487de22016-01-16 22:06:36 -05002941
2942dnsc_err:
2943 /* Downscaler does not support asymmetrical dnsc */
2944 if (entry->dnsc_factor_w != entry->dnsc_factor_h) {
 2945		SDEROT_DBG("asymmetric downscale not supported\n");
2946 ret = -EINVAL;
2947 }
2948
2949 if (ret) {
2950 entry->dnsc_factor_w = 0;
2951 entry->dnsc_factor_h = 0;
2952 }
2953 return ret;
2954}
2955
2956/*
2957 * sde_hw_rotator_show_caps - output capability info to sysfs 'caps' file
2958 * @mgr: Pointer to rotator manager
2959 * @attr: Pointer to device attribute interface
2960 * @buf: Pointer to output buffer
2961 * @len: Length of output buffer
2962 */
2963static ssize_t sde_hw_rotator_show_caps(struct sde_rot_mgr *mgr,
2964 struct device_attribute *attr, char *buf, ssize_t len)
2965{
2966 struct sde_hw_rotator *hw_data;
Benjamin Chan886ff672016-11-07 15:23:17 -05002967 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
Alan Kwong9487de22016-01-16 22:06:36 -05002968 int cnt = 0;
2969
2970 if (!mgr || !buf)
2971 return 0;
2972
2973 hw_data = mgr->hw_data;
2974
2975#define SPRINT(fmt, ...) \
2976 (cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
2977
2978 /* insert capabilities here */
Benjamin Chan886ff672016-11-07 15:23:17 -05002979 if (test_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map))
2980 SPRINT("min_downscale=1.5\n");
2981 else
2982 SPRINT("min_downscale=2.0\n");
Alan Kwong9487de22016-01-16 22:06:36 -05002983
Benjamin Chan42db2c92016-11-22 22:50:01 -05002984 SPRINT("downscale_compression=1\n");
2985
Alan Kwong6bc64622017-02-04 17:36:03 -08002986 if (hw_data->downscale_caps)
2987 SPRINT("downscale_ratios=%s\n", hw_data->downscale_caps);
2988
Alan Kwong9487de22016-01-16 22:06:36 -05002989#undef SPRINT
2990 return cnt;
2991}
2992
2993/*
2994 * sde_hw_rotator_show_state - output state info to sysfs 'state' file
2995 * @mgr: Pointer to rotator manager
2996 * @attr: Pointer to device attribute interface
2997 * @buf: Pointer to output buffer
2998 * @len: Length of output buffer
2999 */
3000static ssize_t sde_hw_rotator_show_state(struct sde_rot_mgr *mgr,
3001 struct device_attribute *attr, char *buf, ssize_t len)
3002{
3003 struct sde_hw_rotator *rot;
3004 struct sde_hw_rotator_context *ctx;
3005 int cnt = 0;
3006 int num_active = 0;
3007 int i, j;
3008
3009 if (!mgr || !buf) {
3010 SDEROT_ERR("null parameters\n");
3011 return 0;
3012 }
3013
3014 rot = mgr->hw_data;
3015
3016#define SPRINT(fmt, ...) \
3017 (cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
3018
3019 if (rot) {
3020 SPRINT("rot_mode=%d\n", rot->mode);
3021 SPRINT("irq_num=%d\n", rot->irq_num);
3022
3023 if (rot->mode == ROT_REGDMA_OFF) {
3024 SPRINT("max_active=1\n");
3025 SPRINT("num_active=%d\n", rot->rotCtx[0][0] ? 1 : 0);
3026 } else {
3027 for (i = 0; i < ROT_QUEUE_MAX; i++) {
3028 for (j = 0; j < SDE_HW_ROT_REGDMA_TOTAL_CTX;
3029 j++) {
3030 ctx = rot->rotCtx[i][j];
3031
3032 if (ctx) {
3033 SPRINT(
3034 "rotCtx[%d][%d]:%p\n",
3035 i, j, ctx);
3036 ++num_active;
3037 }
3038 }
3039 }
3040
3041 SPRINT("max_active=%d\n", SDE_HW_ROT_REGDMA_TOTAL_CTX);
3042 SPRINT("num_active=%d\n", num_active);
3043 }
3044 }
3045
3046#undef SPRINT
3047 return cnt;
3048}
3049
3050/*
Alan Kwongda16e442016-08-14 20:47:18 -04003051 * sde_hw_rotator_get_pixfmt - get the indexed pixel format
3052 * @mgr: Pointer to rotator manager
3053 * @index: index of pixel format
3054 * @input: true for input port; false for output port
3055 */
3056static u32 sde_hw_rotator_get_pixfmt(struct sde_rot_mgr *mgr,
3057 int index, bool input)
3058{
Alan Kwong6bc64622017-02-04 17:36:03 -08003059 struct sde_hw_rotator *rot;
3060
3061 if (!mgr || !mgr->hw_data) {
3062 SDEROT_ERR("null parameters\n");
3063 return 0;
3064 }
3065
3066 rot = mgr->hw_data;
3067
Alan Kwongda16e442016-08-14 20:47:18 -04003068 if (input) {
Alan Kwong6bc64622017-02-04 17:36:03 -08003069 if ((index < rot->num_inpixfmt) && rot->inpixfmts)
3070 return rot->inpixfmts[index];
Alan Kwongda16e442016-08-14 20:47:18 -04003071 else
3072 return 0;
3073 } else {
Alan Kwong6bc64622017-02-04 17:36:03 -08003074 if ((index < rot->num_outpixfmt) && rot->outpixfmts)
3075 return rot->outpixfmts[index];
Alan Kwongda16e442016-08-14 20:47:18 -04003076 else
3077 return 0;
3078 }
3079}
3080
3081/*
3082 * sde_hw_rotator_is_valid_pixfmt - verify if the given pixel format is valid
3083 * @mgr: Pointer to rotator manager
3084 * @pixfmt: pixel format to be verified
3085 * @input: true for input port; false for output port
3086 */
3087static int sde_hw_rotator_is_valid_pixfmt(struct sde_rot_mgr *mgr, u32 pixfmt,
3088 bool input)
3089{
Alan Kwong6bc64622017-02-04 17:36:03 -08003090 struct sde_hw_rotator *rot;
3091 u32 *pixfmts;
3092 u32 num_pixfmt;
Alan Kwongda16e442016-08-14 20:47:18 -04003093 int i;
3094
Alan Kwong6bc64622017-02-04 17:36:03 -08003095 if (!mgr || !mgr->hw_data) {
3096 SDEROT_ERR("null parameters\n");
3097 return false;
Alan Kwongda16e442016-08-14 20:47:18 -04003098 }
3099
Alan Kwong6bc64622017-02-04 17:36:03 -08003100 rot = mgr->hw_data;
3101
3102 if (input) {
3103 pixfmts = rot->inpixfmts;
3104 num_pixfmt = rot->num_inpixfmt;
3105 } else {
3106 pixfmts = rot->outpixfmts;
3107 num_pixfmt = rot->num_outpixfmt;
3108 }
3109
3110 if (!pixfmts || !num_pixfmt) {
3111 SDEROT_ERR("invalid pixel format tables\n");
3112 return false;
3113 }
3114
3115 for (i = 0; i < num_pixfmt; i++)
3116 if (pixfmts[i] == pixfmt)
3117 return true;
3118
Alan Kwongda16e442016-08-14 20:47:18 -04003119 return false;
3120}
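/*
 * Illustrative caller-side sketch (not part of this driver): a requested
 * format can be screened through the ops_hw_is_valid_pixfmt hook before a
 * session is committed, e.g.
 *
 *	if (!mgr->ops_hw_is_valid_pixfmt(mgr, requested_fmt, true))
 *		return -EINVAL;
 *
 * where requested_fmt stands for whatever pixel format the caller received
 * from userspace.
 */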
3121
3122/*
Alan Kwong6bc64622017-02-04 17:36:03 -08003123 * sde_hw_rotator_get_downscale_caps - get scaling capability string
3124 * @mgr: Pointer to rotator manager
3125 * @caps: Pointer to capability string buffer; NULL to query required length
3126 * @len: length of capability string buffer
3127 * return: length of capability string
3128 */
3129static int sde_hw_rotator_get_downscale_caps(struct sde_rot_mgr *mgr,
3130 char *caps, int len)
3131{
3132 struct sde_hw_rotator *rot;
3133 int rc = 0;
3134
3135 if (!mgr || !mgr->hw_data) {
3136 SDEROT_ERR("null parameters\n");
3137 return -EINVAL;
3138 }
3139
3140 rot = mgr->hw_data;
3141
3142 if (rot->downscale_caps) {
3143 if (caps)
3144 rc = snprintf(caps, len, "%s", rot->downscale_caps);
3145 else
3146 rc = strlen(rot->downscale_caps);
3147 }
3148
3149 return rc;
3150}
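/*
 * Illustrative usage (not part of this driver): since a NULL caps pointer
 * only reports the string length, callers may size the buffer in two steps:
 *
 *	int n = mgr->ops_hw_get_downscale_caps(mgr, NULL, 0);
 *	char *str = (n > 0) ? kzalloc(n + 1, GFP_KERNEL) : NULL;
 *
 *	if (str)
 *		mgr->ops_hw_get_downscale_caps(mgr, str, n + 1);
 */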
3151
3152/*
Alan Kwongb6c049c2017-03-31 12:50:27 -07003153 * sde_hw_rotator_get_maxlinewidth - get maximum line width supported
3154 * @mgr: Pointer to rotator manager
3155 * return: maximum line width supported by hardware
3156 */
3157static int sde_hw_rotator_get_maxlinewidth(struct sde_rot_mgr *mgr)
3158{
3159 struct sde_hw_rotator *rot;
3160
3161 if (!mgr || !mgr->hw_data) {
3162 SDEROT_ERR("null parameters\n");
3163 return -EINVAL;
3164 }
3165
3166 rot = mgr->hw_data;
3167
3168 return rot->maxlinewidth;
3169}
3170
3171/*
Alan Kwong9487de22016-01-16 22:06:36 -05003172 * sde_hw_rotator_parse_dt - parse r3 specific device tree settings
3173 * @hw_data: Pointer to rotator hw
3174 * @dev: Pointer to platform device
3175 */
3176static int sde_hw_rotator_parse_dt(struct sde_hw_rotator *hw_data,
3177 struct platform_device *dev)
3178{
3179 int ret = 0;
3180 u32 data;
3181
3182 if (!hw_data || !dev)
3183 return -EINVAL;
3184
3185 ret = of_property_read_u32(dev->dev.of_node, "qcom,mdss-rot-mode",
3186 &data);
3187 if (ret) {
3188 SDEROT_DBG("default to regdma off\n");
3189 ret = 0;
3190 hw_data->mode = ROT_REGDMA_OFF;
3191 } else if (data < ROT_REGDMA_MAX) {
3192 SDEROT_DBG("set to regdma mode %d\n", data);
3193 hw_data->mode = data;
3194 } else {
3195 SDEROT_ERR("regdma mode out of range. default to regdma off\n");
3196 hw_data->mode = ROT_REGDMA_OFF;
3197 }
3198
3199 ret = of_property_read_u32(dev->dev.of_node,
3200 "qcom,mdss-highest-bank-bit", &data);
3201 if (ret) {
3202 SDEROT_DBG("default to A5X bank\n");
3203 ret = 0;
3204 hw_data->highest_bank = 2;
3205 } else {
3206 SDEROT_DBG("set highest bank bit to %d\n", data);
3207 hw_data->highest_bank = data;
3208 }
3209
Alan Kwong6bc64622017-02-04 17:36:03 -08003210 ret = of_property_read_u32(dev->dev.of_node,
Alan Kwongfb8eeb22017-02-06 15:00:03 -08003211 "qcom,sde-ubwc-malsize", &data);
3212 if (ret) {
3213 ret = 0;
3214 hw_data->ubwc_malsize = DEFAULT_UBWC_MALSIZE;
3215 } else {
3216 SDEROT_DBG("set ubwc malsize to %d\n", data);
3217 hw_data->ubwc_malsize = data;
3218 }
3219
3220 ret = of_property_read_u32(dev->dev.of_node,
3221 "qcom,sde-ubwc_swizzle", &data);
3222 if (ret) {
3223 ret = 0;
3224 hw_data->ubwc_swizzle = DEFAULT_UBWC_SWIZZLE;
3225 } else {
3226 SDEROT_DBG("set ubwc swizzle to %d\n", data);
3227 hw_data->ubwc_swizzle = data;
3228 }
3229
3230 ret = of_property_read_u32(dev->dev.of_node,
Alan Kwong6bc64622017-02-04 17:36:03 -08003231 "qcom,mdss-sbuf-headroom", &data);
3232 if (ret) {
3233 ret = 0;
3234 hw_data->sbuf_headroom = DEFAULT_SBUF_HEADROOM;
3235 } else {
3236 SDEROT_DBG("set sbuf headroom to %d\n", data);
3237 hw_data->sbuf_headroom = data;
3238 }
3239
Alan Kwongb6c049c2017-03-31 12:50:27 -07003240 ret = of_property_read_u32(dev->dev.of_node,
3241 "qcom,mdss-rot-linewidth", &data);
3242 if (ret) {
3243 ret = 0;
3244 hw_data->maxlinewidth = DEFAULT_MAXLINEWIDTH;
3245 } else {
3246 SDEROT_DBG("set mdss-rot-linewidth to %d\n", data);
3247 hw_data->maxlinewidth = data;
3248 }
3249
Alan Kwong9487de22016-01-16 22:06:36 -05003250 return ret;
3251}
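/*
 * Hypothetical device tree fragment exercising the optional properties
 * parsed above; the node label and the values are examples only, and every
 * property falls back to its default when absent:
 *
 *	&mdss_rotator {
 *		qcom,mdss-rot-mode = <1>;
 *		qcom,mdss-highest-bank-bit = <2>;
 *		qcom,sde-ubwc-malsize = <1>;
 *		qcom,sde-ubwc_swizzle = <1>;
 *		qcom,mdss-sbuf-headroom = <20>;
 *		qcom,mdss-rot-linewidth = <4096>;
 *	};
 */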
3252
3253/*
3254 * sde_rotator_r3_init - initialize the r3 module
3255 * @mgr: Pointer to rotator manager
3256 *
3257 * This function sets up the r3 callback functions, parses the r3
3258 * specific device tree settings, installs the r3 specific interrupt
3259 * handler, and initializes the r3 internal data structures.
3260 */
3261int sde_rotator_r3_init(struct sde_rot_mgr *mgr)
3262{
3263 struct sde_hw_rotator *rot;
3264 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
3265 int i;
3266 int ret;
3267
3268 rot = devm_kzalloc(&mgr->pdev->dev, sizeof(*rot), GFP_KERNEL);
3269 if (!rot)
3270 return -ENOMEM;
3271
3272 mgr->hw_data = rot;
3273 mgr->queue_count = ROT_QUEUE_MAX;
3274
3275 rot->mdss_base = mdata->sde_io.base;
3276 rot->pdev = mgr->pdev;
Alan Kwong6bc64622017-02-04 17:36:03 -08003277 rot->koff_timeout = KOFF_TIMEOUT;
3278 rot->vid_trigger = ROTTOP_START_CTRL_TRIG_SEL_MDP;
3279 rot->cmd_trigger = ROTTOP_START_CTRL_TRIG_SEL_MDP;
Alan Kwong9487de22016-01-16 22:06:36 -05003280
3281 /* Assign ops */
3282 mgr->ops_hw_destroy = sde_hw_rotator_destroy;
3283 mgr->ops_hw_alloc = sde_hw_rotator_alloc_ext;
3284 mgr->ops_hw_free = sde_hw_rotator_free_ext;
3285 mgr->ops_config_hw = sde_hw_rotator_config;
3286 mgr->ops_kickoff_entry = sde_hw_rotator_kickoff;
3287 mgr->ops_wait_for_entry = sde_hw_rotator_wait4done;
3288 mgr->ops_hw_validate_entry = sde_hw_rotator_validate_entry;
3289 mgr->ops_hw_show_caps = sde_hw_rotator_show_caps;
3290 mgr->ops_hw_show_state = sde_hw_rotator_show_state;
3291 mgr->ops_hw_create_debugfs = sde_rotator_r3_create_debugfs;
Alan Kwongda16e442016-08-14 20:47:18 -04003292 mgr->ops_hw_get_pixfmt = sde_hw_rotator_get_pixfmt;
3293 mgr->ops_hw_is_valid_pixfmt = sde_hw_rotator_is_valid_pixfmt;
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04003294 mgr->ops_hw_pre_pmevent = sde_hw_rotator_pre_pmevent;
3295 mgr->ops_hw_post_pmevent = sde_hw_rotator_post_pmevent;
Alan Kwong6bc64622017-02-04 17:36:03 -08003296 mgr->ops_hw_get_downscale_caps = sde_hw_rotator_get_downscale_caps;
Alan Kwongb6c049c2017-03-31 12:50:27 -07003297 mgr->ops_hw_get_maxlinewidth = sde_hw_rotator_get_maxlinewidth;
Alan Kwong9487de22016-01-16 22:06:36 -05003298
3299 ret = sde_hw_rotator_parse_dt(mgr->hw_data, mgr->pdev);
3300 if (ret)
3301 goto error_parse_dt;
3302
3303 rot->irq_num = platform_get_irq(mgr->pdev, 0);
3304 if (rot->irq_num < 0) {
3305 SDEROT_ERR("fail to get rotator irq\n");
3306 } else {
3307 if (rot->mode == ROT_REGDMA_OFF)
3308 ret = devm_request_threaded_irq(&mgr->pdev->dev,
3309 rot->irq_num,
3310 sde_hw_rotator_rotirq_handler,
3311 NULL, 0, "sde_rotator_r3", rot);
3312 else
3313 ret = devm_request_threaded_irq(&mgr->pdev->dev,
3314 rot->irq_num,
3315 sde_hw_rotator_regdmairq_handler,
3316 NULL, 0, "sde_rotator_r3", rot);
3317 if (ret) {
3318 SDEROT_ERR("fail to request irq r:%d\n", ret);
3319 rot->irq_num = -1;
3320 } else {
3321 disable_irq(rot->irq_num);
3322 }
3323 }
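	/*
	 * The rotator IRQ is requested up front but left disabled, and the
	 * irq_enabled refcount starts at zero; the interrupt is expected to
	 * be enabled on demand once rotation work is actually queued.
	 */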
Alan Kwong818b7fc2016-07-24 22:07:41 -04003324 atomic_set(&rot->irq_enabled, 0);
Alan Kwong9487de22016-01-16 22:06:36 -05003325
3326 setup_rotator_ops(&rot->ops, rot->mode);
3327
3328 spin_lock_init(&rot->rotctx_lock);
3329 spin_lock_init(&rot->rotisr_lock);
3330
3331 /* REGDMA initialization */
3332 if (rot->mode == ROT_REGDMA_OFF) {
3333 for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
3334 rot->cmd_wr_ptr[0][i] = &rot->cmd_queue[
3335 SDE_HW_ROT_REGDMA_SEG_SIZE * i];
3336 } else {
3337 for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
3338 rot->cmd_wr_ptr[ROT_QUEUE_HIGH_PRIORITY][i] =
3339 (u32 *)(rot->mdss_base +
3340 REGDMA_RAM_REGDMA_CMD_RAM +
3341 SDE_HW_ROT_REGDMA_SEG_SIZE * 4 * i);
3342
3343 for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
3344 rot->cmd_wr_ptr[ROT_QUEUE_LOW_PRIORITY][i] =
3345 (u32 *)(rot->mdss_base +
3346 REGDMA_RAM_REGDMA_CMD_RAM +
3347 SDE_HW_ROT_REGDMA_SEG_SIZE * 4 *
3348 (i + SDE_HW_ROT_REGDMA_TOTAL_CTX));
3349 }
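	/*
	 * Descriptive note: with REGDMA off, every context's command write
	 * pointer targets the driver-owned cmd_queue in system memory, so the
	 * queued register writes can be issued directly by the CPU. With
	 * REGDMA on, the pointers map into the REGDMA command RAM in the
	 * rotator register space, with the high priority queue occupying the
	 * first half of that RAM and the low priority queue the second half.
	 */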
3350
Alan Kwong6bc64622017-02-04 17:36:03 -08003351 for (i = 0; i < ROT_QUEUE_MAX; i++) {
3352 atomic_set(&rot->timestamp[i], 0);
3353 INIT_LIST_HEAD(&rot->sbuf_ctx[i]);
3354 }
Alan Kwong9487de22016-01-16 22:06:36 -05003355
3356 ret = sde_rotator_hw_rev_init(rot);
3357 if (ret)
3358 goto error_hw_rev_init;
3359
Alan Kwong315cd772016-08-03 22:29:42 -04003360	/* set rotator CBCR to shut off memory/periphery on clock off. */
Clarence Ip77c053d2017-04-24 19:26:37 -07003361 clk_set_flags(mgr->rot_clk[SDE_ROTATOR_CLK_MDSS_ROT].clk,
Alan Kwong315cd772016-08-03 22:29:42 -04003362 CLKFLAG_NORETAIN_MEM);
Clarence Ip77c053d2017-04-24 19:26:37 -07003363 clk_set_flags(mgr->rot_clk[SDE_ROTATOR_CLK_MDSS_ROT].clk,
Alan Kwong315cd772016-08-03 22:29:42 -04003364 CLKFLAG_NORETAIN_PERIPH);
3365
Benjamin Chan53e3bce2016-08-31 14:43:29 -04003366 mdata->sde_rot_hw = rot;
Alan Kwong9487de22016-01-16 22:06:36 -05003367 return 0;
3368error_hw_rev_init:
3369 if (rot->irq_num >= 0)
3370		devm_free_irq(&mgr->pdev->dev, rot->irq_num, rot);
3371 devm_kfree(&mgr->pdev->dev, mgr->hw_data);
3372error_parse_dt:
3373 return ret;
3374}