/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt)	"%s: " fmt, __func__

#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/delay.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dma-buf.h>
#include <linux/msm_ion.h>
#include <linux/clk.h>
#include <linux/clk/qcom.h>

#include "sde_rotator_core.h"
#include "sde_rotator_util.h"
#include "sde_rotator_smmu.h"
#include "sde_rotator_r3.h"
#include "sde_rotator_r3_internal.h"
#include "sde_rotator_r3_hwio.h"
#include "sde_rotator_r3_debug.h"
#include "sde_rotator_trace.h"
#include "sde_rotator_debug.h"

#define RES_UHD			(3840*2160)

/* traffic shaping clock ticks = finish_time x 19.2MHz */
#define TRAFFIC_SHAPE_CLKTICK_14MS	268800
#define TRAFFIC_SHAPE_CLKTICK_12MS	230400
#define TRAFFIC_SHAPE_VSYNC_CLK		19200000

/* XIN mapping */
#define XIN_SSPP		0
#define XIN_WRITEBACK		1

/* wait for at most 2 vsync for lowest refresh rate (24hz) */
#define KOFF_TIMEOUT		(42 * 32)

/* default stream buffer headroom in lines */
#define DEFAULT_SBUF_HEADROOM	20
#define DEFAULT_UBWC_MALSIZE	0
#define DEFAULT_UBWC_SWIZZLE	0

#define DEFAULT_MAXLINEWIDTH	4096

/* Macro for constructing the REGDMA command */
#define SDE_REGDMA_WRITE(p, off, data) \
	do { \
		SDEROT_DBG("SDEREG.W:[%s:0x%X] <= 0x%X\n", #off, (off),\
				(u32)(data));\
		*p++ = REGDMA_OP_REGWRITE | \
			((off) & REGDMA_ADDR_OFFSET_MASK); \
		*p++ = (data); \
	} while (0)

#define SDE_REGDMA_MODIFY(p, off, mask, data) \
	do { \
		SDEROT_DBG("SDEREG.M:[%s:0x%X] <= 0x%X\n", #off, (off),\
				(u32)(data));\
		*p++ = REGDMA_OP_REGMODIFY | \
			((off) & REGDMA_ADDR_OFFSET_MASK); \
		*p++ = (mask); \
		*p++ = (data); \
	} while (0)

#define SDE_REGDMA_BLKWRITE_INC(p, off, len) \
	do { \
		SDEROT_DBG("SDEREG.B:[%s:0x%X:0x%X]\n", #off, (off),\
				(u32)(len));\
		*p++ = REGDMA_OP_BLKWRITE_INC | \
			((off) & REGDMA_ADDR_OFFSET_MASK); \
		*p++ = (len); \
	} while (0)

#define SDE_REGDMA_BLKWRITE_DATA(p, data) \
	do { \
		SDEROT_DBG("SDEREG.I:[:] <= 0x%X\n", (u32)(data));\
		*(p) = (data); \
		(p)++; \
	} while (0)

/* Macro for directly accessing mapped registers */
#define SDE_ROTREG_WRITE(base, off, data) \
	do { \
		SDEROT_DBG("SDEREG.D:[%s:0x%X] <= 0x%X\n", #off, (off)\
				, (u32)(data));\
		writel_relaxed(data, (base + (off))); \
	} while (0)

#define SDE_ROTREG_READ(base, off) \
	readl_relaxed(base + (off))
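
/*
 * Illustrative sketch (not part of the driver, kept disabled): shows how the
 * REGDMA command macros above are typically chained to build a command
 * stream.  A caller fetches the current write pointer for the context,
 * appends encoded register writes, then stores the advanced pointer back.
 * The helper name example_build_cmds() is hypothetical and the register
 * offsets are only used to demonstrate the calling pattern.
 */
#if 0
static void example_build_cmds(struct sde_hw_rotator_context *ctx)
{
	u32 *wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	/* single register write: opcode/offset word followed by data word */
	SDE_REGDMA_WRITE(wrptr, ROTTOP_DNSC, 0);

	/* block write: one header, then 'len' consecutive data words */
	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_WB_OUT_SIZE, 2);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x00010001);	/* first register */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x00010001);	/* next register */

	/* commit the advanced write pointer back to the context */
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
}
#endif
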
static u32 sde_hw_rotator_v3_inpixfmts[] = {
	SDE_PIX_FMT_XRGB_8888,
	SDE_PIX_FMT_ARGB_8888,
	SDE_PIX_FMT_ABGR_8888,
	SDE_PIX_FMT_RGBA_8888,
	SDE_PIX_FMT_BGRA_8888,
	SDE_PIX_FMT_RGBX_8888,
	SDE_PIX_FMT_BGRX_8888,
	SDE_PIX_FMT_XBGR_8888,
	SDE_PIX_FMT_RGBA_5551,
	SDE_PIX_FMT_ARGB_1555,
	SDE_PIX_FMT_ABGR_1555,
	SDE_PIX_FMT_BGRA_5551,
	SDE_PIX_FMT_BGRX_5551,
	SDE_PIX_FMT_RGBX_5551,
	SDE_PIX_FMT_XBGR_1555,
	SDE_PIX_FMT_XRGB_1555,
	SDE_PIX_FMT_ARGB_4444,
	SDE_PIX_FMT_RGBA_4444,
	SDE_PIX_FMT_BGRA_4444,
	SDE_PIX_FMT_ABGR_4444,
	SDE_PIX_FMT_RGBX_4444,
	SDE_PIX_FMT_XRGB_4444,
	SDE_PIX_FMT_BGRX_4444,
	SDE_PIX_FMT_XBGR_4444,
	SDE_PIX_FMT_RGB_888,
	SDE_PIX_FMT_BGR_888,
	SDE_PIX_FMT_RGB_565,
	SDE_PIX_FMT_BGR_565,
	SDE_PIX_FMT_Y_CB_CR_H2V2,
	SDE_PIX_FMT_Y_CR_CB_H2V2,
	SDE_PIX_FMT_Y_CR_CB_GH2V2,
	SDE_PIX_FMT_Y_CBCR_H2V2,
	SDE_PIX_FMT_Y_CRCB_H2V2,
	SDE_PIX_FMT_Y_CBCR_H1V2,
	SDE_PIX_FMT_Y_CRCB_H1V2,
	SDE_PIX_FMT_Y_CBCR_H2V1,
	SDE_PIX_FMT_Y_CRCB_H2V1,
	SDE_PIX_FMT_YCBYCR_H2V1,
	SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
	SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
	SDE_PIX_FMT_RGBA_8888_UBWC,
	SDE_PIX_FMT_RGBX_8888_UBWC,
	SDE_PIX_FMT_RGB_565_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
	SDE_PIX_FMT_RGBA_1010102,
	SDE_PIX_FMT_RGBX_1010102,
	SDE_PIX_FMT_ARGB_2101010,
	SDE_PIX_FMT_XRGB_2101010,
	SDE_PIX_FMT_BGRA_1010102,
	SDE_PIX_FMT_BGRX_1010102,
	SDE_PIX_FMT_ABGR_2101010,
	SDE_PIX_FMT_XBGR_2101010,
	SDE_PIX_FMT_RGBA_1010102_UBWC,
	SDE_PIX_FMT_RGBX_1010102_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_P010,
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
};

static u32 sde_hw_rotator_v3_outpixfmts[] = {
	SDE_PIX_FMT_XRGB_8888,
	SDE_PIX_FMT_ARGB_8888,
	SDE_PIX_FMT_ABGR_8888,
	SDE_PIX_FMT_RGBA_8888,
	SDE_PIX_FMT_BGRA_8888,
	SDE_PIX_FMT_RGBX_8888,
	SDE_PIX_FMT_BGRX_8888,
	SDE_PIX_FMT_XBGR_8888,
	SDE_PIX_FMT_RGBA_5551,
	SDE_PIX_FMT_ARGB_1555,
	SDE_PIX_FMT_ABGR_1555,
	SDE_PIX_FMT_BGRA_5551,
	SDE_PIX_FMT_BGRX_5551,
	SDE_PIX_FMT_RGBX_5551,
	SDE_PIX_FMT_XBGR_1555,
	SDE_PIX_FMT_XRGB_1555,
	SDE_PIX_FMT_ARGB_4444,
	SDE_PIX_FMT_RGBA_4444,
	SDE_PIX_FMT_BGRA_4444,
	SDE_PIX_FMT_ABGR_4444,
	SDE_PIX_FMT_RGBX_4444,
	SDE_PIX_FMT_XRGB_4444,
	SDE_PIX_FMT_BGRX_4444,
	SDE_PIX_FMT_XBGR_4444,
	SDE_PIX_FMT_RGB_888,
	SDE_PIX_FMT_BGR_888,
	SDE_PIX_FMT_RGB_565,
	SDE_PIX_FMT_BGR_565,
	/* SDE_PIX_FMT_Y_CB_CR_H2V2 */
	/* SDE_PIX_FMT_Y_CR_CB_H2V2 */
	/* SDE_PIX_FMT_Y_CR_CB_GH2V2 */
	SDE_PIX_FMT_Y_CBCR_H2V2,
	SDE_PIX_FMT_Y_CRCB_H2V2,
	SDE_PIX_FMT_Y_CBCR_H1V2,
	SDE_PIX_FMT_Y_CRCB_H1V2,
	SDE_PIX_FMT_Y_CBCR_H2V1,
	SDE_PIX_FMT_Y_CRCB_H2V1,
	/* SDE_PIX_FMT_YCBYCR_H2V1 */
	SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
	SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
	SDE_PIX_FMT_RGBA_8888_UBWC,
	SDE_PIX_FMT_RGBX_8888_UBWC,
	SDE_PIX_FMT_RGB_565_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
	SDE_PIX_FMT_RGBA_1010102,
	SDE_PIX_FMT_RGBX_1010102,
	/* SDE_PIX_FMT_ARGB_2101010 */
	/* SDE_PIX_FMT_XRGB_2101010 */
	SDE_PIX_FMT_BGRA_1010102,
	SDE_PIX_FMT_BGRX_1010102,
	/* SDE_PIX_FMT_ABGR_2101010 */
	/* SDE_PIX_FMT_XBGR_2101010 */
	SDE_PIX_FMT_RGBA_1010102_UBWC,
	SDE_PIX_FMT_RGBX_1010102_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_P010,
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
};

static u32 sde_hw_rotator_v4_inpixfmts[] = {
	SDE_PIX_FMT_XRGB_8888,
	SDE_PIX_FMT_ARGB_8888,
	SDE_PIX_FMT_ABGR_8888,
	SDE_PIX_FMT_RGBA_8888,
	SDE_PIX_FMT_BGRA_8888,
	SDE_PIX_FMT_RGBX_8888,
	SDE_PIX_FMT_BGRX_8888,
	SDE_PIX_FMT_XBGR_8888,
	SDE_PIX_FMT_RGBA_5551,
	SDE_PIX_FMT_ARGB_1555,
	SDE_PIX_FMT_ABGR_1555,
	SDE_PIX_FMT_BGRA_5551,
	SDE_PIX_FMT_BGRX_5551,
	SDE_PIX_FMT_RGBX_5551,
	SDE_PIX_FMT_XBGR_1555,
	SDE_PIX_FMT_XRGB_1555,
	SDE_PIX_FMT_ARGB_4444,
	SDE_PIX_FMT_RGBA_4444,
	SDE_PIX_FMT_BGRA_4444,
	SDE_PIX_FMT_ABGR_4444,
	SDE_PIX_FMT_RGBX_4444,
	SDE_PIX_FMT_XRGB_4444,
	SDE_PIX_FMT_BGRX_4444,
	SDE_PIX_FMT_XBGR_4444,
	SDE_PIX_FMT_RGB_888,
	SDE_PIX_FMT_BGR_888,
	SDE_PIX_FMT_RGB_565,
	SDE_PIX_FMT_BGR_565,
	SDE_PIX_FMT_Y_CB_CR_H2V2,
	SDE_PIX_FMT_Y_CR_CB_H2V2,
	SDE_PIX_FMT_Y_CR_CB_GH2V2,
	SDE_PIX_FMT_Y_CBCR_H2V2,
	SDE_PIX_FMT_Y_CRCB_H2V2,
	SDE_PIX_FMT_Y_CBCR_H1V2,
	SDE_PIX_FMT_Y_CRCB_H1V2,
	SDE_PIX_FMT_Y_CBCR_H2V1,
	SDE_PIX_FMT_Y_CRCB_H2V1,
	SDE_PIX_FMT_YCBYCR_H2V1,
	SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
	SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
	SDE_PIX_FMT_RGBA_8888_UBWC,
	SDE_PIX_FMT_RGBX_8888_UBWC,
	SDE_PIX_FMT_RGB_565_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
	SDE_PIX_FMT_RGBA_1010102,
	SDE_PIX_FMT_RGBX_1010102,
	SDE_PIX_FMT_ARGB_2101010,
	SDE_PIX_FMT_XRGB_2101010,
	SDE_PIX_FMT_BGRA_1010102,
	SDE_PIX_FMT_BGRX_1010102,
	SDE_PIX_FMT_ABGR_2101010,
	SDE_PIX_FMT_XBGR_2101010,
	SDE_PIX_FMT_RGBA_1010102_UBWC,
	SDE_PIX_FMT_RGBX_1010102_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_P010,
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE,
	SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
	SDE_PIX_FMT_Y_CRCB_H2V2_TILE,
	SDE_PIX_FMT_XRGB_8888_TILE,
	SDE_PIX_FMT_ARGB_8888_TILE,
	SDE_PIX_FMT_ABGR_8888_TILE,
	SDE_PIX_FMT_XBGR_8888_TILE,
	SDE_PIX_FMT_RGBA_8888_TILE,
	SDE_PIX_FMT_BGRA_8888_TILE,
	SDE_PIX_FMT_RGBX_8888_TILE,
	SDE_PIX_FMT_BGRX_8888_TILE,
	SDE_PIX_FMT_RGBA_1010102_TILE,
	SDE_PIX_FMT_RGBX_1010102_TILE,
	SDE_PIX_FMT_ARGB_2101010_TILE,
	SDE_PIX_FMT_XRGB_2101010_TILE,
	SDE_PIX_FMT_BGRA_1010102_TILE,
	SDE_PIX_FMT_BGRX_1010102_TILE,
	SDE_PIX_FMT_ABGR_2101010_TILE,
	SDE_PIX_FMT_XBGR_2101010_TILE,
};

static u32 sde_hw_rotator_v4_outpixfmts[] = {
	SDE_PIX_FMT_XRGB_8888,
	SDE_PIX_FMT_ARGB_8888,
	SDE_PIX_FMT_ABGR_8888,
	SDE_PIX_FMT_RGBA_8888,
	SDE_PIX_FMT_BGRA_8888,
	SDE_PIX_FMT_RGBX_8888,
	SDE_PIX_FMT_BGRX_8888,
	SDE_PIX_FMT_XBGR_8888,
	SDE_PIX_FMT_RGBA_5551,
	SDE_PIX_FMT_ARGB_1555,
	SDE_PIX_FMT_ABGR_1555,
	SDE_PIX_FMT_BGRA_5551,
	SDE_PIX_FMT_BGRX_5551,
	SDE_PIX_FMT_RGBX_5551,
	SDE_PIX_FMT_XBGR_1555,
	SDE_PIX_FMT_XRGB_1555,
	SDE_PIX_FMT_ARGB_4444,
	SDE_PIX_FMT_RGBA_4444,
	SDE_PIX_FMT_BGRA_4444,
	SDE_PIX_FMT_ABGR_4444,
	SDE_PIX_FMT_RGBX_4444,
	SDE_PIX_FMT_XRGB_4444,
	SDE_PIX_FMT_BGRX_4444,
	SDE_PIX_FMT_XBGR_4444,
	SDE_PIX_FMT_RGB_888,
	SDE_PIX_FMT_BGR_888,
	SDE_PIX_FMT_RGB_565,
	SDE_PIX_FMT_BGR_565,
	/* SDE_PIX_FMT_Y_CB_CR_H2V2 */
	/* SDE_PIX_FMT_Y_CR_CB_H2V2 */
	/* SDE_PIX_FMT_Y_CR_CB_GH2V2 */
	SDE_PIX_FMT_Y_CBCR_H2V2,
	SDE_PIX_FMT_Y_CRCB_H2V2,
	SDE_PIX_FMT_Y_CBCR_H1V2,
	SDE_PIX_FMT_Y_CRCB_H1V2,
	SDE_PIX_FMT_Y_CBCR_H2V1,
	SDE_PIX_FMT_Y_CRCB_H2V1,
	/* SDE_PIX_FMT_YCBYCR_H2V1 */
	SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
	SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
	SDE_PIX_FMT_RGBA_8888_UBWC,
	SDE_PIX_FMT_RGBX_8888_UBWC,
	SDE_PIX_FMT_RGB_565_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
	SDE_PIX_FMT_RGBA_1010102,
	SDE_PIX_FMT_RGBX_1010102,
	SDE_PIX_FMT_ARGB_2101010,
	SDE_PIX_FMT_XRGB_2101010,
	SDE_PIX_FMT_BGRA_1010102,
	SDE_PIX_FMT_BGRX_1010102,
	SDE_PIX_FMT_ABGR_2101010,
	SDE_PIX_FMT_XBGR_2101010,
	SDE_PIX_FMT_RGBA_1010102_UBWC,
	SDE_PIX_FMT_RGBX_1010102_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_P010,
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE,
	SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
	SDE_PIX_FMT_Y_CRCB_H2V2_TILE,
	SDE_PIX_FMT_XRGB_8888_TILE,
	SDE_PIX_FMT_ARGB_8888_TILE,
	SDE_PIX_FMT_ABGR_8888_TILE,
	SDE_PIX_FMT_XBGR_8888_TILE,
	SDE_PIX_FMT_RGBA_8888_TILE,
	SDE_PIX_FMT_BGRA_8888_TILE,
	SDE_PIX_FMT_RGBX_8888_TILE,
	SDE_PIX_FMT_BGRX_8888_TILE,
	SDE_PIX_FMT_RGBA_1010102_TILE,
	SDE_PIX_FMT_RGBX_1010102_TILE,
	SDE_PIX_FMT_ARGB_2101010_TILE,
	SDE_PIX_FMT_XRGB_2101010_TILE,
	SDE_PIX_FMT_BGRA_1010102_TILE,
	SDE_PIX_FMT_BGRX_1010102_TILE,
	SDE_PIX_FMT_ABGR_2101010_TILE,
	SDE_PIX_FMT_XBGR_2101010_TILE,
};

static struct sde_rot_vbif_debug_bus nrt_vbif_dbg_bus_r3[] = {
	{0x214, 0x21c, 16, 1, 0x200}, /* arb clients main */
	{0x214, 0x21c, 0, 12, 0x13}, /* xin blocks - axi side */
	{0x21c, 0x214, 0, 12, 0xc}, /* xin blocks - clock side */
};

static struct sde_rot_debug_bus rot_dbgbus_r3[] = {
	/*
	 * rottop - 0xA8850
	 */
	/* REGDMA */
	{ 0XA8850, 0, 0 },
	{ 0XA8850, 0, 1 },
	{ 0XA8850, 0, 2 },
	{ 0XA8850, 0, 3 },
	{ 0XA8850, 0, 4 },

	/* ROT_WB */
	{ 0XA8850, 1, 0 },
	{ 0XA8850, 1, 1 },
	{ 0XA8850, 1, 2 },
	{ 0XA8850, 1, 3 },
	{ 0XA8850, 1, 4 },
	{ 0XA8850, 1, 5 },
	{ 0XA8850, 1, 6 },
	{ 0XA8850, 1, 7 },

	/* UBWC_DEC */
	{ 0XA8850, 2, 0 },

	/* UBWC_ENC */
	{ 0XA8850, 3, 0 },

	/* ROT_FETCH_0 */
	{ 0XA8850, 4, 0 },
	{ 0XA8850, 4, 1 },
	{ 0XA8850, 4, 2 },
	{ 0XA8850, 4, 3 },
	{ 0XA8850, 4, 4 },
	{ 0XA8850, 4, 5 },
	{ 0XA8850, 4, 6 },
	{ 0XA8850, 4, 7 },

	/* ROT_FETCH_1 */
	{ 0XA8850, 5, 0 },
	{ 0XA8850, 5, 1 },
	{ 0XA8850, 5, 2 },
	{ 0XA8850, 5, 3 },
	{ 0XA8850, 5, 4 },
	{ 0XA8850, 5, 5 },
	{ 0XA8850, 5, 6 },
	{ 0XA8850, 5, 7 },

	/* ROT_FETCH_2 */
	{ 0XA8850, 6, 0 },
	{ 0XA8850, 6, 1 },
	{ 0XA8850, 6, 2 },
	{ 0XA8850, 6, 3 },
	{ 0XA8850, 6, 4 },
	{ 0XA8850, 6, 5 },
	{ 0XA8850, 6, 6 },
	{ 0XA8850, 6, 7 },

	/* ROT_FETCH_3 */
	{ 0XA8850, 7, 0 },
	{ 0XA8850, 7, 1 },
	{ 0XA8850, 7, 2 },
	{ 0XA8850, 7, 3 },
	{ 0XA8850, 7, 4 },
	{ 0XA8850, 7, 5 },
	{ 0XA8850, 7, 6 },
	{ 0XA8850, 7, 7 },

	/* ROT_FETCH_4 */
	{ 0XA8850, 8, 0 },
	{ 0XA8850, 8, 1 },
	{ 0XA8850, 8, 2 },
	{ 0XA8850, 8, 3 },
	{ 0XA8850, 8, 4 },
	{ 0XA8850, 8, 5 },
	{ 0XA8850, 8, 6 },
	{ 0XA8850, 8, 7 },

	/* ROT_UNPACK_0 */
	{ 0XA8850, 9, 0 },
	{ 0XA8850, 9, 1 },
	{ 0XA8850, 9, 2 },
	{ 0XA8850, 9, 3 },
};

static struct sde_rot_regdump sde_rot_r3_regdump[] = {
	{ "SDEROT_ROTTOP", SDE_ROT_ROTTOP_OFFSET, 0x100, SDE_ROT_REGDUMP_READ },
	{ "SDEROT_SSPP", SDE_ROT_SSPP_OFFSET, 0x200, SDE_ROT_REGDUMP_READ },
	{ "SDEROT_WB", SDE_ROT_WB_OFFSET, 0x300, SDE_ROT_REGDUMP_READ },
	{ "SDEROT_REGDMA_CSR", SDE_ROT_REGDMA_OFFSET, 0x100,
		SDE_ROT_REGDUMP_READ },
	/*
	 * Need to perform a SW reset to REGDMA in order to access the
	 * REGDMA RAM, especially if REGDMA is waiting for Rotator IDLE.
	 * The REGDMA RAM should therefore be dumped last.
	 */
	{ "SDEROT_REGDMA_RESET", ROTTOP_SW_RESET_OVERRIDE, 1,
		SDE_ROT_REGDUMP_WRITE },
	{ "SDEROT_REGDMA_RAM", SDE_ROT_REGDMA_RAM_OFFSET, 0x2000,
		SDE_ROT_REGDUMP_READ },
	{ "SDEROT_VBIF_NRT", SDE_ROT_VBIF_NRT_OFFSET, 0x590,
		SDE_ROT_REGDUMP_VBIF },
};

struct sde_rot_cdp_params {
	bool enable;
	struct sde_mdp_format_params *fmt;
	u32 offset;
};

/* Invalid software timestamp value for initialization */
#define SDE_REGDMA_SWTS_INVALID	(~0)

/**
 * sde_hw_rotator_elapsed_swts - Find difference of 2 software timestamps
 * @ts_curr: current software timestamp
 * @ts_prev: previous software timestamp
 * @return: the amount ts_curr is ahead of ts_prev
 */
static int sde_hw_rotator_elapsed_swts(u32 ts_curr, u32 ts_prev)
{
	u32 diff = (ts_curr - ts_prev) & SDE_REGDMA_SWTS_MASK;

	return sign_extend32(diff, (SDE_REGDMA_SWTS_SHIFT - 1));
}
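
/*
 * Worked example (illustrative only, kept disabled): assuming a 16-bit
 * timestamp field, i.e. SDE_REGDMA_SWTS_MASK == 0xffff and
 * SDE_REGDMA_SWTS_SHIFT == 16 -- the real values come from
 * sde_rotator_r3_internal.h -- the modular subtraction plus sign extension
 * above keeps the comparison correct across counter wrap-around.
 */
#if 0
	/* current timestamp just wrapped past the previous one */
	sde_hw_rotator_elapsed_swts(0x0002, 0xffff);	/* returns  3 */
	/* previous timestamp is ahead, i.e. nothing is pending */
	sde_hw_rotator_elapsed_swts(0x0001, 0x0004);	/* returns -3 */
#endif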

/**
 * sde_hw_rotator_pending_swts - Check if the given context is still pending
 * @rot: Pointer to hw rotator
 * @ctx: Pointer to rotator context
 * @pswts: Pointer to returned reference software timestamp, optional
 * @return: true if context has pending requests
 */
static int sde_hw_rotator_pending_swts(struct sde_hw_rotator *rot,
		struct sde_hw_rotator_context *ctx, u32 *pswts)
{
	u32 swts;
	int ts_diff;
	bool pending;

	if (ctx->last_regdma_timestamp == SDE_REGDMA_SWTS_INVALID)
		swts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);
	else
		swts = ctx->last_regdma_timestamp;

	if (ctx->q_id == ROT_QUEUE_LOW_PRIORITY)
		swts >>= SDE_REGDMA_SWTS_SHIFT;

	swts &= SDE_REGDMA_SWTS_MASK;

	ts_diff = sde_hw_rotator_elapsed_swts(ctx->timestamp, swts);

	if (pswts)
		*pswts = swts;

	pending = (ts_diff > 0) ? true : false;

	SDEROT_DBG("ts:0x%x, queue_id:%d, swts:0x%x, pending:%d\n",
			ctx->timestamp, ctx->q_id, swts, pending);
	SDEROT_EVTLOG(ctx->timestamp, swts, ctx->q_id, ts_diff);
	return pending;
}

/**
 * sde_hw_rotator_update_swts - update software timestamp with given value
 * @rot: Pointer to hw rotator
 * @ctx: Pointer to rotator context
 * @swts: new software timestamp
 * @return: new combined swts
 */
static u32 sde_hw_rotator_update_swts(struct sde_hw_rotator *rot,
		struct sde_hw_rotator_context *ctx, u32 swts)
{
	u32 mask = SDE_REGDMA_SWTS_MASK;

	swts &= SDE_REGDMA_SWTS_MASK;
	if (ctx->q_id == ROT_QUEUE_LOW_PRIORITY) {
		swts <<= SDE_REGDMA_SWTS_SHIFT;
		mask <<= SDE_REGDMA_SWTS_SHIFT;
	}

	swts |= (SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG) & ~mask);
	SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG, swts);

	return swts;
}

/**
 * sde_hw_rotator_enable_irq - Enable hw rotator interrupt with ref. count
 *	Also, clear rotator/regdma irq status.
 * @rot: Pointer to hw rotator
 */
static void sde_hw_rotator_enable_irq(struct sde_hw_rotator *rot)
{
	SDEROT_DBG("irq_num:%d enabled:%d\n", rot->irq_num,
			atomic_read(&rot->irq_enabled));

	if (!atomic_read(&rot->irq_enabled)) {
		if (rot->mode == ROT_REGDMA_OFF)
			SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
					ROT_DONE_MASK);
		else
			SDE_ROTREG_WRITE(rot->mdss_base,
					REGDMA_CSR_REGDMA_INT_CLEAR,
					REGDMA_INT_MASK);

		enable_irq(rot->irq_num);
	}
	atomic_inc(&rot->irq_enabled);
}

/**
 * sde_hw_rotator_disable_irq - Disable hw rotator interrupt with ref. count
 *	Also, clear rotator/regdma irq enable masks.
 * @rot: Pointer to hw rotator
 */
static void sde_hw_rotator_disable_irq(struct sde_hw_rotator *rot)
{
	SDEROT_DBG("irq_num:%d enabled:%d\n", rot->irq_num,
			atomic_read(&rot->irq_enabled));

	if (!atomic_read(&rot->irq_enabled)) {
		SDEROT_ERR("irq %d is already disabled\n", rot->irq_num);
		return;
	}

	if (!atomic_dec_return(&rot->irq_enabled)) {
		if (rot->mode == ROT_REGDMA_OFF)
			SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_EN, 0);
		else
			SDE_ROTREG_WRITE(rot->mdss_base,
					REGDMA_CSR_REGDMA_INT_EN, 0);
		/* disable irq after last pending irq is handled, if any */
		synchronize_irq(rot->irq_num);
		disable_irq_nosync(rot->irq_num);
	}
}

/**
 * sde_hw_rotator_dump_status - Dump hw rotator status on error
 * @rot: Pointer to hw rotator
 */
static void sde_hw_rotator_dump_status(struct sde_hw_rotator *rot)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();

	SDEROT_ERR(
		"op_mode = %x, int_en = %x, int_status = %x\n",
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_OP_MODE),
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_INT_EN),
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_INT_STATUS));

	SDEROT_ERR(
		"ts = %x, q0_status = %x, q1_status = %x, block_status = %x\n",
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_TIMESTAMP_REG),
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_QUEUE_0_STATUS),
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_QUEUE_1_STATUS),
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_BLOCK_STATUS));

	SDEROT_ERR(
		"invalid_cmd_offset = %x, fsm_state = %x\n",
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_INVALID_CMD_RAM_OFFSET),
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_FSM_STATE));

	SDEROT_ERR(
		"UBWC decode status = %x, UBWC encode status = %x\n",
		SDE_ROTREG_READ(rot->mdss_base, ROT_SSPP_UBWC_ERROR_STATUS),
		SDE_ROTREG_READ(rot->mdss_base, ROT_WB_UBWC_ERROR_STATUS));

	SDEROT_ERR("VBIF XIN HALT status = %x VBIF AXI HALT status = %x\n",
		SDE_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL1),
		SDE_VBIF_READ(mdata, MMSS_VBIF_AXI_HALT_CTRL1));

	SDEROT_ERR(
		"sbuf_status_plane0 = %x, sbuf_status_plane1 = %x\n",
		SDE_ROTREG_READ(rot->mdss_base,
			ROT_WB_SBUF_STATUS_PLANE0),
		SDE_ROTREG_READ(rot->mdss_base,
			ROT_WB_SBUF_STATUS_PLANE1));
}

/**
 * sde_hw_rotator_get_ctx(): Retrieve rotator context from rotator HW based
 * on provided session_id. Each rotator has a different session_id.
 * @rot: Pointer to rotator hw
 * @session_id: Identifier for rotator session
 * @sequence_id: Identifier for rotation request within the session
 * @q_id: Rotator queue identifier
 */
static struct sde_hw_rotator_context *sde_hw_rotator_get_ctx(
		struct sde_hw_rotator *rot, u32 session_id, u32 sequence_id,
		enum sde_rot_queue_prio q_id)
{
	int i;
	struct sde_hw_rotator_context *ctx = NULL;

	for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++) {
		ctx = rot->rotCtx[q_id][i];

		if (ctx && (ctx->session_id == session_id) &&
				(ctx->sequence_id == sequence_id)) {
			SDEROT_DBG(
				"rotCtx slot[%d][%d] ==> ctx:%p | session-id:%d | sequence-id:%d\n",
				q_id, i, ctx, ctx->session_id,
				ctx->sequence_id);
			return ctx;
		}
	}

	return NULL;
}

/*
 * sde_hw_rotator_map_vaddr - map the debug buffer to kernel space
 * @dbgbuf: Pointer to debug buffer
 * @buf: Pointer to layer buffer structure
 * @data: Pointer to h/w mapped buffer structure
 */
static void sde_hw_rotator_map_vaddr(struct sde_dbg_buf *dbgbuf,
		struct sde_layer_buffer *buf, struct sde_mdp_data *data)
{
	dbgbuf->dmabuf = data->p[0].srcp_dma_buf;
	dbgbuf->buflen = data->p[0].srcp_dma_buf->size;

	dbgbuf->vaddr = NULL;
	dbgbuf->width = buf->width;
	dbgbuf->height = buf->height;

	if (dbgbuf->dmabuf && (dbgbuf->buflen > 0)) {
		dma_buf_begin_cpu_access(dbgbuf->dmabuf, DMA_FROM_DEVICE);
		dbgbuf->vaddr = dma_buf_kmap(dbgbuf->dmabuf, 0);
		SDEROT_DBG("vaddr mapping: 0x%p/%ld w:%d/h:%d\n",
				dbgbuf->vaddr, dbgbuf->buflen,
				dbgbuf->width, dbgbuf->height);
	}
}

/*
 * sde_hw_rotator_unmap_vaddr - unmap the debug buffer from kernel space
 * @dbgbuf: Pointer to debug buffer
 */
static void sde_hw_rotator_unmap_vaddr(struct sde_dbg_buf *dbgbuf)
{
	if (dbgbuf->vaddr) {
		dma_buf_kunmap(dbgbuf->dmabuf, 0, dbgbuf->vaddr);
		dma_buf_end_cpu_access(dbgbuf->dmabuf, DMA_FROM_DEVICE);
	}

	dbgbuf->vaddr = NULL;
	dbgbuf->dmabuf = NULL;
	dbgbuf->buflen = 0;
	dbgbuf->width = 0;
	dbgbuf->height = 0;
}

/*
 * sde_hw_rotator_vbif_setting - helper function to set the VBIF QoS remapper
 * levels, enable write gather and, for debug purposes, optionally keep the
 * VBIF clocks always on.
 *
 * @rot: Pointer to rotator hw
 */
static void sde_hw_rotator_vbif_setting(struct sde_hw_rotator *rot)
{
	u32 i, mask, vbif_qos, reg_val = 0;
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();

	/* VBIF_ROT QoS remapper setting */
	switch (mdata->npriority_lvl) {

	case SDE_MDP_VBIF_4_LEVEL_REMAPPER:
		for (i = 0; i < mdata->npriority_lvl; i++) {
			reg_val = SDE_VBIF_READ(mdata,
					MMSS_VBIF_NRT_VBIF_QOS_REMAP_00 + i*4);
			mask = 0x3 << (XIN_SSPP * 2);
			vbif_qos = mdata->vbif_nrt_qos[i];
			reg_val |= vbif_qos << (XIN_SSPP * 2);
			/* ensure write is issued after the read operation */
			mb();
			SDE_VBIF_WRITE(mdata,
					MMSS_VBIF_NRT_VBIF_QOS_REMAP_00 + i*4,
					reg_val);
		}
		break;

	case SDE_MDP_VBIF_8_LEVEL_REMAPPER:
		mask = mdata->npriority_lvl - 1;
		for (i = 0; i < mdata->npriority_lvl; i++) {
			/* RD and WR client */
			reg_val |= (mdata->vbif_nrt_qos[i] & mask)
					<< (XIN_SSPP * 4);
			reg_val |= (mdata->vbif_nrt_qos[i] & mask)
					<< (XIN_WRITEBACK * 4);

			SDE_VBIF_WRITE(mdata,
					MMSS_VBIF_NRT_VBIF_QOS_RP_REMAP_000 + i*8,
					reg_val);
			SDE_VBIF_WRITE(mdata,
					MMSS_VBIF_NRT_VBIF_QOS_LVL_REMAP_000 + i*8,
					reg_val);
		}
		break;

	default:
		SDEROT_DBG("invalid vbif remapper levels\n");
	}

	/*
	 * Enable write gather for writeback to remove write gaps, which
	 * may hang AXI/BIMC/SDE.
	 */
	SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_WRITE_GATHTER_EN,
			BIT(XIN_WRITEBACK));

	/*
	 * For debug purposes, disable clock gating, i.e. keep clocks always on
	 */
	if (mdata->clk_always_on) {
		SDE_VBIF_WRITE(mdata, MMSS_VBIF_CLKON, 0x3);
		SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0, 0x3);
		SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL1,
				0xFFFF);
		SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_CLK_CTRL, 1);
	}
}
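
/*
 * Illustrative sketch (kept disabled): how the 4-level QoS remapper value
 * above is packed.  Each xin client owns a 2-bit field in
 * MMSS_VBIF_NRT_VBIF_QOS_REMAP_0n, so XIN_SSPP (id 0) lands in bits [1:0]
 * and a client with xin id 1 would land in bits [3:2].  The priority values
 * below are made up for demonstration only.
 */
#if 0
	u32 reg_val = 0;

	reg_val |= (0x2 & 0x3) << (XIN_SSPP * 2);	/* SSPP priority = 2 */
	reg_val |= (0x1 & 0x3) << (XIN_WRITEBACK * 2);	/* WB priority = 1 */
	/* reg_val == 0x6: WB field in bits [3:2], SSPP field in bits [1:0] */
#endif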

/*
 * sde_hw_rotator_setup_timestamp_packet - setup timestamp writeback command
 * @ctx: Pointer to rotator context
 * @mask: Bit mask location of the timestamp
 * @swts: Software timestamp
 */
static void sde_hw_rotator_setup_timestamp_packet(
		struct sde_hw_rotator_context *ctx, u32 mask, u32 swts)
{
	u32 *wrptr;

	wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	/*
	 * Create a dummy packet write out to 1 location for timestamp
	 * generation.
	 */
	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_SIZE, 6);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x00010001);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x00010001);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, ctx->ts_addr);
	SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_YSTRIDE0, 4);
	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_FORMAT, 4);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x004037FF);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x03020100);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x80000000);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, ctx->timestamp);
	/*
	 * Must clear secure buffer setting for SW timestamp because the
	 * SW timestamp buffer allocation is always in a non-secure region.
	 */
	if (ctx->is_secure) {
		SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_ADDR_SW_STATUS, 0);
		SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ADDR_SW_STATUS, 0);
	}
	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_WB_DST_FORMAT, 4);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x000037FF);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x03020100);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, ctx->ts_addr);
	SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_YSTRIDE0, 4);
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_SIZE, 0x00010001);
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_IMG_SIZE, 0x00010001);
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_XY, 0);
	SDE_REGDMA_WRITE(wrptr, ROTTOP_DNSC, 0);
	SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, 1);
	SDE_REGDMA_MODIFY(wrptr, REGDMA_TIMESTAMP_REG, mask, swts);
	SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, 1);

	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
}

/*
 * sde_hw_rotator_cdp_configs - configures the CDP registers
 * @ctx: Pointer to rotator context
 * @params: Pointer to parameters needed for CDP configs
 */
static void sde_hw_rotator_cdp_configs(struct sde_hw_rotator_context *ctx,
		struct sde_rot_cdp_params *params)
{
	int reg_val;
	u32 *wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	if (!params->enable) {
		SDE_REGDMA_WRITE(wrptr, params->offset, 0x0);
		goto end;
	}

	reg_val = BIT(0); /* enable cdp */

	if (sde_mdp_is_ubwc_format(params->fmt))
		reg_val |= BIT(1); /* enable UBWC meta cdp */

	if (sde_mdp_is_ubwc_format(params->fmt)
			|| sde_mdp_is_tilea4x_format(params->fmt)
			|| sde_mdp_is_tilea5x_format(params->fmt))
		reg_val |= BIT(2); /* enable tile amortize */

	reg_val |= BIT(3); /* enable preload addr ahead cnt 64 */

	SDE_REGDMA_WRITE(wrptr, params->offset, reg_val);

end:
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
}

/*
 * sde_hw_rotator_setup_qos_lut_wr - Set QoS LUT/Danger LUT/Safe LUT configs
 * for the WRITEBACK rotator for inline and offline rotation.
 *
 * @ctx: Pointer to rotator context
 */
static void sde_hw_rotator_setup_qos_lut_wr(struct sde_hw_rotator_context *ctx)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	u32 *wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	/* Offline rotation setting */
	if (!ctx->sbuf_mode) {
		/* QOS LUT WR setting */
		if (test_bit(SDE_QOS_LUT, mdata->sde_qos_map)) {
			SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_0,
					mdata->lut_cfg[SDE_ROT_WR].creq_lut_0);
			SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_1,
					mdata->lut_cfg[SDE_ROT_WR].creq_lut_1);
		}

		/* Danger LUT WR setting */
		if (test_bit(SDE_QOS_DANGER_LUT, mdata->sde_qos_map))
			SDE_REGDMA_WRITE(wrptr, ROT_WB_DANGER_LUT,
					mdata->lut_cfg[SDE_ROT_WR].danger_lut);

		/* Safe LUT WR setting */
		if (test_bit(SDE_QOS_SAFE_LUT, mdata->sde_qos_map))
			SDE_REGDMA_WRITE(wrptr, ROT_WB_SAFE_LUT,
					mdata->lut_cfg[SDE_ROT_WR].safe_lut);

	/* Inline rotation setting */
	} else {
		/* QOS LUT WR setting */
		if (test_bit(SDE_INLINE_QOS_LUT, mdata->sde_inline_qos_map)) {
			SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_0,
					mdata->inline_lut_cfg[SDE_ROT_WR].creq_lut_0);
			SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_1,
					mdata->inline_lut_cfg[SDE_ROT_WR].creq_lut_1);
		}

		/* Danger LUT WR setting */
		if (test_bit(SDE_INLINE_QOS_DANGER_LUT,
					mdata->sde_inline_qos_map))
			SDE_REGDMA_WRITE(wrptr, ROT_WB_DANGER_LUT,
					mdata->inline_lut_cfg[SDE_ROT_WR].danger_lut);

		/* Safe LUT WR setting */
		if (test_bit(SDE_INLINE_QOS_SAFE_LUT,
					mdata->sde_inline_qos_map))
			SDE_REGDMA_WRITE(wrptr, ROT_WB_SAFE_LUT,
					mdata->inline_lut_cfg[SDE_ROT_WR].safe_lut);
	}

	/* Update command queue write ptr */
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
}

/*
 * sde_hw_rotator_setup_qos_lut_rd - Set QoS LUT/Danger LUT/Safe LUT configs
 * for the SSPP rotator for inline and offline rotation.
 *
 * @ctx: Pointer to rotator context
 */
static void sde_hw_rotator_setup_qos_lut_rd(struct sde_hw_rotator_context *ctx)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	u32 *wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	/* Offline rotation setting */
	if (!ctx->sbuf_mode) {
		/* QOS LUT RD setting */
		if (test_bit(SDE_QOS_LUT, mdata->sde_qos_map)) {
			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_0,
					mdata->lut_cfg[SDE_ROT_RD].creq_lut_0);
			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_1,
					mdata->lut_cfg[SDE_ROT_RD].creq_lut_1);
		}

		/* Danger LUT RD setting */
		if (test_bit(SDE_QOS_DANGER_LUT, mdata->sde_qos_map))
			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_DANGER_LUT,
					mdata->lut_cfg[SDE_ROT_RD].danger_lut);

		/* Safe LUT RD setting */
		if (test_bit(SDE_QOS_SAFE_LUT, mdata->sde_qos_map))
			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SAFE_LUT,
					mdata->lut_cfg[SDE_ROT_RD].safe_lut);

	/* Inline rotation setting */
	} else {
		/* QOS LUT RD setting */
		if (test_bit(SDE_INLINE_QOS_LUT, mdata->sde_inline_qos_map)) {
			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_0,
					mdata->inline_lut_cfg[SDE_ROT_RD].creq_lut_0);
			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_1,
					mdata->inline_lut_cfg[SDE_ROT_RD].creq_lut_1);
		}

		/* Danger LUT RD setting */
		if (test_bit(SDE_INLINE_QOS_DANGER_LUT,
					mdata->sde_inline_qos_map))
			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_DANGER_LUT,
					mdata->inline_lut_cfg[SDE_ROT_RD].danger_lut);

		/* Safe LUT RD setting */
		if (test_bit(SDE_INLINE_QOS_SAFE_LUT,
					mdata->sde_inline_qos_map))
			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SAFE_LUT,
					mdata->inline_lut_cfg[SDE_ROT_RD].safe_lut);
	}

	/* Update command queue write ptr */
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
}

/*
 * sde_hw_rotator_setup_fetchengine - setup fetch engine
 * @ctx: Pointer to rotator context
 * @queue_id: Priority queue identifier
 * @cfg: Fetch configuration
 * @danger_lut: real-time QoS LUT for danger setting (not used)
 * @safe_lut: real-time QoS LUT for safe setting (not used)
 * @dnsc_factor_w: downscale factor for width
 * @dnsc_factor_h: downscale factor for height
 * @flags: Control flag
 */
static void sde_hw_rotator_setup_fetchengine(struct sde_hw_rotator_context *ctx,
		enum sde_rot_queue_prio queue_id,
		struct sde_hw_rot_sspp_cfg *cfg, u32 danger_lut, u32 safe_lut,
		u32 dnsc_factor_w, u32 dnsc_factor_h, u32 flags)
{
	struct sde_hw_rotator *rot = ctx->rot;
	struct sde_mdp_format_params *fmt;
	struct sde_mdp_data *data;
	struct sde_rot_cdp_params cdp_params = {0};
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	u32 *wrptr;
	u32 opmode = 0;
	u32 chroma_samp = 0;
	u32 src_format = 0;
	u32 unpack = 0;
	u32 width = cfg->img_width;
	u32 height = cfg->img_height;
	u32 fetch_blocksize = 0;
	int i;

	if (ctx->rot->mode == ROT_REGDMA_ON) {
		SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_INT_EN,
				REGDMA_INT_MASK);
		SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_OP_MODE,
				REGDMA_EN);
	}

	wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	/*
	 * initialize start control trigger selection first
	 */
	if (test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map)) {
		if (ctx->sbuf_mode)
			SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL,
					ctx->start_ctrl);
		else
			SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, 0);
	}

	/* source image setup */
	if ((flags & SDE_ROT_FLAG_DEINTERLACE)
			&& !(flags & SDE_ROT_FLAG_SOURCE_ROTATED_90)) {
		for (i = 0; i < cfg->src_plane.num_planes; i++)
			cfg->src_plane.ystride[i] *= 2;
		width *= 2;
		height /= 2;
	}

	/*
	 * REGDMA BLK write from SRC_SIZE to OP_MODE, total 15 registers
	 */
	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_SIZE, 15);

	/* SRC_SIZE, SRC_IMG_SIZE, SRC_XY, OUT_SIZE, OUT_XY */
	SDE_REGDMA_BLKWRITE_DATA(wrptr,
			cfg->src_rect->w | (cfg->src_rect->h << 16));
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0); /* SRC_IMG_SIZE unused */
	SDE_REGDMA_BLKWRITE_DATA(wrptr,
			cfg->src_rect->x | (cfg->src_rect->y << 16));
	SDE_REGDMA_BLKWRITE_DATA(wrptr,
			cfg->src_rect->w | (cfg->src_rect->h << 16));
	SDE_REGDMA_BLKWRITE_DATA(wrptr,
			cfg->src_rect->x | (cfg->src_rect->y << 16));

	/* SRC_ADDR [0-3], SRC_YSTRIDE [0-1] */
	data = cfg->data;
	for (i = 0; i < SDE_ROT_MAX_PLANES; i++)
		SDE_REGDMA_BLKWRITE_DATA(wrptr, data->p[i].addr);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->src_plane.ystride[0] |
			(cfg->src_plane.ystride[1] << 16));
	SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->src_plane.ystride[2] |
			(cfg->src_plane.ystride[3] << 16));

	/* UNUSED, write 0 */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);

	/* setup source format */
	fmt = cfg->fmt;

	chroma_samp = fmt->chroma_sample;
	if (flags & SDE_ROT_FLAG_SOURCE_ROTATED_90) {
		if (chroma_samp == SDE_MDP_CHROMA_H2V1)
			chroma_samp = SDE_MDP_CHROMA_H1V2;
		else if (chroma_samp == SDE_MDP_CHROMA_H1V2)
			chroma_samp = SDE_MDP_CHROMA_H2V1;
	}

	src_format = (chroma_samp << 23) |
			(fmt->fetch_planes << 19) |
			(fmt->bits[C3_ALPHA] << 6) |
			(fmt->bits[C2_R_Cr] << 4) |
			(fmt->bits[C1_B_Cb] << 2) |
			(fmt->bits[C0_G_Y] << 0);

	if (fmt->alpha_enable &&
			(fmt->fetch_planes == SDE_MDP_PLANE_INTERLEAVED))
		src_format |= BIT(8); /* SRCC3_EN */

	src_format |= ((fmt->unpack_count - 1) << 12) |
			(fmt->unpack_tight << 17) |
			(fmt->unpack_align_msb << 18) |
			((fmt->bpp - 1) << 9) |
			((fmt->frame_format & 3) << 30);

	if (flags & SDE_ROT_FLAG_ROT_90)
		src_format |= BIT(11); /* ROT90 */

	if (sde_mdp_is_ubwc_format(fmt))
		opmode |= BIT(0); /* BWC_DEC_EN */

	/* if this is YUV pixel format, enable CSC */
	if (sde_mdp_is_yuv_format(fmt))
		src_format |= BIT(15); /* SRC_COLOR_SPACE */

	if (fmt->pixel_mode == SDE_MDP_PIXEL_10BIT)
		src_format |= BIT(14); /* UNPACK_DX_FORMAT */

	if (rot->solid_fill)
		src_format |= BIT(22); /* SOLID_FILL */

	/* SRC_FORMAT */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, src_format);

	/* setup source unpack pattern */
	unpack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
			(fmt->element[1] << 8) | (fmt->element[0] << 0);

	/* SRC_UNPACK_PATTERN */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, unpack);

	/* setup source op mode */
	if (flags & SDE_ROT_FLAG_FLIP_LR)
		opmode |= BIT(13); /* FLIP_MODE L/R horizontal flip */
	if (flags & SDE_ROT_FLAG_FLIP_UD)
		opmode |= BIT(14); /* FLIP_MODE U/D vertical flip */
	opmode |= BIT(31); /* MDSS_MDP_OP_PE_OVERRIDE */

	/* SRC_OP_MODE */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, opmode);

	/* setup source fetch config, TP10 uses different block size */
	if (test_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map) &&
			(dnsc_factor_w == 1) && (dnsc_factor_h == 1)) {
		if (sde_mdp_is_tp10_format(fmt))
			fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_144_EXT;
		else
			fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_192_EXT;
	} else {
		if (sde_mdp_is_tp10_format(fmt))
			fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_96;
		else
			fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_128;
	}

	if (rot->solid_fill)
		SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_CONSTANT_COLOR,
				rot->constant_color);

	SDE_REGDMA_WRITE(wrptr, ROT_SSPP_FETCH_CONFIG,
			fetch_blocksize |
			SDE_ROT_SSPP_FETCH_CONFIG_RESET_VALUE |
			((rot->highest_bank & 0x3) << 18));

	if (test_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map))
		SDE_REGDMA_WRITE(wrptr, ROT_SSPP_UBWC_STATIC_CTRL, BIT(31) |
				((ctx->rot->ubwc_malsize & 0x3) << 8) |
				((ctx->rot->highest_bank & 0x3) << 4) |
				((ctx->rot->ubwc_swizzle & 0x1) << 0));

	/* setup source buffer plane security status */
	if (flags & (SDE_ROT_FLAG_SECURE_OVERLAY_SESSION |
			SDE_ROT_FLAG_SECURE_CAMERA_SESSION)) {
		SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_ADDR_SW_STATUS, 0xF);
		ctx->is_secure = true;
	} else {
		SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_ADDR_SW_STATUS, 0);
		ctx->is_secure = false;
	}

	/* Update command queue write ptr */
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);

	/* CDP register RD setting */
	cdp_params.enable = test_bit(SDE_QOS_CDP, mdata->sde_qos_map) ?
			mdata->enable_cdp[SDE_ROT_RD] : false;
	cdp_params.fmt = fmt;
	cdp_params.offset = ROT_SSPP_CDP_CNTL;
	sde_hw_rotator_cdp_configs(ctx, &cdp_params);

	/* QOS LUT / Danger LUT / Safe LUT RD setting */
	sde_hw_rotator_setup_qos_lut_rd(ctx);

	wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	/*
	 * Determine if traffic shaping is required. Only enable traffic
	 * shaping when content is 4k@30fps. The actual traffic shaping
	 * bandwidth calculation is done in output setup.
	 */
	if (((!ctx->sbuf_mode)
			&& (cfg->src_rect->w * cfg->src_rect->h) >= RES_UHD)
			&& (cfg->fps <= 30)) {
		SDEROT_DBG("Enable Traffic Shaper\n");
		ctx->is_traffic_shaping = true;
	} else {
		SDEROT_DBG("Disable Traffic Shaper\n");
		ctx->is_traffic_shaping = false;
	}

	/* Update command queue write ptr */
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
}

/*
 * sde_hw_rotator_setup_wbengine - setup writeback engine
 * @ctx: Pointer to rotator context
 * @queue_id: Priority queue identifier
 * @cfg: Writeback configuration
 * @flags: Control flag
 */
static void sde_hw_rotator_setup_wbengine(struct sde_hw_rotator_context *ctx,
		enum sde_rot_queue_prio queue_id,
		struct sde_hw_rot_wb_cfg *cfg,
		u32 flags)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	struct sde_mdp_format_params *fmt;
	struct sde_rot_cdp_params cdp_params = {0};
	u32 *wrptr;
	u32 pack = 0;
	u32 dst_format = 0;
	u32 partial_write = 0;
	int i;

	wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	fmt = cfg->fmt;

	/* setup WB DST format */
	dst_format |= (fmt->chroma_sample << 23) |
			(fmt->fetch_planes << 19) |
			(fmt->bits[C3_ALPHA] << 6) |
			(fmt->bits[C2_R_Cr] << 4) |
			(fmt->bits[C1_B_Cb] << 2) |
			(fmt->bits[C0_G_Y] << 0);

	/* alpha control */
	if (fmt->bits[C3_ALPHA] || fmt->alpha_enable) {
		dst_format |= BIT(8);
		if (!fmt->alpha_enable) {
			dst_format |= BIT(14);
			SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ALPHA_X_VALUE, 0);
		}
	}

	dst_format |= ((fmt->unpack_count - 1) << 12) |
			(fmt->unpack_tight << 17) |
			(fmt->unpack_align_msb << 18) |
			((fmt->bpp - 1) << 9) |
			((fmt->frame_format & 3) << 30);

	if (sde_mdp_is_yuv_format(fmt))
		dst_format |= BIT(15);

	if (fmt->pixel_mode == SDE_MDP_PIXEL_10BIT)
		dst_format |= BIT(21); /* PACK_DX_FORMAT */

	/*
	 * REGDMA BLK write, from DST_FORMAT to DST_YSTRIDE 1, total 9 regs
	 */
	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_WB_DST_FORMAT, 9);

	/* DST_FORMAT */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, dst_format);

	/* DST_OP_MODE */
	if (sde_mdp_is_ubwc_format(fmt))
		SDE_REGDMA_BLKWRITE_DATA(wrptr, BIT(0));
	else
		SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);

	/* DST_PACK_PATTERN */
	pack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
			(fmt->element[1] << 8) | (fmt->element[0] << 0);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, pack);

	/* DST_ADDR [0-3], DST_YSTRIDE [0-1] */
	for (i = 0; i < SDE_ROT_MAX_PLANES; i++)
		SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->data->p[i].addr);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->dst_plane.ystride[0] |
			(cfg->dst_plane.ystride[1] << 16));
	SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->dst_plane.ystride[2] |
			(cfg->dst_plane.ystride[3] << 16));

	/* setup WB out image size and ROI */
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_IMG_SIZE,
			cfg->img_width | (cfg->img_height << 16));
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_SIZE,
			cfg->dst_rect->w | (cfg->dst_rect->h << 16));
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_XY,
			cfg->dst_rect->x | (cfg->dst_rect->y << 16));

	if (flags & (SDE_ROT_FLAG_SECURE_OVERLAY_SESSION |
			SDE_ROT_FLAG_SECURE_CAMERA_SESSION))
		SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ADDR_SW_STATUS, 0x1);
	else
		SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ADDR_SW_STATUS, 0);

	/*
	 * setup Downscale factor
	 */
	SDE_REGDMA_WRITE(wrptr, ROTTOP_DNSC,
			cfg->v_downscale_factor |
			(cfg->h_downscale_factor << 16));

	/* partial write check */
	if (test_bit(SDE_CAPS_PARTIALWR, mdata->sde_caps_map) &&
			!sde_mdp_is_ubwc_format(fmt))
		partial_write = BIT(10);

	/* write config setup for bank configuration */
	SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_WRITE_CONFIG, partial_write |
			(ctx->rot->highest_bank & 0x3) << 8);

	if (test_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map))
		SDE_REGDMA_WRITE(wrptr, ROT_WB_UBWC_STATIC_CTRL,
				((ctx->rot->ubwc_malsize & 0x3) << 8) |
				((ctx->rot->highest_bank & 0x3) << 4) |
				((ctx->rot->ubwc_swizzle & 0x1) << 0));

	if (test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map))
		SDE_REGDMA_WRITE(wrptr, ROT_WB_SYS_CACHE_MODE,
				ctx->sys_cache_mode);

	SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, ctx->op_mode |
			(flags & SDE_ROT_FLAG_ROT_90 ? BIT(1) : 0) | BIT(0));

	sde_hw_rotator_put_regdma_segment(ctx, wrptr);

	/* CDP register WR setting */
	cdp_params.enable = test_bit(SDE_QOS_CDP, mdata->sde_qos_map) ?
			mdata->enable_cdp[SDE_ROT_WR] : false;
	cdp_params.fmt = fmt;
	cdp_params.offset = ROT_WB_CDP_CNTL;
	sde_hw_rotator_cdp_configs(ctx, &cdp_params);

	/* QOS LUT / Danger LUT / Safe LUT WR setting */
	sde_hw_rotator_setup_qos_lut_wr(ctx);

	wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	/* setup traffic shaper for 4k 30fps content or if prefill_bw is set */
	if (!ctx->sbuf_mode &&
			(ctx->is_traffic_shaping || cfg->prefill_bw)) {
		u32 bw;

		/*
		 * Target to finish in 12ms, so we need to set the number of
		 * bytes per clock tick for traffic shaping.
		 * Each clock tick runs at 19.2MHz, so we need to know the
		 * total number of clock ticks in 12ms, i.e.
		 * 12ms / (1/19.2MHz) ==> 230400.
		 * Finally, calculate the byte count per clock tick based on
		 * resolution, bpp and compression ratio.
		 */
		bw = cfg->dst_rect->w * cfg->dst_rect->h;

		if (fmt->chroma_sample == SDE_MDP_CHROMA_420)
			bw = (bw * 3) / 2;
		else
			bw *= fmt->bpp;

		bw /= TRAFFIC_SHAPE_CLKTICK_12MS;

		/* use prefill bandwidth instead if specified */
		if (cfg->prefill_bw)
			bw = DIV_ROUND_UP(cfg->prefill_bw,
					TRAFFIC_SHAPE_VSYNC_CLK);

		if (bw > 0xFF)
			bw = 0xFF;
		SDE_REGDMA_WRITE(wrptr, ROT_WB_TRAFFIC_SHAPER_WR_CLIENT,
				BIT(31) | (cfg->prefill_bw ? BIT(27) : 0) | bw);
		SDEROT_DBG("Enable ROT_WB Traffic Shaper:%d\n", bw);
	} else {
		SDE_REGDMA_WRITE(wrptr, ROT_WB_TRAFFIC_SHAPER_WR_CLIENT, 0);
		SDEROT_DBG("Disable ROT_WB Traffic Shaper\n");
	}

	/* Update command queue write ptr */
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
}
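
/*
 * Worked example of the traffic shaper arithmetic above (numbers only, not
 * compiled): for a 3840x2160 YUV 4:2:0 destination, the frame size is
 * 3840 * 2160 * 3 / 2 = 12441600 bytes.  Spreading that over the 12 ms
 * budget (230400 ticks of the 19.2 MHz counter) gives
 * 12441600 / 230400 = 54 bytes per tick, which fits in the 8-bit field and
 * is programmed together with the enable bit, BIT(31).
 */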

/*
 * sde_hw_rotator_start_no_regdma - start non-regdma operation
 * @ctx: Pointer to rotator context
 * @queue_id: Priority queue identifier
 */
static u32 sde_hw_rotator_start_no_regdma(struct sde_hw_rotator_context *ctx,
		enum sde_rot_queue_prio queue_id)
{
	struct sde_hw_rotator *rot = ctx->rot;
	u32 *wrptr;
	u32 *rdptr;
	u8 *addr;
	u32 mask;
	u32 blksize;

	rdptr = sde_hw_rotator_get_regdma_segment_base(ctx);
	wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	if (rot->irq_num >= 0) {
		SDE_REGDMA_WRITE(wrptr, ROTTOP_INTR_EN, 1);
		SDE_REGDMA_WRITE(wrptr, ROTTOP_INTR_CLEAR, 1);
		reinit_completion(&ctx->rot_comp);
		sde_hw_rotator_enable_irq(rot);
	}

	SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, ctx->start_ctrl);

	/* Update command queue write ptr */
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);

	SDEROT_DBG("BEGIN %d\n", ctx->timestamp);
	/* Write all command stream to Rotator blocks */
	/* Rotator will start right away after command stream finish writing */
	while (rdptr < wrptr) {
		u32 op = REGDMA_OP_MASK & *rdptr;

		switch (op) {
		case REGDMA_OP_NOP:
			SDEROT_DBG("NOP\n");
			rdptr++;
			break;
		case REGDMA_OP_REGWRITE:
			SDEROT_DBG("REGW %6.6x %8.8x\n",
					rdptr[0] & REGDMA_ADDR_OFFSET_MASK,
					rdptr[1]);
			addr = rot->mdss_base +
				(*rdptr++ & REGDMA_ADDR_OFFSET_MASK);
			writel_relaxed(*rdptr++, addr);
			break;
		case REGDMA_OP_REGMODIFY:
			SDEROT_DBG("REGM %6.6x %8.8x %8.8x\n",
					rdptr[0] & REGDMA_ADDR_OFFSET_MASK,
					rdptr[1], rdptr[2]);
			addr = rot->mdss_base +
				(*rdptr++ & REGDMA_ADDR_OFFSET_MASK);
			mask = *rdptr++;
			writel_relaxed((readl_relaxed(addr) & mask) | *rdptr++,
					addr);
			break;
		case REGDMA_OP_BLKWRITE_SINGLE:
			SDEROT_DBG("BLKWS %6.6x %6.6x\n",
					rdptr[0] & REGDMA_ADDR_OFFSET_MASK,
					rdptr[1]);
			addr = rot->mdss_base +
				(*rdptr++ & REGDMA_ADDR_OFFSET_MASK);
			blksize = *rdptr++;
			while (blksize--) {
				SDEROT_DBG("DATA %8.8x\n", rdptr[0]);
				writel_relaxed(*rdptr++, addr);
			}
			break;
		case REGDMA_OP_BLKWRITE_INC:
			SDEROT_DBG("BLKWI %6.6x %6.6x\n",
					rdptr[0] & REGDMA_ADDR_OFFSET_MASK,
					rdptr[1]);
			addr = rot->mdss_base +
				(*rdptr++ & REGDMA_ADDR_OFFSET_MASK);
			blksize = *rdptr++;
			while (blksize--) {
				SDEROT_DBG("DATA %8.8x\n", rdptr[0]);
				writel_relaxed(*rdptr++, addr);
				addr += 4;
			}
			break;
		default:
			/*
			 * Other OP modes are not supported; skip the data
			 * for an unrecognized OP mode.
			 */
			SDEROT_DBG("UNDEFINED\n");
			rdptr++;
			break;
		}
	}
	SDEROT_DBG("END %d\n", ctx->timestamp);

	return ctx->timestamp;
}
1531
1532/*
1533 * sde_hw_rotator_start_regdma - start regdma operation
1534 * @ctx: Pointer to rotator context
1535 * @queue_id: Priority queue identifier
1536 */
1537static u32 sde_hw_rotator_start_regdma(struct sde_hw_rotator_context *ctx,
1538 enum sde_rot_queue_prio queue_id)
1539{
1540 struct sde_hw_rotator *rot = ctx->rot;
1541 u32 *wrptr;
1542 u32 regdmaSlot;
1543 u32 offset;
1544 long length;
1545 long ts_length;
1546 u32 enableInt;
1547 u32 swts = 0;
1548 u32 mask = 0;
Alan Kwong6bc64622017-02-04 17:36:03 -08001549 u32 trig_sel;
Alan Kwong9487de22016-01-16 22:06:36 -05001550
1551 wrptr = sde_hw_rotator_get_regdma_segment(ctx);
1552
Alan Kwong9487de22016-01-16 22:06:36 -05001553 /*
1554 * Last ROT command must be ROT_START before REGDMA start
1555 */
Alan Kwong6bc64622017-02-04 17:36:03 -08001556 SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, ctx->start_ctrl);
1557
Alan Kwong9487de22016-01-16 22:06:36 -05001558 sde_hw_rotator_put_regdma_segment(ctx, wrptr);
1559
1560 /*
1561 * Start REGDMA with command offset and size
1562 */
1563 regdmaSlot = sde_hw_rotator_get_regdma_ctxidx(ctx);
1564 length = ((long)wrptr - (long)ctx->regdma_base) / 4;
1565 offset = (u32)(ctx->regdma_base - (u32 *)(rot->mdss_base +
1566 REGDMA_RAM_REGDMA_CMD_RAM));
1567 enableInt = ((ctx->timestamp & 1) + 1) << 30;
Alan Kwong6bc64622017-02-04 17:36:03 -08001568 trig_sel = ctx->sbuf_mode ? REGDMA_CMD_TRIG_SEL_MDP_FLUSH :
1569 REGDMA_CMD_TRIG_SEL_SW_START;
Alan Kwong9487de22016-01-16 22:06:36 -05001570
1571 SDEROT_DBG(
1572 "regdma(%d)[%d] <== INT:0x%X|length:%ld|offset:0x%X, ts:%X\n",
1573 queue_id, regdmaSlot, enableInt, length, offset,
1574 ctx->timestamp);
1575
1576 /* ensure the command packet is issued before the submit command */
1577 wmb();
1578
1579 /* REGDMA submission for current context */
1580 if (queue_id == ROT_QUEUE_HIGH_PRIORITY) {
1581 SDE_ROTREG_WRITE(rot->mdss_base,
1582 REGDMA_CSR_REGDMA_QUEUE_0_SUBMIT,
Alan Kwong6bc64622017-02-04 17:36:03 -08001583 (ctx->sbuf_mode ? enableInt : 0) | trig_sel |
1584 ((length & 0x3ff) << 14) | offset);
Alan Kwong9487de22016-01-16 22:06:36 -05001585 swts = ctx->timestamp;
1586 mask = ~SDE_REGDMA_SWTS_MASK;
1587 } else {
1588 SDE_ROTREG_WRITE(rot->mdss_base,
1589 REGDMA_CSR_REGDMA_QUEUE_1_SUBMIT,
Alan Kwong6bc64622017-02-04 17:36:03 -08001590 (ctx->sbuf_mode ? enableInt : 0) | trig_sel |
1591 ((length & 0x3ff) << 14) | offset);
Alan Kwong9487de22016-01-16 22:06:36 -05001592 swts = ctx->timestamp << SDE_REGDMA_SWTS_SHIFT;
1593 mask = ~(SDE_REGDMA_SWTS_MASK << SDE_REGDMA_SWTS_SHIFT);
1594 }
1595
Alan Kwong6bc64622017-02-04 17:36:03 -08001596 /* timestamp update can only be used in offline multi-context mode */
1597 if (!ctx->sbuf_mode) {
1598 /* Write timestamp after previous rotator job finished */
1599 sde_hw_rotator_setup_timestamp_packet(ctx, mask, swts);
1600 offset += length;
1601 ts_length = sde_hw_rotator_get_regdma_segment(ctx) - wrptr;
1602 WARN_ON((length + ts_length) > SDE_HW_ROT_REGDMA_SEG_SIZE);
Alan Kwong9487de22016-01-16 22:06:36 -05001603
Alan Kwong6bc64622017-02-04 17:36:03 -08001604		/* ensure command packet is issued before the submit command */
1605 wmb();
Alan Kwong9487de22016-01-16 22:06:36 -05001606
Alan Kwong6bc64622017-02-04 17:36:03 -08001607 if (queue_id == ROT_QUEUE_HIGH_PRIORITY) {
1608 SDE_ROTREG_WRITE(rot->mdss_base,
1609 REGDMA_CSR_REGDMA_QUEUE_0_SUBMIT,
1610 enableInt | (ts_length << 14) | offset);
1611 } else {
1612 SDE_ROTREG_WRITE(rot->mdss_base,
1613 REGDMA_CSR_REGDMA_QUEUE_1_SUBMIT,
1614 enableInt | (ts_length << 14) | offset);
1615 }
Alan Kwong9487de22016-01-16 22:06:36 -05001616 }
1617
Alan Kwong9487de22016-01-16 22:06:36 -05001618 /* Update command queue write ptr */
1619 sde_hw_rotator_put_regdma_segment(ctx, wrptr);
1620
1621 return ctx->timestamp;
1622}
1623
1624/*
1625 * sde_hw_rotator_wait_done_no_regdma - wait for non-regdma completion
1626 * @ctx: Pointer to rotator context
1627 * @queue_id: Priority queue identifier
 1628 * @flag: Option flag
1629 */
1630static u32 sde_hw_rotator_wait_done_no_regdma(
1631 struct sde_hw_rotator_context *ctx,
1632 enum sde_rot_queue_prio queue_id, u32 flag)
1633{
1634 struct sde_hw_rotator *rot = ctx->rot;
1635 int rc = 0;
1636 u32 sts = 0;
1637 u32 status;
1638 unsigned long flags;
1639
1640 if (rot->irq_num >= 0) {
1641 SDEROT_DBG("Wait for Rotator completion\n");
1642 rc = wait_for_completion_timeout(&ctx->rot_comp,
Alan Kwong6bc64622017-02-04 17:36:03 -08001643 msecs_to_jiffies(rot->koff_timeout));
Alan Kwong9487de22016-01-16 22:06:36 -05001644
1645 spin_lock_irqsave(&rot->rotisr_lock, flags);
1646 status = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
1647 if (rc == 0) {
1648 /*
 1649			 * Timeout: there might be an error,
 1650			 * or the rotator may still be busy
1651 */
1652 if (status & ROT_BUSY_BIT)
1653 SDEROT_ERR(
1654 "Timeout waiting for rotator done\n");
1655 else if (status & ROT_ERROR_BIT)
1656 SDEROT_ERR(
 1657					"Rotator reported error status\n");
1658 else
1659 SDEROT_WARN(
1660 "Timeout waiting, but rotator job is done!!\n");
1661
Alan Kwong818b7fc2016-07-24 22:07:41 -04001662 sde_hw_rotator_disable_irq(rot);
Alan Kwong9487de22016-01-16 22:06:36 -05001663 }
1664 spin_unlock_irqrestore(&rot->rotisr_lock, flags);
1665 } else {
1666 int cnt = 200;
1667
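		/*
		 * No rotator IRQ available: poll ROTTOP_STATUS for completion
		 * or error, for up to roughly 100ms (200 iterations x 500us).
		 */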
1668 do {
1669 udelay(500);
1670 status = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
1671 cnt--;
1672 } while ((cnt > 0) && (status & ROT_BUSY_BIT)
1673 && ((status & ROT_ERROR_BIT) == 0));
1674
1675 if (status & ROT_ERROR_BIT)
1676 SDEROT_ERR("Rotator error\n");
1677 else if (status & ROT_BUSY_BIT)
1678 SDEROT_ERR("Rotator busy\n");
1679
1680 SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
1681 ROT_DONE_CLEAR);
1682 }
1683
1684 sts = (status & ROT_ERROR_BIT) ? -ENODEV : 0;
1685
1686 return sts;
1687}
1688
1689/*
1690 * sde_hw_rotator_wait_done_regdma - wait for regdma completion
1691 * @ctx: Pointer to rotator context
1692 * @queue_id: Priority queue identifier
 1693 * @flag: Option flag
1694 */
1695static u32 sde_hw_rotator_wait_done_regdma(
1696 struct sde_hw_rotator_context *ctx,
1697 enum sde_rot_queue_prio queue_id, u32 flag)
1698{
1699 struct sde_hw_rotator *rot = ctx->rot;
1700 int rc = 0;
1701 u32 status;
1702 u32 last_isr;
1703 u32 last_ts;
1704 u32 int_id;
Alan Kwong818b7fc2016-07-24 22:07:41 -04001705 u32 swts;
Alan Kwong9487de22016-01-16 22:06:36 -05001706 u32 sts = 0;
Alan Kwong9487de22016-01-16 22:06:36 -05001707 unsigned long flags;
1708
1709 if (rot->irq_num >= 0) {
1710 SDEROT_DBG("Wait for REGDMA completion, ctx:%p, ts:%X\n",
1711 ctx, ctx->timestamp);
Alan Kwong818b7fc2016-07-24 22:07:41 -04001712 rc = wait_event_timeout(ctx->regdma_waitq,
1713 !sde_hw_rotator_pending_swts(rot, ctx, &swts),
Alan Kwong6bc64622017-02-04 17:36:03 -08001714 msecs_to_jiffies(rot->koff_timeout));
Alan Kwong9487de22016-01-16 22:06:36 -05001715
Benjamin Chane7ca72e2016-12-22 18:42:34 -05001716 ATRACE_INT("sde_rot_done", 0);
Alan Kwong9487de22016-01-16 22:06:36 -05001717 spin_lock_irqsave(&rot->rotisr_lock, flags);
1718
1719 last_isr = ctx->last_regdma_isr_status;
1720 last_ts = ctx->last_regdma_timestamp;
1721 status = last_isr & REGDMA_INT_MASK;
1722 int_id = last_ts & 1;
1723 SDEROT_DBG("INT status:0x%X, INT id:%d, timestamp:0x%X\n",
1724 status, int_id, last_ts);
1725
1726 if (rc == 0 || (status & REGDMA_INT_ERR_MASK)) {
Alan Kwong818b7fc2016-07-24 22:07:41 -04001727 bool pending;
1728
1729 pending = sde_hw_rotator_pending_swts(rot, ctx, &swts);
Alan Kwong9487de22016-01-16 22:06:36 -05001730 SDEROT_ERR(
Alan Kwong818b7fc2016-07-24 22:07:41 -04001731				"Timeout waiting for regdma interrupt status, ts:0x%X/0x%X pending:%d\n",
1732 ctx->timestamp, swts, pending);
Alan Kwong9487de22016-01-16 22:06:36 -05001733
1734 if (status & REGDMA_WATCHDOG_INT)
1735 SDEROT_ERR("REGDMA watchdog interrupt\n");
1736 else if (status & REGDMA_INVALID_DESCRIPTOR)
1737 SDEROT_ERR("REGDMA invalid descriptor\n");
1738 else if (status & REGDMA_INCOMPLETE_CMD)
1739 SDEROT_ERR("REGDMA incomplete command\n");
1740 else if (status & REGDMA_INVALID_CMD)
1741 SDEROT_ERR("REGDMA invalid command\n");
1742
Alan Kwong818b7fc2016-07-24 22:07:41 -04001743 sde_hw_rotator_dump_status(rot);
Alan Kwong9487de22016-01-16 22:06:36 -05001744 status = ROT_ERROR_BIT;
Alan Kwong818b7fc2016-07-24 22:07:41 -04001745 } else {
1746 if (rc == 1)
1747 SDEROT_WARN(
1748 "REGDMA done but no irq, ts:0x%X/0x%X\n",
1749 ctx->timestamp, swts);
Alan Kwong9487de22016-01-16 22:06:36 -05001750 status = 0;
1751 }
1752
Alan Kwong9487de22016-01-16 22:06:36 -05001753 spin_unlock_irqrestore(&rot->rotisr_lock, flags);
1754 } else {
1755 int cnt = 200;
Alan Kwongb0679602016-11-27 17:04:13 -08001756 bool pending;
Alan Kwong9487de22016-01-16 22:06:36 -05001757
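		/*
		 * No rotator IRQ available: poll the pending software
		 * timestamp and the REGDMA error status instead, again for up
		 * to roughly 100ms (200 iterations x 500us delay).
		 */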
1758 do {
1759 udelay(500);
Alan Kwongb0679602016-11-27 17:04:13 -08001760 last_isr = SDE_ROTREG_READ(rot->mdss_base,
1761 REGDMA_CSR_REGDMA_INT_STATUS);
1762 pending = sde_hw_rotator_pending_swts(rot, ctx, &swts);
Alan Kwong9487de22016-01-16 22:06:36 -05001763 cnt--;
Alan Kwongb0679602016-11-27 17:04:13 -08001764 } while ((cnt > 0) && pending &&
1765 ((last_isr & REGDMA_INT_ERR_MASK) == 0));
Alan Kwong9487de22016-01-16 22:06:36 -05001766
Alan Kwongb0679602016-11-27 17:04:13 -08001767 if (last_isr & REGDMA_INT_ERR_MASK) {
1768 SDEROT_ERR("Rotator error, ts:0x%X/0x%X status:%x\n",
1769 ctx->timestamp, swts, last_isr);
1770 sde_hw_rotator_dump_status(rot);
1771 status = ROT_ERROR_BIT;
1772 } else if (pending) {
1773 SDEROT_ERR("Rotator timeout, ts:0x%X/0x%X status:%x\n",
1774 ctx->timestamp, swts, last_isr);
1775 sde_hw_rotator_dump_status(rot);
1776 status = ROT_ERROR_BIT;
1777 } else {
1778 status = 0;
1779 }
Alan Kwong9487de22016-01-16 22:06:36 -05001780
1781 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_INT_CLEAR,
Alan Kwongb0679602016-11-27 17:04:13 -08001782 last_isr);
Alan Kwong9487de22016-01-16 22:06:36 -05001783 }
1784
1785 sts = (status & ROT_ERROR_BIT) ? -ENODEV : 0;
1786
Benjamin Chan4ec1f1d2016-09-15 22:49:49 -04001787 if (status & ROT_ERROR_BIT)
Benjamin Chan2d6411a2017-03-28 18:01:53 -04001788 SDEROT_EVTLOG_TOUT_HANDLER("rot", "rot_dbg_bus",
1789 "vbif_dbg_bus", "panic");
Benjamin Chan4ec1f1d2016-09-15 22:49:49 -04001790
Alan Kwong9487de22016-01-16 22:06:36 -05001791 return sts;
1792}
1793
1794/*
1795 * setup_rotator_ops - setup callback functions for the low-level HAL
1796 * @ops: Pointer to low-level ops callback
1797 * @mode: Operation mode (non-regdma or regdma)
1798 */
1799static void setup_rotator_ops(struct sde_hw_rotator_ops *ops,
1800 enum sde_rotator_regdma_mode mode)
1801{
1802 ops->setup_rotator_fetchengine = sde_hw_rotator_setup_fetchengine;
1803 ops->setup_rotator_wbengine = sde_hw_rotator_setup_wbengine;
1804 if (mode == ROT_REGDMA_ON) {
1805 ops->start_rotator = sde_hw_rotator_start_regdma;
1806 ops->wait_rotator_done = sde_hw_rotator_wait_done_regdma;
1807 } else {
1808 ops->start_rotator = sde_hw_rotator_start_no_regdma;
1809 ops->wait_rotator_done = sde_hw_rotator_wait_done_no_regdma;
1810 }
1811}
1812
1813/*
1814 * sde_hw_rotator_swts_create - create software timestamp buffer
1815 * @rot: Pointer to rotator hw
1816 *
 1817 * This buffer is used by regdma to keep track of the last completed command.
1818 */
1819static int sde_hw_rotator_swts_create(struct sde_hw_rotator *rot)
1820{
1821 int rc = 0;
1822 struct ion_handle *handle;
1823 struct sde_mdp_img_data *data;
Abhijit Kulkarni298c8232016-09-26 22:32:10 -07001824 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
Alan Kwong9487de22016-01-16 22:06:36 -05001825 u32 bufsize = sizeof(int) * SDE_HW_ROT_REGDMA_TOTAL_CTX * 2;
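	/* one 32-bit timestamp slot per regdma context, for each of the two
	 * priority queues (assumed from the "* 2" and the per-queue indexing
	 * of swts_buf elsewhere in this file)
	 */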
1826
Abhijit Kulkarni298c8232016-09-26 22:32:10 -07001827 rot->iclient = mdata->iclient;
Alan Kwong9487de22016-01-16 22:06:36 -05001828
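	/*
	 * Allocation/mapping sequence used below: ion_alloc ->
	 * ion_share_dma_buf -> sde_smmu_dma_buf_attach ->
	 * dma_buf_map_attachment -> sde_smmu_map_dma_buf -> dma_buf_kmap.
	 * Errors unwind in reverse order through the labels at the end of
	 * this function.
	 */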
1829 handle = ion_alloc(rot->iclient, bufsize, SZ_4K,
1830 ION_HEAP(ION_SYSTEM_HEAP_ID), 0);
1831 if (IS_ERR_OR_NULL(handle)) {
1832 SDEROT_ERR("ion memory allocation failed\n");
1833 return -ENOMEM;
1834 }
1835
1836 data = &rot->swts_buf;
1837 data->len = bufsize;
1838 data->srcp_dma_buf = ion_share_dma_buf(rot->iclient, handle);
1839 if (IS_ERR(data->srcp_dma_buf)) {
1840 SDEROT_ERR("ion_dma_buf setup failed\n");
1841 rc = -ENOMEM;
1842 goto imap_err;
1843 }
1844
1845 sde_smmu_ctrl(1);
1846
1847 data->srcp_attachment = sde_smmu_dma_buf_attach(data->srcp_dma_buf,
1848 &rot->pdev->dev, SDE_IOMMU_DOMAIN_ROT_UNSECURE);
1849 if (IS_ERR_OR_NULL(data->srcp_attachment)) {
1850 SDEROT_ERR("sde_smmu_dma_buf_attach error\n");
1851 rc = -ENOMEM;
1852 goto err_put;
1853 }
1854
1855 data->srcp_table = dma_buf_map_attachment(data->srcp_attachment,
1856 DMA_BIDIRECTIONAL);
1857 if (IS_ERR_OR_NULL(data->srcp_table)) {
1858 SDEROT_ERR("dma_buf_map_attachment error\n");
1859 rc = -ENOMEM;
1860 goto err_detach;
1861 }
1862
1863 rc = sde_smmu_map_dma_buf(data->srcp_dma_buf, data->srcp_table,
1864 SDE_IOMMU_DOMAIN_ROT_UNSECURE, &data->addr,
1865 &data->len, DMA_BIDIRECTIONAL);
Alan Kwong6ce448d2016-11-24 18:45:20 -08001866 if (rc < 0) {
Alan Kwong9487de22016-01-16 22:06:36 -05001867 SDEROT_ERR("smmu_map_dma_buf failed: (%d)\n", rc);
1868 goto err_unmap;
1869 }
1870
Alan Kwong6ce448d2016-11-24 18:45:20 -08001871 dma_buf_begin_cpu_access(data->srcp_dma_buf, DMA_FROM_DEVICE);
Alan Kwong9487de22016-01-16 22:06:36 -05001872 rot->swts_buffer = dma_buf_kmap(data->srcp_dma_buf, 0);
1873 if (IS_ERR_OR_NULL(rot->swts_buffer)) {
1874 SDEROT_ERR("ion kernel memory mapping failed\n");
 1875		rc = -ENOMEM;
1876 goto kmap_err;
1877 }
1878
1879 data->mapped = true;
1880 SDEROT_DBG("swts buffer mapped: %pad/%lx va:%p\n", &data->addr,
1881 data->len, rot->swts_buffer);
1882
1883 ion_free(rot->iclient, handle);
1884
1885 sde_smmu_ctrl(0);
1886
1887 return rc;
1888kmap_err:
1889 sde_smmu_unmap_dma_buf(data->srcp_table, SDE_IOMMU_DOMAIN_ROT_UNSECURE,
1890 DMA_FROM_DEVICE, data->srcp_dma_buf);
1891err_unmap:
1892 dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table,
1893 DMA_FROM_DEVICE);
1894err_detach:
1895 dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment);
1896err_put:
1897 dma_buf_put(data->srcp_dma_buf);
1898 data->srcp_dma_buf = NULL;
1899imap_err:
1900 ion_free(rot->iclient, handle);
1901
1902 return rc;
1903}
1904
1905/*
1906 * sde_hw_rotator_swtc_destroy - destroy software timestamp buffer
1907 * @rot: Pointer to rotator hw
1908 */
1909static void sde_hw_rotator_swtc_destroy(struct sde_hw_rotator *rot)
1910{
1911 struct sde_mdp_img_data *data;
1912
1913 data = &rot->swts_buf;
1914
Alan Kwong6ce448d2016-11-24 18:45:20 -08001915 dma_buf_end_cpu_access(data->srcp_dma_buf, DMA_FROM_DEVICE);
Alan Kwong9487de22016-01-16 22:06:36 -05001916 dma_buf_kunmap(data->srcp_dma_buf, 0, rot->swts_buffer);
1917
1918 sde_smmu_unmap_dma_buf(data->srcp_table, SDE_IOMMU_DOMAIN_ROT_UNSECURE,
1919 DMA_FROM_DEVICE, data->srcp_dma_buf);
1920 dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table,
1921 DMA_FROM_DEVICE);
1922 dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment);
1923 dma_buf_put(data->srcp_dma_buf);
1924 data->srcp_dma_buf = NULL;
1925}
1926
1927/*
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04001928 * sde_hw_rotator_pre_pmevent - SDE rotator core will call this before a
1929 * PM event occurs
1930 * @mgr: Pointer to rotator manager
1931 * @pmon: Boolean indicate an on/off power event
1932 */
1933void sde_hw_rotator_pre_pmevent(struct sde_rot_mgr *mgr, bool pmon)
1934{
1935 struct sde_hw_rotator *rot;
1936 u32 l_ts, h_ts, swts, hwts;
1937 u32 rotsts, regdmasts;
1938
1939 /*
1940 * Check last HW timestamp with SW timestamp before power off event.
 1941	 * If there is a mismatch, it is quite possible that the rotator HW
 1942	 * is either hung or has not finished the last submitted job. In that
 1943	 * case, it is best to trigger a timeout event log to capture useful
 1944	 * event log data for analysis.
1945 */
1946 if (!pmon && mgr && mgr->hw_data) {
1947 rot = mgr->hw_data;
1948 h_ts = atomic_read(&rot->timestamp[ROT_QUEUE_HIGH_PRIORITY]);
1949 l_ts = atomic_read(&rot->timestamp[ROT_QUEUE_LOW_PRIORITY]);
1950
 1951		/* construct the combined timestamp */
1952 swts = (h_ts & SDE_REGDMA_SWTS_MASK) |
1953 ((l_ts & SDE_REGDMA_SWTS_MASK) <<
1954 SDE_REGDMA_SWTS_SHIFT);
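		/*
		 * Illustrative layout only (actual field widths come from
		 * SDE_REGDMA_SWTS_MASK/SHIFT): with a 16-bit shift, h_ts=0x12
		 * and l_ts=0x34 would combine into swts=0x00340012, i.e. the
		 * high priority queue timestamp in the lower half and the low
		 * priority queue timestamp in the upper half.
		 */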
1955
1956 /* Need to turn on clock to access rotator register */
1957 sde_rotator_clk_ctrl(mgr, true);
1958 hwts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);
1959 regdmasts = SDE_ROTREG_READ(rot->mdss_base,
1960 REGDMA_CSR_REGDMA_BLOCK_STATUS);
1961 rotsts = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
1962
1963 SDEROT_DBG(
1964 "swts:0x%x, hwts:0x%x, regdma-sts:0x%x, rottop-sts:0x%x\n",
1965 swts, hwts, regdmasts, rotsts);
1966 SDEROT_EVTLOG(swts, hwts, regdmasts, rotsts);
1967
1968 if ((swts != hwts) && ((regdmasts & REGDMA_BUSY) ||
1969 (rotsts & ROT_STATUS_MASK))) {
1970 SDEROT_ERR(
1971 "Mismatch SWTS with HWTS: swts:0x%x, hwts:0x%x, regdma-sts:0x%x, rottop-sts:0x%x\n",
1972 swts, hwts, regdmasts, rotsts);
Benjamin Chan2d6411a2017-03-28 18:01:53 -04001973 SDEROT_EVTLOG_TOUT_HANDLER("rot", "rot_dbg_bus",
1974 "vbif_dbg_bus", "panic");
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04001975 }
1976
1977 /* Turn off rotator clock after checking rotator registers */
1978 sde_rotator_clk_ctrl(mgr, false);
1979 }
1980}
1981
1982/*
1983 * sde_hw_rotator_post_pmevent - SDE rotator core will call this after a
1984 * PM event occurs
1985 * @mgr: Pointer to rotator manager
1986 * @pmon: Boolean indicate an on/off power event
1987 */
1988void sde_hw_rotator_post_pmevent(struct sde_rot_mgr *mgr, bool pmon)
1989{
1990 struct sde_hw_rotator *rot;
1991 u32 l_ts, h_ts, swts;
1992
1993 /*
1994 * After a power on event, the rotator HW is reset to default setting.
1995 * It is necessary to synchronize the SW timestamp with the HW.
1996 */
1997 if (pmon && mgr && mgr->hw_data) {
1998 rot = mgr->hw_data;
1999 h_ts = atomic_read(&rot->timestamp[ROT_QUEUE_HIGH_PRIORITY]);
2000 l_ts = atomic_read(&rot->timestamp[ROT_QUEUE_LOW_PRIORITY]);
2001
 2002		/* construct the combined timestamp */
2003 swts = (h_ts & SDE_REGDMA_SWTS_MASK) |
2004 ((l_ts & SDE_REGDMA_SWTS_MASK) <<
2005 SDE_REGDMA_SWTS_SHIFT);
2006
 2007		SDEROT_DBG("swts:0x%x, h_ts:0x%x, l_ts:0x%x\n",
2008 swts, h_ts, l_ts);
2009 SDEROT_EVTLOG(swts, h_ts, l_ts);
2010 rot->reset_hw_ts = true;
2011 rot->last_hw_ts = swts;
2012 }
2013}
2014
2015/*
Alan Kwong9487de22016-01-16 22:06:36 -05002016 * sde_hw_rotator_destroy - Destroy hw rotator and free allocated resources
2017 * @mgr: Pointer to rotator manager
2018 */
2019static void sde_hw_rotator_destroy(struct sde_rot_mgr *mgr)
2020{
2021 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
2022 struct sde_hw_rotator *rot;
2023
2024 if (!mgr || !mgr->pdev || !mgr->hw_data) {
2025 SDEROT_ERR("null parameters\n");
2026 return;
2027 }
2028
2029 rot = mgr->hw_data;
2030 if (rot->irq_num >= 0)
2031 devm_free_irq(&mgr->pdev->dev, rot->irq_num, mdata);
2032
2033 if (rot->mode == ROT_REGDMA_ON)
2034 sde_hw_rotator_swtc_destroy(rot);
2035
2036 devm_kfree(&mgr->pdev->dev, mgr->hw_data);
2037 mgr->hw_data = NULL;
2038}
2039
2040/*
2041 * sde_hw_rotator_alloc_ext - allocate rotator resource from rotator hw
2042 * @mgr: Pointer to rotator manager
2043 * @pipe_id: pipe identifier (not used)
2044 * @wb_id: writeback identifier/priority queue identifier
2045 *
2046 * This function allocates a new hw rotator resource for the given priority.
2047 */
2048static struct sde_rot_hw_resource *sde_hw_rotator_alloc_ext(
2049 struct sde_rot_mgr *mgr, u32 pipe_id, u32 wb_id)
2050{
2051 struct sde_hw_rotator_resource_info *resinfo;
2052
2053 if (!mgr || !mgr->hw_data) {
2054 SDEROT_ERR("null parameters\n");
2055 return NULL;
2056 }
2057
2058 /*
2059 * Allocate rotator resource info. Each allocation is per
2060 * HW priority queue
2061 */
2062 resinfo = devm_kzalloc(&mgr->pdev->dev, sizeof(*resinfo), GFP_KERNEL);
2063 if (!resinfo) {
 2064		SDEROT_ERR("Failed to allocate HW rotator resource info\n");
2065 return NULL;
2066 }
2067
2068 resinfo->rot = mgr->hw_data;
2069 resinfo->hw.wb_id = wb_id;
2070 atomic_set(&resinfo->hw.num_active, 0);
2071 init_waitqueue_head(&resinfo->hw.wait_queue);
2072
2073 /* For non-regdma, only support one active session */
2074 if (resinfo->rot->mode == ROT_REGDMA_OFF)
2075 resinfo->hw.max_active = 1;
2076 else {
2077 resinfo->hw.max_active = SDE_HW_ROT_REGDMA_TOTAL_CTX - 1;
2078
2079 if (resinfo->rot->iclient == NULL)
2080 sde_hw_rotator_swts_create(resinfo->rot);
2081 }
2082
Alan Kwongf987ea32016-07-06 12:11:44 -04002083 if (resinfo->rot->irq_num >= 0)
Alan Kwong818b7fc2016-07-24 22:07:41 -04002084 sde_hw_rotator_enable_irq(resinfo->rot);
Alan Kwongf987ea32016-07-06 12:11:44 -04002085
Alan Kwong9487de22016-01-16 22:06:36 -05002086 SDEROT_DBG("New rotator resource:%p, priority:%d\n",
2087 resinfo, wb_id);
2088
2089 return &resinfo->hw;
2090}
2091
2092/*
2093 * sde_hw_rotator_free_ext - free the given rotator resource
2094 * @mgr: Pointer to rotator manager
2095 * @hw: Pointer to rotator resource
2096 */
2097static void sde_hw_rotator_free_ext(struct sde_rot_mgr *mgr,
2098 struct sde_rot_hw_resource *hw)
2099{
2100 struct sde_hw_rotator_resource_info *resinfo;
2101
2102 if (!mgr || !mgr->hw_data)
2103 return;
2104
2105 resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
2106
2107 SDEROT_DBG(
2108 "Free rotator resource:%p, priority:%d, active:%d, pending:%d\n",
2109 resinfo, hw->wb_id, atomic_read(&hw->num_active),
2110 hw->pending_count);
2111
Alan Kwongf987ea32016-07-06 12:11:44 -04002112 if (resinfo->rot->irq_num >= 0)
Alan Kwong818b7fc2016-07-24 22:07:41 -04002113 sde_hw_rotator_disable_irq(resinfo->rot);
Alan Kwongf987ea32016-07-06 12:11:44 -04002114
Alan Kwong9487de22016-01-16 22:06:36 -05002115 devm_kfree(&mgr->pdev->dev, resinfo);
2116}
2117
2118/*
2119 * sde_hw_rotator_alloc_rotctx - allocate rotator context
2120 * @rot: Pointer to rotator hw
2121 * @hw: Pointer to rotator resource
2122 * @session_id: Session identifier of this context
Clarence Ip9e6c3302017-06-02 11:02:57 -04002123 * @sequence_id: Sequence identifier of this request
Alan Kwong6bc64622017-02-04 17:36:03 -08002124 * @sbuf_mode: true if stream buffer is requested
Alan Kwong9487de22016-01-16 22:06:36 -05002125 *
2126 * This function allocates a new rotator context for the given session id.
2127 */
2128static struct sde_hw_rotator_context *sde_hw_rotator_alloc_rotctx(
2129 struct sde_hw_rotator *rot,
2130 struct sde_rot_hw_resource *hw,
Alan Kwong6bc64622017-02-04 17:36:03 -08002131 u32 session_id,
Clarence Ip9e6c3302017-06-02 11:02:57 -04002132 u32 sequence_id,
Alan Kwong6bc64622017-02-04 17:36:03 -08002133 bool sbuf_mode)
Alan Kwong9487de22016-01-16 22:06:36 -05002134{
2135 struct sde_hw_rotator_context *ctx;
2136
2137 /* Allocate rotator context */
2138 ctx = devm_kzalloc(&rot->pdev->dev, sizeof(*ctx), GFP_KERNEL);
2139 if (!ctx) {
 2140		SDEROT_ERR("Failed to allocate HW rotator context\n");
2141 return NULL;
2142 }
2143
2144 ctx->rot = rot;
2145 ctx->q_id = hw->wb_id;
2146 ctx->session_id = session_id;
Clarence Ip9e6c3302017-06-02 11:02:57 -04002147 ctx->sequence_id = sequence_id;
Alan Kwong9487de22016-01-16 22:06:36 -05002148 ctx->hwres = hw;
2149 ctx->timestamp = atomic_add_return(1, &rot->timestamp[ctx->q_id]);
2150 ctx->timestamp &= SDE_REGDMA_SWTS_MASK;
2151 ctx->is_secure = false;
Alan Kwong6bc64622017-02-04 17:36:03 -08002152 ctx->sbuf_mode = sbuf_mode;
2153 INIT_LIST_HEAD(&ctx->list);
Alan Kwong9487de22016-01-16 22:06:36 -05002154
2155 ctx->regdma_base = rot->cmd_wr_ptr[ctx->q_id]
2156 [sde_hw_rotator_get_regdma_ctxidx(ctx)];
2157 ctx->regdma_wrptr = ctx->regdma_base;
2158 ctx->ts_addr = (dma_addr_t)((u32 *)rot->swts_buf.addr +
2159 ctx->q_id * SDE_HW_ROT_REGDMA_TOTAL_CTX +
2160 sde_hw_rotator_get_regdma_ctxidx(ctx));
2161
Alan Kwong818b7fc2016-07-24 22:07:41 -04002162 ctx->last_regdma_timestamp = SDE_REGDMA_SWTS_INVALID;
2163
Alan Kwong9487de22016-01-16 22:06:36 -05002164 init_completion(&ctx->rot_comp);
Alan Kwong818b7fc2016-07-24 22:07:41 -04002165 init_waitqueue_head(&ctx->regdma_waitq);
Alan Kwong9487de22016-01-16 22:06:36 -05002166
 2167	/* Store rotator context for lookup purposes */
2168 sde_hw_rotator_put_ctx(ctx);
2169
2170 SDEROT_DBG(
Alan Kwong6bc64622017-02-04 17:36:03 -08002171 "New rot CTX:%p, ctxidx:%d, session-id:%d, prio:%d, timestamp:%X, active:%d sbuf:%d\n",
Alan Kwong9487de22016-01-16 22:06:36 -05002172 ctx, sde_hw_rotator_get_regdma_ctxidx(ctx), ctx->session_id,
2173 ctx->q_id, ctx->timestamp,
Alan Kwong6bc64622017-02-04 17:36:03 -08002174 atomic_read(&ctx->hwres->num_active),
2175 ctx->sbuf_mode);
Alan Kwong9487de22016-01-16 22:06:36 -05002176
2177 return ctx;
2178}
2179
2180/*
2181 * sde_hw_rotator_free_rotctx - free the given rotator context
2182 * @rot: Pointer to rotator hw
2183 * @ctx: Pointer to rotator context
2184 */
2185static void sde_hw_rotator_free_rotctx(struct sde_hw_rotator *rot,
2186 struct sde_hw_rotator_context *ctx)
2187{
2188 if (!rot || !ctx)
2189 return;
2190
2191 SDEROT_DBG(
Alan Kwong6bc64622017-02-04 17:36:03 -08002192 "Free rot CTX:%p, ctxidx:%d, session-id:%d, prio:%d, timestamp:%X, active:%d sbuf:%d\n",
Alan Kwong9487de22016-01-16 22:06:36 -05002193 ctx, sde_hw_rotator_get_regdma_ctxidx(ctx), ctx->session_id,
2194 ctx->q_id, ctx->timestamp,
Alan Kwong6bc64622017-02-04 17:36:03 -08002195 atomic_read(&ctx->hwres->num_active),
2196 ctx->sbuf_mode);
Alan Kwong9487de22016-01-16 22:06:36 -05002197
Benjamin Chanc3e185f2016-11-08 21:48:21 -05002198	/* Remove rotator context from the lookup table */
2199 sde_hw_rotator_clr_ctx(ctx);
Alan Kwong9487de22016-01-16 22:06:36 -05002200
2201 devm_kfree(&rot->pdev->dev, ctx);
2202}
2203
2204/*
2205 * sde_hw_rotator_config - configure hw for the given rotation entry
2206 * @hw: Pointer to rotator resource
2207 * @entry: Pointer to rotation entry
2208 *
2209 * This function setup the fetch/writeback/rotator blocks, as well as VBIF
2210 * based on the given rotation entry.
2211 */
2212static int sde_hw_rotator_config(struct sde_rot_hw_resource *hw,
2213 struct sde_rot_entry *entry)
2214{
2215 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
2216 struct sde_hw_rotator *rot;
2217 struct sde_hw_rotator_resource_info *resinfo;
2218 struct sde_hw_rotator_context *ctx;
2219 struct sde_hw_rot_sspp_cfg sspp_cfg;
2220 struct sde_hw_rot_wb_cfg wb_cfg;
2221 u32 danger_lut = 0; /* applicable for realtime client only */
2222 u32 safe_lut = 0; /* applicable for realtime client only */
2223 u32 flags = 0;
Benjamin Chana9dd3052017-02-14 17:39:32 -05002224 u32 rststs = 0;
Alan Kwong9487de22016-01-16 22:06:36 -05002225 struct sde_rotation_item *item;
Alan Kwong6bc64622017-02-04 17:36:03 -08002226 int ret;
Alan Kwong9487de22016-01-16 22:06:36 -05002227
2228 if (!hw || !entry) {
2229 SDEROT_ERR("null hw resource/entry\n");
2230 return -EINVAL;
2231 }
2232
2233 resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
2234 rot = resinfo->rot;
2235 item = &entry->item;
2236
Alan Kwong6bc64622017-02-04 17:36:03 -08002237 ctx = sde_hw_rotator_alloc_rotctx(rot, hw, item->session_id,
Clarence Ip9e6c3302017-06-02 11:02:57 -04002238 item->sequence_id, item->output.sbuf);
Alan Kwong9487de22016-01-16 22:06:36 -05002239 if (!ctx) {
2240 SDEROT_ERR("Failed allocating rotator context!!\n");
2241 return -EINVAL;
2242 }
2243
Alan Kwong6bc64622017-02-04 17:36:03 -08002244 /* save entry for debugging purposes */
2245 ctx->last_entry = entry;
2246
2247 if (test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map)) {
2248 if (entry->dst_buf.sbuf) {
2249 u32 op_mode;
2250
2251 if (entry->item.trigger ==
2252 SDE_ROTATOR_TRIGGER_COMMAND)
2253 ctx->start_ctrl = (rot->cmd_trigger << 4);
2254 else if (entry->item.trigger ==
2255 SDE_ROTATOR_TRIGGER_VIDEO)
2256 ctx->start_ctrl = (rot->vid_trigger << 4);
2257 else
2258 ctx->start_ctrl = 0;
2259
2260 ctx->sys_cache_mode = BIT(15) |
2261 ((item->output.scid & 0x1f) << 8) |
2262 (item->output.writeback ? 0x5 : 0);
2263
2264 ctx->op_mode = BIT(4) |
2265 ((ctx->rot->sbuf_headroom & 0xff) << 8);
2266
2267 /* detect transition to inline mode */
2268 op_mode = (SDE_ROTREG_READ(rot->mdss_base,
2269 ROTTOP_OP_MODE) >> 4) & 0x3;
2270 if (!op_mode) {
2271 u32 status;
2272
2273 status = SDE_ROTREG_READ(rot->mdss_base,
2274 ROTTOP_STATUS);
2275 if (status & BIT(0)) {
2276 SDEROT_ERR("rotator busy 0x%x\n",
2277 status);
2278 sde_hw_rotator_dump_status(rot);
2279 SDEROT_EVTLOG_TOUT_HANDLER("rot",
2280 "vbif_dbg_bus",
2281 "panic");
2282 }
2283 }
2284
2285 } else {
2286 ctx->start_ctrl = BIT(0);
2287 ctx->sys_cache_mode = 0;
2288 ctx->op_mode = 0;
2289 }
2290 } else {
2291 ctx->start_ctrl = BIT(0);
2292 }
2293
2294 SDEROT_EVTLOG(ctx->start_ctrl, ctx->sys_cache_mode, ctx->op_mode);
2295
Benjamin Chana9dd3052017-02-14 17:39:32 -05002296 /*
 2297	 * If the rotator HW was reset but the PM event notification was
 2298	 * missed, we need to initialize the SW timestamp automatically.
2299 */
2300 rststs = SDE_ROTREG_READ(rot->mdss_base, REGDMA_RESET_STATUS_REG);
2301 if (!rot->reset_hw_ts && rststs) {
2302 u32 l_ts, h_ts, swts;
2303
2304 swts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);
2305 h_ts = atomic_read(&rot->timestamp[ROT_QUEUE_HIGH_PRIORITY]);
2306 l_ts = atomic_read(&rot->timestamp[ROT_QUEUE_LOW_PRIORITY]);
2307 SDEROT_EVTLOG(0xbad0, rststs, swts, h_ts, l_ts);
2308
2309 if (ctx->q_id == ROT_QUEUE_HIGH_PRIORITY)
2310 h_ts = (h_ts - 1) & SDE_REGDMA_SWTS_MASK;
2311 else
2312 l_ts = (l_ts - 1) & SDE_REGDMA_SWTS_MASK;
2313
 2314		/* construct the combined timestamp */
2315 swts = (h_ts & SDE_REGDMA_SWTS_MASK) |
2316 ((l_ts & SDE_REGDMA_SWTS_MASK) <<
2317 SDE_REGDMA_SWTS_SHIFT);
2318
 2319		SDEROT_DBG("swts:0x%x, h_ts:0x%x, l_ts:0x%x\n",
2320 swts, h_ts, l_ts);
2321 SDEROT_EVTLOG(0x900d, swts, h_ts, l_ts);
2322 rot->last_hw_ts = swts;
2323
2324 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG,
2325 rot->last_hw_ts);
2326 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_RESET_STATUS_REG, 0);
2327 /* ensure write is issued to the rotator HW */
2328 wmb();
2329 }
2330
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04002331 if (rot->reset_hw_ts) {
2332 SDEROT_EVTLOG(rot->last_hw_ts);
2333 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG,
2334 rot->last_hw_ts);
Benjamin Chana9dd3052017-02-14 17:39:32 -05002335 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_RESET_STATUS_REG, 0);
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04002336 /* ensure write is issued to the rotator HW */
2337 wmb();
2338 rot->reset_hw_ts = false;
2339 }
2340
Alan Kwong9487de22016-01-16 22:06:36 -05002341 flags = (item->flags & SDE_ROTATION_FLIP_LR) ?
2342 SDE_ROT_FLAG_FLIP_LR : 0;
2343 flags |= (item->flags & SDE_ROTATION_FLIP_UD) ?
2344 SDE_ROT_FLAG_FLIP_UD : 0;
2345 flags |= (item->flags & SDE_ROTATION_90) ?
2346 SDE_ROT_FLAG_ROT_90 : 0;
2347 flags |= (item->flags & SDE_ROTATION_DEINTERLACE) ?
2348 SDE_ROT_FLAG_DEINTERLACE : 0;
2349 flags |= (item->flags & SDE_ROTATION_SECURE) ?
2350 SDE_ROT_FLAG_SECURE_OVERLAY_SESSION : 0;
Abhijit Kulkarni298c8232016-09-26 22:32:10 -07002351 flags |= (item->flags & SDE_ROTATION_SECURE_CAMERA) ?
2352 SDE_ROT_FLAG_SECURE_CAMERA_SESSION : 0;
2353
Alan Kwong9487de22016-01-16 22:06:36 -05002354
2355 sspp_cfg.img_width = item->input.width;
2356 sspp_cfg.img_height = item->input.height;
Benjamin Chan99eb63b2016-12-21 15:45:26 -05002357 sspp_cfg.fps = entry->perf->config.frame_rate;
2358 sspp_cfg.bw = entry->perf->bw;
Alan Kwong9487de22016-01-16 22:06:36 -05002359 sspp_cfg.fmt = sde_get_format_params(item->input.format);
2360 if (!sspp_cfg.fmt) {
2361 SDEROT_ERR("null format\n");
Alan Kwong6bc64622017-02-04 17:36:03 -08002362 ret = -EINVAL;
2363 goto error;
Alan Kwong9487de22016-01-16 22:06:36 -05002364 }
2365 sspp_cfg.src_rect = &item->src_rect;
2366 sspp_cfg.data = &entry->src_buf;
2367 sde_mdp_get_plane_sizes(sspp_cfg.fmt, item->input.width,
2368 item->input.height, &sspp_cfg.src_plane,
2369 0, /* No bwc_mode */
2370 (flags & SDE_ROT_FLAG_SOURCE_ROTATED_90) ?
2371 true : false);
2372
2373 rot->ops.setup_rotator_fetchengine(ctx, ctx->q_id,
Benjamin Chanfb6faa32016-08-16 17:21:01 -04002374 &sspp_cfg, danger_lut, safe_lut,
2375 entry->dnsc_factor_w, entry->dnsc_factor_h, flags);
Alan Kwong9487de22016-01-16 22:06:36 -05002376
2377 wb_cfg.img_width = item->output.width;
2378 wb_cfg.img_height = item->output.height;
Benjamin Chan99eb63b2016-12-21 15:45:26 -05002379 wb_cfg.fps = entry->perf->config.frame_rate;
2380 wb_cfg.bw = entry->perf->bw;
Alan Kwong9487de22016-01-16 22:06:36 -05002381 wb_cfg.fmt = sde_get_format_params(item->output.format);
2382 wb_cfg.dst_rect = &item->dst_rect;
2383 wb_cfg.data = &entry->dst_buf;
2384 sde_mdp_get_plane_sizes(wb_cfg.fmt, item->output.width,
2385 item->output.height, &wb_cfg.dst_plane,
2386 0, /* No bwc_mode */
2387 (flags & SDE_ROT_FLAG_ROT_90) ? true : false);
2388
2389 wb_cfg.v_downscale_factor = entry->dnsc_factor_h;
2390 wb_cfg.h_downscale_factor = entry->dnsc_factor_w;
Alan Kwong498d59f2017-02-11 18:56:34 -08002391 wb_cfg.prefill_bw = item->prefill_bw;
Alan Kwong9487de22016-01-16 22:06:36 -05002392
2393 rot->ops.setup_rotator_wbengine(ctx, ctx->q_id, &wb_cfg, flags);
2394
2395 /* setup VA mapping for debugfs */
2396 if (rot->dbgmem) {
2397 sde_hw_rotator_map_vaddr(&ctx->src_dbgbuf,
2398 &item->input,
2399 &entry->src_buf);
2400
2401 sde_hw_rotator_map_vaddr(&ctx->dst_dbgbuf,
2402 &item->output,
2403 &entry->dst_buf);
2404 }
2405
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04002406 SDEROT_EVTLOG(ctx->timestamp, flags,
2407 item->input.width, item->input.height,
Benjamin Chan53e3bce2016-08-31 14:43:29 -04002408 item->output.width, item->output.height,
Benjamin Chan59a06052017-01-12 18:06:03 -05002409 entry->src_buf.p[0].addr, entry->dst_buf.p[0].addr,
Benjamin Chan1b94f952017-01-23 17:42:30 -05002410 item->input.format, item->output.format,
2411 entry->perf->config.frame_rate);
Benjamin Chan53e3bce2016-08-31 14:43:29 -04002412
Veera Sundaram Sankaran3e539fe2017-05-10 17:03:32 -07002413 if (!ctx->sbuf_mode && mdata->default_ot_rd_limit) {
Alan Kwong9487de22016-01-16 22:06:36 -05002414 struct sde_mdp_set_ot_params ot_params;
2415
2416 memset(&ot_params, 0, sizeof(struct sde_mdp_set_ot_params));
2417 ot_params.xin_id = XIN_SSPP;
2418 ot_params.num = 0; /* not used */
Alan Kwongeffb5ee2016-03-12 19:47:45 -05002419 ot_params.width = entry->perf->config.input.width;
2420 ot_params.height = entry->perf->config.input.height;
2421 ot_params.fps = entry->perf->config.frame_rate;
Alan Kwong9487de22016-01-16 22:06:36 -05002422 ot_params.reg_off_vbif_lim_conf = MMSS_VBIF_RD_LIM_CONF;
2423 ot_params.reg_off_mdp_clk_ctrl =
2424 MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0;
2425 ot_params.bit_off_mdp_clk_ctrl =
2426 MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN0;
Benjamin Chan99eb63b2016-12-21 15:45:26 -05002427 ot_params.fmt = ctx->is_traffic_shaping ?
2428 SDE_PIX_FMT_ABGR_8888 :
2429 entry->perf->config.input.format;
Benjamin Chan1b94f952017-01-23 17:42:30 -05002430 ot_params.rotsts_base = rot->mdss_base + ROTTOP_STATUS;
2431 ot_params.rotsts_busy_mask = ROT_BUSY_BIT;
Alan Kwong9487de22016-01-16 22:06:36 -05002432 sde_mdp_set_ot_limit(&ot_params);
2433 }
2434
Veera Sundaram Sankaran3e539fe2017-05-10 17:03:32 -07002435 if (!ctx->sbuf_mode && mdata->default_ot_wr_limit) {
Alan Kwong9487de22016-01-16 22:06:36 -05002436 struct sde_mdp_set_ot_params ot_params;
2437
2438 memset(&ot_params, 0, sizeof(struct sde_mdp_set_ot_params));
2439 ot_params.xin_id = XIN_WRITEBACK;
2440 ot_params.num = 0; /* not used */
Alan Kwongeffb5ee2016-03-12 19:47:45 -05002441 ot_params.width = entry->perf->config.input.width;
2442 ot_params.height = entry->perf->config.input.height;
2443 ot_params.fps = entry->perf->config.frame_rate;
Alan Kwong9487de22016-01-16 22:06:36 -05002444 ot_params.reg_off_vbif_lim_conf = MMSS_VBIF_WR_LIM_CONF;
2445 ot_params.reg_off_mdp_clk_ctrl =
2446 MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0;
2447 ot_params.bit_off_mdp_clk_ctrl =
2448 MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN1;
Benjamin Chan99eb63b2016-12-21 15:45:26 -05002449 ot_params.fmt = ctx->is_traffic_shaping ?
2450 SDE_PIX_FMT_ABGR_8888 :
2451 entry->perf->config.input.format;
Benjamin Chan1b94f952017-01-23 17:42:30 -05002452 ot_params.rotsts_base = rot->mdss_base + ROTTOP_STATUS;
2453 ot_params.rotsts_busy_mask = ROT_BUSY_BIT;
Alan Kwong9487de22016-01-16 22:06:36 -05002454 sde_mdp_set_ot_limit(&ot_params);
2455 }
2456
2457 if (test_bit(SDE_QOS_PER_PIPE_LUT, mdata->sde_qos_map)) {
2458 u32 qos_lut = 0; /* low priority for nrt read client */
2459
2460 trace_rot_perf_set_qos_luts(XIN_SSPP, sspp_cfg.fmt->format,
2461 qos_lut, sde_mdp_is_linear_format(sspp_cfg.fmt));
2462
2463 SDE_ROTREG_WRITE(rot->mdss_base, ROT_SSPP_CREQ_LUT, qos_lut);
2464 }
2465
Veera Sundaram Sankarane15dd222017-04-20 08:13:08 -07002466 /* VBIF QoS and other settings */
Veera Sundaram Sankaran3e539fe2017-05-10 17:03:32 -07002467 if (!ctx->sbuf_mode)
2468 sde_hw_rotator_vbif_setting(rot);
Benjamin Chan2d6411a2017-03-28 18:01:53 -04002469
Alan Kwong9487de22016-01-16 22:06:36 -05002470 return 0;
Alan Kwong6bc64622017-02-04 17:36:03 -08002471
2472error:
2473 sde_hw_rotator_free_rotctx(rot, ctx);
2474 return ret;
Alan Kwong9487de22016-01-16 22:06:36 -05002475}
2476
2477/*
2478 * sde_hw_rotator_kickoff - kickoff processing on the given entry
2479 * @hw: Pointer to rotator resource
2480 * @entry: Pointer to rotation entry
2481 */
2482static int sde_hw_rotator_kickoff(struct sde_rot_hw_resource *hw,
2483 struct sde_rot_entry *entry)
2484{
2485 struct sde_hw_rotator *rot;
2486 struct sde_hw_rotator_resource_info *resinfo;
2487 struct sde_hw_rotator_context *ctx;
Alan Kwong9487de22016-01-16 22:06:36 -05002488
2489 if (!hw || !entry) {
2490 SDEROT_ERR("null hw resource/entry\n");
2491 return -EINVAL;
2492 }
2493
2494 resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
2495 rot = resinfo->rot;
2496
2497 /* Lookup rotator context from session-id */
Clarence Ip9e6c3302017-06-02 11:02:57 -04002498 ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id,
2499 entry->item.sequence_id, hw->wb_id);
Alan Kwong9487de22016-01-16 22:06:36 -05002500 if (!ctx) {
 2501		SDEROT_ERR("Cannot locate rotator ctx from session id:%d\n",
2502 entry->item.session_id);
Benjamin Chan62b94ed2016-08-18 23:55:21 -04002503 return -EINVAL;
Alan Kwong9487de22016-01-16 22:06:36 -05002504 }
Alan Kwong9487de22016-01-16 22:06:36 -05002505
Alan Kwong9487de22016-01-16 22:06:36 -05002506 rot->ops.start_rotator(ctx, ctx->q_id);
2507
2508 return 0;
2509}
2510
2511/*
2512 * sde_hw_rotator_wait4done - wait for completion notification
2513 * @hw: Pointer to rotator resource
2514 * @entry: Pointer to rotation entry
2515 *
2516 * This function blocks until the given entry is complete, error
2517 * is detected, or timeout.
2518 */
2519static int sde_hw_rotator_wait4done(struct sde_rot_hw_resource *hw,
2520 struct sde_rot_entry *entry)
2521{
2522 struct sde_hw_rotator *rot;
2523 struct sde_hw_rotator_resource_info *resinfo;
2524 struct sde_hw_rotator_context *ctx;
2525 int ret;
2526
2527 if (!hw || !entry) {
2528 SDEROT_ERR("null hw resource/entry\n");
2529 return -EINVAL;
2530 }
2531
2532 resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
2533 rot = resinfo->rot;
2534
2535 /* Lookup rotator context from session-id */
Clarence Ip9e6c3302017-06-02 11:02:57 -04002536 ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id,
2537 entry->item.sequence_id, hw->wb_id);
Alan Kwong9487de22016-01-16 22:06:36 -05002538 if (!ctx) {
 2539		SDEROT_ERR("Cannot locate rotator ctx from session id:%d\n",
2540 entry->item.session_id);
Benjamin Chan62b94ed2016-08-18 23:55:21 -04002541 return -EINVAL;
Alan Kwong9487de22016-01-16 22:06:36 -05002542 }
Alan Kwong9487de22016-01-16 22:06:36 -05002543
2544 ret = rot->ops.wait_rotator_done(ctx, ctx->q_id, 0);
2545
Alan Kwong9487de22016-01-16 22:06:36 -05002546 if (rot->dbgmem) {
2547 sde_hw_rotator_unmap_vaddr(&ctx->src_dbgbuf);
2548 sde_hw_rotator_unmap_vaddr(&ctx->dst_dbgbuf);
2549 }
2550
 2551	/* Current rotator context job is finished, time to free up */
2552 sde_hw_rotator_free_rotctx(rot, ctx);
2553
2554 return ret;
2555}
2556
2557/*
2558 * sde_rotator_hw_rev_init - setup feature and/or capability bitmask
2559 * @rot: Pointer to hw rotator
2560 *
2561 * This function initializes feature and/or capability bitmask based on
2562 * h/w version read from the device.
2563 */
2564static int sde_rotator_hw_rev_init(struct sde_hw_rotator *rot)
2565{
2566 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
2567 u32 hw_version;
2568
2569 if (!mdata) {
2570 SDEROT_ERR("null rotator data\n");
2571 return -EINVAL;
2572 }
2573
2574 hw_version = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_HW_VERSION);
2575 SDEROT_DBG("hw version %8.8x\n", hw_version);
2576
2577 clear_bit(SDE_QOS_PER_PIPE_IB, mdata->sde_qos_map);
2578 set_bit(SDE_QOS_OVERHEAD_FACTOR, mdata->sde_qos_map);
Alan Kwong9487de22016-01-16 22:06:36 -05002579 set_bit(SDE_QOS_OTLIM, mdata->sde_qos_map);
2580 set_bit(SDE_QOS_PER_PIPE_LUT, mdata->sde_qos_map);
2581 clear_bit(SDE_QOS_SIMPLIFIED_PREFILL, mdata->sde_qos_map);
2582
2583 set_bit(SDE_CAPS_R3_WB, mdata->sde_caps_map);
2584
Alan Kwong6bc64622017-02-04 17:36:03 -08002585 /* features exposed via rotator top h/w version */
Benjamin Chanfb6faa32016-08-16 17:21:01 -04002586 if (hw_version != SDE_ROT_TYPE_V1_0) {
2587 SDEROT_DBG("Supporting 1.5 downscale for SDE Rotator\n");
2588 set_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map);
2589 }
2590
Abhijit Kulkarni298c8232016-09-26 22:32:10 -07002591 set_bit(SDE_CAPS_SEC_ATTACH_DETACH_SMMU, mdata->sde_caps_map);
2592
Benjamin Chan53e3bce2016-08-31 14:43:29 -04002593 mdata->nrt_vbif_dbg_bus = nrt_vbif_dbg_bus_r3;
2594 mdata->nrt_vbif_dbg_bus_size =
2595 ARRAY_SIZE(nrt_vbif_dbg_bus_r3);
2596
Benjamin Chan2d6411a2017-03-28 18:01:53 -04002597 mdata->rot_dbg_bus = rot_dbgbus_r3;
2598 mdata->rot_dbg_bus_size = ARRAY_SIZE(rot_dbgbus_r3);
2599
Benjamin Chan53e3bce2016-08-31 14:43:29 -04002600 mdata->regdump = sde_rot_r3_regdump;
2601 mdata->regdump_size = ARRAY_SIZE(sde_rot_r3_regdump);
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04002602 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG, 0);
Alan Kwong6bc64622017-02-04 17:36:03 -08002603
2604 /* features exposed via mdss h/w version */
2605 if (IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version, SDE_MDP_HW_REV_400)) {
2606 SDEROT_DBG("Supporting sys cache inline rotation\n");
2607 set_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map);
Alan Kwongfb8eeb22017-02-06 15:00:03 -08002608 set_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map);
Clarence Ip22fed4c2017-05-16 15:30:51 -04002609 set_bit(SDE_CAPS_PARTIALWR, mdata->sde_caps_map);
Alan Kwong6bc64622017-02-04 17:36:03 -08002610 rot->inpixfmts = sde_hw_rotator_v4_inpixfmts;
2611 rot->num_inpixfmt = ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts);
2612 rot->outpixfmts = sde_hw_rotator_v4_outpixfmts;
2613 rot->num_outpixfmt = ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts);
2614 rot->downscale_caps =
2615 "LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2";
2616 } else {
2617 rot->inpixfmts = sde_hw_rotator_v3_inpixfmts;
2618 rot->num_inpixfmt = ARRAY_SIZE(sde_hw_rotator_v3_inpixfmts);
2619 rot->outpixfmts = sde_hw_rotator_v3_outpixfmts;
2620 rot->num_outpixfmt = ARRAY_SIZE(sde_hw_rotator_v3_outpixfmts);
2621 rot->downscale_caps = (hw_version == SDE_ROT_TYPE_V1_0) ?
2622 "LINEAR/2/4/8/16/32/64 TILE/2/4 TP10/2" :
2623 "LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2";
2624 }
2625
Alan Kwong9487de22016-01-16 22:06:36 -05002626 return 0;
2627}
2628
2629/*
2630 * sde_hw_rotator_rotirq_handler - non-regdma interrupt handler
2631 * @irq: Interrupt number
2632 * @ptr: Pointer to private handle provided during registration
2633 *
2634 * This function services rotator interrupt and wakes up waiting client
2635 * with pending rotation requests already submitted to h/w.
2636 */
2637static irqreturn_t sde_hw_rotator_rotirq_handler(int irq, void *ptr)
2638{
2639 struct sde_hw_rotator *rot = ptr;
2640 struct sde_hw_rotator_context *ctx;
2641 irqreturn_t ret = IRQ_NONE;
2642 u32 isr;
2643
2644 isr = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_INTR_STATUS);
2645
2646 SDEROT_DBG("intr_status = %8.8x\n", isr);
2647
2648 if (isr & ROT_DONE_MASK) {
2649 if (rot->irq_num >= 0)
Alan Kwong818b7fc2016-07-24 22:07:41 -04002650 sde_hw_rotator_disable_irq(rot);
Alan Kwong9487de22016-01-16 22:06:36 -05002651 SDEROT_DBG("Notify rotator complete\n");
2652
2653 /* Normal rotator only 1 session, no need to lookup */
2654 ctx = rot->rotCtx[0][0];
2655 WARN_ON(ctx == NULL);
2656 complete_all(&ctx->rot_comp);
2657
2658 spin_lock(&rot->rotisr_lock);
2659 SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
2660 ROT_DONE_CLEAR);
2661 spin_unlock(&rot->rotisr_lock);
2662 ret = IRQ_HANDLED;
2663 }
2664
2665 return ret;
2666}
2667
2668/*
2669 * sde_hw_rotator_regdmairq_handler - regdma interrupt handler
2670 * @irq: Interrupt number
2671 * @ptr: Pointer to private handle provided during registration
2672 *
2673 * This function services rotator interrupt, decoding the source of
2674 * events (high/low priority queue), and wakes up all waiting clients
2675 * with pending rotation requests already submitted to h/w.
2676 */
2677static irqreturn_t sde_hw_rotator_regdmairq_handler(int irq, void *ptr)
2678{
2679 struct sde_hw_rotator *rot = ptr;
2680 struct sde_hw_rotator_context *ctx;
2681 irqreturn_t ret = IRQ_NONE;
2682 u32 isr;
2683 u32 ts;
2684 u32 q_id;
2685
2686 isr = SDE_ROTREG_READ(rot->mdss_base, REGDMA_CSR_REGDMA_INT_STATUS);
Alan Kwong818b7fc2016-07-24 22:07:41 -04002687 /* acknowledge interrupt before reading latest timestamp */
2688 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_INT_CLEAR, isr);
Alan Kwong9487de22016-01-16 22:06:36 -05002689 ts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);
2690
2691 SDEROT_DBG("intr_status = %8.8x, sw_TS:%X\n", isr, ts);
2692
2693 /* Any REGDMA status, including error and watchdog timer, should
2694 * trigger and wake up waiting thread
2695 */
2696 if (isr & (REGDMA_INT_HIGH_MASK | REGDMA_INT_LOW_MASK)) {
2697 spin_lock(&rot->rotisr_lock);
2698
2699 /*
2700 * Obtain rotator context based on timestamp from regdma
2701 * and low/high interrupt status
2702 */
2703 if (isr & REGDMA_INT_HIGH_MASK) {
2704 q_id = ROT_QUEUE_HIGH_PRIORITY;
2705 ts = ts & SDE_REGDMA_SWTS_MASK;
2706 } else if (isr & REGDMA_INT_LOW_MASK) {
2707 q_id = ROT_QUEUE_LOW_PRIORITY;
2708 ts = (ts >> SDE_REGDMA_SWTS_SHIFT) &
2709 SDE_REGDMA_SWTS_MASK;
Benjamin Chan62b94ed2016-08-18 23:55:21 -04002710 } else {
2711 SDEROT_ERR("unknown ISR status: isr=0x%X\n", isr);
2712 goto done_isr_handle;
Alan Kwong9487de22016-01-16 22:06:36 -05002713 }
Alan Kwong6bc64622017-02-04 17:36:03 -08002714
2715 /*
2716 * Timestamp packet is not available in sbuf mode.
2717 * Simulate timestamp update in the handler instead.
2718 */
2719 if (!list_empty(&rot->sbuf_ctx[q_id])) {
2720 ctx = list_first_entry_or_null(&rot->sbuf_ctx[q_id],
2721 struct sde_hw_rotator_context, list);
2722 if (ctx) {
2723 ts = ctx->timestamp;
2724 sde_hw_rotator_update_swts(rot, ctx, ts);
2725 SDEROT_DBG("update swts:0x%X\n", ts);
2726 } else {
2727 SDEROT_ERR("invalid swts ctx\n");
2728 }
2729 }
2730
Alan Kwong9487de22016-01-16 22:06:36 -05002731 ctx = rot->rotCtx[q_id][ts & SDE_HW_ROT_REGDMA_SEG_MASK];
Alan Kwong9487de22016-01-16 22:06:36 -05002732
2733 /*
2734 * Wake up all waiting context from the current and previous
2735 * SW Timestamp.
2736 */
Alan Kwong818b7fc2016-07-24 22:07:41 -04002737 while (ctx &&
2738 sde_hw_rotator_elapsed_swts(ctx->timestamp, ts) >= 0) {
Alan Kwong9487de22016-01-16 22:06:36 -05002739 ctx->last_regdma_isr_status = isr;
2740 ctx->last_regdma_timestamp = ts;
2741 SDEROT_DBG(
Alan Kwongf987ea32016-07-06 12:11:44 -04002742 "regdma complete: ctx:%p, ts:%X\n", ctx, ts);
Alan Kwong818b7fc2016-07-24 22:07:41 -04002743 wake_up_all(&ctx->regdma_waitq);
Alan Kwong9487de22016-01-16 22:06:36 -05002744
2745 ts = (ts - 1) & SDE_REGDMA_SWTS_MASK;
2746 ctx = rot->rotCtx[q_id]
2747 [ts & SDE_HW_ROT_REGDMA_SEG_MASK];
Alan Kwong818b7fc2016-07-24 22:07:41 -04002748		}
Alan Kwong9487de22016-01-16 22:06:36 -05002749
Benjamin Chan62b94ed2016-08-18 23:55:21 -04002750done_isr_handle:
Alan Kwong9487de22016-01-16 22:06:36 -05002751 spin_unlock(&rot->rotisr_lock);
2752 ret = IRQ_HANDLED;
2753 } else if (isr & REGDMA_INT_ERR_MASK) {
2754 /*
2755 * For REGDMA Err, we save the isr info and wake up
2756 * all waiting contexts
2757 */
2758 int i, j;
2759
2760 SDEROT_ERR(
2761 "regdma err isr:%X, wake up all waiting contexts\n",
2762 isr);
2763
2764 spin_lock(&rot->rotisr_lock);
2765
2766 for (i = 0; i < ROT_QUEUE_MAX; i++) {
2767 for (j = 0; j < SDE_HW_ROT_REGDMA_TOTAL_CTX; j++) {
2768 ctx = rot->rotCtx[i][j];
2769 if (ctx && ctx->last_regdma_isr_status == 0) {
2770 ctx->last_regdma_isr_status = isr;
2771 ctx->last_regdma_timestamp = ts;
Alan Kwong818b7fc2016-07-24 22:07:41 -04002772 wake_up_all(&ctx->regdma_waitq);
Alan Kwong9487de22016-01-16 22:06:36 -05002773 SDEROT_DBG("Wakeup rotctx[%d][%d]:%p\n",
2774 i, j, ctx);
2775 }
2776 }
2777 }
2778
Alan Kwong9487de22016-01-16 22:06:36 -05002779 spin_unlock(&rot->rotisr_lock);
2780 ret = IRQ_HANDLED;
2781 }
2782
2783 return ret;
2784}
2785
2786/*
2787 * sde_hw_rotator_validate_entry - validate rotation entry
2788 * @mgr: Pointer to rotator manager
2789 * @entry: Pointer to rotation entry
2790 *
2791 * This function validates the given rotation entry and provides possible
2792 * fixup (future improvement) if available. This function returns 0 if
2793 * the entry is valid, and returns error code otherwise.
2794 */
2795static int sde_hw_rotator_validate_entry(struct sde_rot_mgr *mgr,
2796 struct sde_rot_entry *entry)
2797{
Benjamin Chanfb6faa32016-08-16 17:21:01 -04002798 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
Alan Kwongb6c049c2017-03-31 12:50:27 -07002799 struct sde_hw_rotator *hw_data;
Alan Kwong9487de22016-01-16 22:06:36 -05002800 int ret = 0;
2801 u16 src_w, src_h, dst_w, dst_h;
2802 struct sde_rotation_item *item = &entry->item;
2803 struct sde_mdp_format_params *fmt;
2804
Alan Kwongb6c049c2017-03-31 12:50:27 -07002805 if (!mgr || !entry || !mgr->hw_data) {
2806 SDEROT_ERR("invalid parameters\n");
2807 return -EINVAL;
2808 }
2809
2810 hw_data = mgr->hw_data;
2811
2812 if (hw_data->maxlinewidth < item->src_rect.w) {
2813 SDEROT_ERR("invalid src width %u\n", item->src_rect.w);
2814 return -EINVAL;
2815 }
2816
Alan Kwong9487de22016-01-16 22:06:36 -05002817 src_w = item->src_rect.w;
2818 src_h = item->src_rect.h;
2819
2820 if (item->flags & SDE_ROTATION_90) {
2821 dst_w = item->dst_rect.h;
2822 dst_h = item->dst_rect.w;
2823 } else {
2824 dst_w = item->dst_rect.w;
2825 dst_h = item->dst_rect.h;
2826 }
2827
2828 entry->dnsc_factor_w = 0;
2829 entry->dnsc_factor_h = 0;
2830
Alan Kwong6bc64622017-02-04 17:36:03 -08002831 if (item->output.sbuf &&
2832 !test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map)) {
2833 SDEROT_ERR("stream buffer not supported\n");
2834 return -EINVAL;
2835 }
2836
Alan Kwong9487de22016-01-16 22:06:36 -05002837 if ((src_w != dst_w) || (src_h != dst_h)) {
Clarence Ip4db1ea82017-05-01 12:18:55 -07002838 if (!dst_w || !dst_h) {
 2839			SDEROT_DBG("zero output width/height not supported\n");
2840 ret = -EINVAL;
2841 goto dnsc_err;
2842 }
Alan Kwong9487de22016-01-16 22:06:36 -05002843 if ((src_w % dst_w) || (src_h % dst_h)) {
 2844			SDEROT_DBG("non-integral scale not supported\n");
2845 ret = -EINVAL;
Benjamin Chanfb6faa32016-08-16 17:21:01 -04002846 goto dnsc_1p5_check;
Alan Kwong9487de22016-01-16 22:06:36 -05002847 }
2848 entry->dnsc_factor_w = src_w / dst_w;
2849 if ((entry->dnsc_factor_w & (entry->dnsc_factor_w - 1)) ||
2850 (entry->dnsc_factor_w > 64)) {
 2851			SDEROT_DBG("non-power-of-2 w_scale not supported\n");
2852 ret = -EINVAL;
2853 goto dnsc_err;
2854 }
2855 entry->dnsc_factor_h = src_h / dst_h;
2856 if ((entry->dnsc_factor_h & (entry->dnsc_factor_h - 1)) ||
2857 (entry->dnsc_factor_h > 64)) {
 2858			SDEROT_DBG("non-power-of-2 h_scale not supported\n");
2859 ret = -EINVAL;
2860 goto dnsc_err;
2861 }
2862 }
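	/*
	 * Illustrative examples of the power-of-2 downscale check above:
	 * 1920x1080 -> 480x270 yields dnsc_factor_w = dnsc_factor_h = 4,
	 * a power of two not exceeding 64, and is accepted; 1920x1080 ->
	 * 640x360 yields a factor of 3 and is rejected.
	 */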
2863
Benjamin Chan0e96afd2017-01-17 16:49:12 -05002864 fmt = sde_get_format_params(item->output.format);
Benjamin Chan886ff672016-11-07 15:23:17 -05002865 /*
 2866	 * Rotator downscale supports at most 4x for UBWC formats and
 2867	 * at most 2x for TP10/TP10_UBWC formats
2868 */
2869 if (sde_mdp_is_ubwc_format(fmt) && (entry->dnsc_factor_h > 4)) {
2870 SDEROT_DBG("max downscale for UBWC format is 4\n");
Alan Kwong9487de22016-01-16 22:06:36 -05002871 ret = -EINVAL;
2872 goto dnsc_err;
2873 }
Benjamin Chan886ff672016-11-07 15:23:17 -05002874 if (sde_mdp_is_tp10_format(fmt) && (entry->dnsc_factor_h > 2)) {
2875 SDEROT_DBG("downscale with TP10 cannot be more than 2\n");
Alan Kwong9487de22016-01-16 22:06:36 -05002876 ret = -EINVAL;
2877 }
Benjamin Chanfb6faa32016-08-16 17:21:01 -04002878 goto dnsc_err;
2879
2880dnsc_1p5_check:
2881 /* Check for 1.5 downscale that only applies to V2 HW */
2882 if (test_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map)) {
2883 entry->dnsc_factor_w = src_w / dst_w;
2884 if ((entry->dnsc_factor_w != 1) ||
2885 ((dst_w * 3) != (src_w * 2))) {
2886 SDEROT_DBG(
 2887				"Not supporting non-1.5 downscale width ratio, src_w:%d, dst_w:%d\n",
2888 src_w, dst_w);
2889 ret = -EINVAL;
2890 goto dnsc_err;
2891 }
2892
2893 entry->dnsc_factor_h = src_h / dst_h;
2894 if ((entry->dnsc_factor_h != 1) ||
2895 ((dst_h * 3) != (src_h * 2))) {
2896 SDEROT_DBG(
 2897				"Not supporting non-1.5 downscale height ratio, src_h:%d, dst_h:%d\n",
2898 src_h, dst_h);
2899 ret = -EINVAL;
2900 goto dnsc_err;
2901 }
2902 ret = 0;
2903 }
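	/*
	 * Illustrative example of the 1.5x check above: 1920x1080 ->
	 * 1280x720 satisfies dst*3 == src*2 in both dimensions (with an
	 * integer factor of 1), so it is accepted as a 1.5x downscale.
	 */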
Alan Kwong9487de22016-01-16 22:06:36 -05002904
2905dnsc_err:
2906 /* Downscaler does not support asymmetrical dnsc */
2907 if (entry->dnsc_factor_w != entry->dnsc_factor_h) {
 2908		SDEROT_DBG("asymmetric downscale not supported\n");
2909 ret = -EINVAL;
2910 }
2911
2912 if (ret) {
2913 entry->dnsc_factor_w = 0;
2914 entry->dnsc_factor_h = 0;
2915 }
2916 return ret;
2917}
2918
2919/*
2920 * sde_hw_rotator_show_caps - output capability info to sysfs 'caps' file
2921 * @mgr: Pointer to rotator manager
2922 * @attr: Pointer to device attribute interface
2923 * @buf: Pointer to output buffer
2924 * @len: Length of output buffer
2925 */
2926static ssize_t sde_hw_rotator_show_caps(struct sde_rot_mgr *mgr,
2927 struct device_attribute *attr, char *buf, ssize_t len)
2928{
2929 struct sde_hw_rotator *hw_data;
Benjamin Chan886ff672016-11-07 15:23:17 -05002930 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
Alan Kwong9487de22016-01-16 22:06:36 -05002931 int cnt = 0;
2932
2933 if (!mgr || !buf)
2934 return 0;
2935
2936 hw_data = mgr->hw_data;
2937
2938#define SPRINT(fmt, ...) \
2939 (cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
2940
2941 /* insert capabilities here */
Benjamin Chan886ff672016-11-07 15:23:17 -05002942 if (test_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map))
2943 SPRINT("min_downscale=1.5\n");
2944 else
2945 SPRINT("min_downscale=2.0\n");
Alan Kwong9487de22016-01-16 22:06:36 -05002946
Benjamin Chan42db2c92016-11-22 22:50:01 -05002947 SPRINT("downscale_compression=1\n");
2948
Alan Kwong6bc64622017-02-04 17:36:03 -08002949 if (hw_data->downscale_caps)
2950 SPRINT("downscale_ratios=%s\n", hw_data->downscale_caps);
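	/*
	 * Example sysfs 'caps' output on a target with 1.5x downscale
	 * support (illustrative only):
	 *   min_downscale=1.5
	 *   downscale_compression=1
	 *   downscale_ratios=LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2
	 */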
2951
Alan Kwong9487de22016-01-16 22:06:36 -05002952#undef SPRINT
2953 return cnt;
2954}
2955
2956/*
2957 * sde_hw_rotator_show_state - output state info to sysfs 'state' file
2958 * @mgr: Pointer to rotator manager
2959 * @attr: Pointer to device attribute interface
2960 * @buf: Pointer to output buffer
2961 * @len: Length of output buffer
2962 */
2963static ssize_t sde_hw_rotator_show_state(struct sde_rot_mgr *mgr,
2964 struct device_attribute *attr, char *buf, ssize_t len)
2965{
2966 struct sde_hw_rotator *rot;
2967 struct sde_hw_rotator_context *ctx;
2968 int cnt = 0;
2969 int num_active = 0;
2970 int i, j;
2971
2972 if (!mgr || !buf) {
2973 SDEROT_ERR("null parameters\n");
2974 return 0;
2975 }
2976
2977 rot = mgr->hw_data;
2978
2979#define SPRINT(fmt, ...) \
2980 (cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
2981
2982 if (rot) {
2983 SPRINT("rot_mode=%d\n", rot->mode);
2984 SPRINT("irq_num=%d\n", rot->irq_num);
2985
2986 if (rot->mode == ROT_REGDMA_OFF) {
2987 SPRINT("max_active=1\n");
2988 SPRINT("num_active=%d\n", rot->rotCtx[0][0] ? 1 : 0);
2989 } else {
2990 for (i = 0; i < ROT_QUEUE_MAX; i++) {
2991 for (j = 0; j < SDE_HW_ROT_REGDMA_TOTAL_CTX;
2992 j++) {
2993 ctx = rot->rotCtx[i][j];
2994
2995 if (ctx) {
2996 SPRINT(
2997 "rotCtx[%d][%d]:%p\n",
2998 i, j, ctx);
2999 ++num_active;
3000 }
3001 }
3002 }
3003
3004 SPRINT("max_active=%d\n", SDE_HW_ROT_REGDMA_TOTAL_CTX);
3005 SPRINT("num_active=%d\n", num_active);
3006 }
3007 }
3008
3009#undef SPRINT
3010 return cnt;
3011}
3012
3013/*
Alan Kwongda16e442016-08-14 20:47:18 -04003014 * sde_hw_rotator_get_pixfmt - get the indexed pixel format
3015 * @mgr: Pointer to rotator manager
3016 * @index: index of pixel format
3017 * @input: true for input port; false for output port
3018 */
3019static u32 sde_hw_rotator_get_pixfmt(struct sde_rot_mgr *mgr,
3020 int index, bool input)
3021{
Alan Kwong6bc64622017-02-04 17:36:03 -08003022 struct sde_hw_rotator *rot;
3023
3024 if (!mgr || !mgr->hw_data) {
3025 SDEROT_ERR("null parameters\n");
3026 return 0;
3027 }
3028
3029 rot = mgr->hw_data;
3030
Alan Kwongda16e442016-08-14 20:47:18 -04003031 if (input) {
Alan Kwong6bc64622017-02-04 17:36:03 -08003032 if ((index < rot->num_inpixfmt) && rot->inpixfmts)
3033 return rot->inpixfmts[index];
Alan Kwongda16e442016-08-14 20:47:18 -04003034 else
3035 return 0;
3036 } else {
Alan Kwong6bc64622017-02-04 17:36:03 -08003037 if ((index < rot->num_outpixfmt) && rot->outpixfmts)
3038 return rot->outpixfmts[index];
Alan Kwongda16e442016-08-14 20:47:18 -04003039 else
3040 return 0;
3041 }
3042}
3043
3044/*
3045 * sde_hw_rotator_is_valid_pixfmt - verify if the given pixel format is valid
3046 * @mgr: Pointer to rotator manager
3047 * @pixfmt: pixel format to be verified
3048 * @input: true for input port; false for output port
3049 */
3050static int sde_hw_rotator_is_valid_pixfmt(struct sde_rot_mgr *mgr, u32 pixfmt,
3051 bool input)
3052{
Alan Kwong6bc64622017-02-04 17:36:03 -08003053 struct sde_hw_rotator *rot;
3054 u32 *pixfmts;
3055 u32 num_pixfmt;
Alan Kwongda16e442016-08-14 20:47:18 -04003056 int i;
3057
Alan Kwong6bc64622017-02-04 17:36:03 -08003058 if (!mgr || !mgr->hw_data) {
3059 SDEROT_ERR("null parameters\n");
3060 return false;
Alan Kwongda16e442016-08-14 20:47:18 -04003061 }
3062
Alan Kwong6bc64622017-02-04 17:36:03 -08003063 rot = mgr->hw_data;
3064
3065 if (input) {
3066 pixfmts = rot->inpixfmts;
3067 num_pixfmt = rot->num_inpixfmt;
3068 } else {
3069 pixfmts = rot->outpixfmts;
3070 num_pixfmt = rot->num_outpixfmt;
3071 }
3072
3073 if (!pixfmts || !num_pixfmt) {
3074 SDEROT_ERR("invalid pixel format tables\n");
3075 return false;
3076 }
3077
3078 for (i = 0; i < num_pixfmt; i++)
3079 if (pixfmts[i] == pixfmt)
3080 return true;
3081
Alan Kwongda16e442016-08-14 20:47:18 -04003082 return false;
3083}
3084
3085/*
Alan Kwong6bc64622017-02-04 17:36:03 -08003086 * sde_hw_rotator_get_downscale_caps - get scaling capability string
3087 * @mgr: Pointer to rotator manager
3088 * @caps: Pointer to capability string buffer; NULL to return maximum length
3089 * @len: length of capability string buffer
3090 * return: length of capability string
3091 */
3092static int sde_hw_rotator_get_downscale_caps(struct sde_rot_mgr *mgr,
3093 char *caps, int len)
3094{
3095 struct sde_hw_rotator *rot;
3096 int rc = 0;
3097
3098 if (!mgr || !mgr->hw_data) {
3099 SDEROT_ERR("null parameters\n");
3100 return -EINVAL;
3101 }
3102
3103 rot = mgr->hw_data;
3104
3105 if (rot->downscale_caps) {
3106 if (caps)
3107 rc = snprintf(caps, len, "%s", rot->downscale_caps);
3108 else
3109 rc = strlen(rot->downscale_caps);
3110 }
3111
3112 return rc;
3113}
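/*
 * Usage sketch (illustrative only): a caller that does not know the
 * capability string length can pass a NULL buffer first, then allocate
 * and query again through the manager op installed below. "buf" is a
 * hypothetical local variable.
 *
 *	int len = mgr->ops_hw_get_downscale_caps(mgr, NULL, 0);
 *	char *buf = kzalloc(len + 1, GFP_KERNEL);
 *
 *	if (buf)
 *		mgr->ops_hw_get_downscale_caps(mgr, buf, len + 1);
 */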
3114
3115/*
Alan Kwongb6c049c2017-03-31 12:50:27 -07003116 * sde_hw_rotator_get_maxlinewidth - get maximum line width supported
3117 * @mgr: Pointer to rotator manager
3118 * return: maximum line width supported by hardware
3119 */
3120static int sde_hw_rotator_get_maxlinewidth(struct sde_rot_mgr *mgr)
3121{
3122 struct sde_hw_rotator *rot;
3123
3124 if (!mgr || !mgr->hw_data) {
3125 SDEROT_ERR("null parameters\n");
3126 return -EINVAL;
3127 }
3128
3129 rot = mgr->hw_data;
3130
3131 return rot->maxlinewidth;
3132}
3133
3134/*
Alan Kwong9487de22016-01-16 22:06:36 -05003135 * sde_hw_rotator_parse_dt - parse r3 specific device tree settings
3136 * @hw_data: Pointer to rotator hw
3137 * @dev: Pointer to platform device
3138 */
3139static int sde_hw_rotator_parse_dt(struct sde_hw_rotator *hw_data,
3140 struct platform_device *dev)
3141{
3142 int ret = 0;
3143 u32 data;
3144
3145 if (!hw_data || !dev)
3146 return -EINVAL;
3147
3148 ret = of_property_read_u32(dev->dev.of_node, "qcom,mdss-rot-mode",
3149 &data);
3150 if (ret) {
3151 SDEROT_DBG("default to regdma off\n");
3152 ret = 0;
3153 hw_data->mode = ROT_REGDMA_OFF;
3154 } else if (data < ROT_REGDMA_MAX) {
3155 SDEROT_DBG("set to regdma mode %d\n", data);
3156 hw_data->mode = data;
3157 } else {
3158 SDEROT_ERR("regdma mode out of range. default to regdma off\n");
3159 hw_data->mode = ROT_REGDMA_OFF;
3160 }
3161
3162 ret = of_property_read_u32(dev->dev.of_node,
3163 "qcom,mdss-highest-bank-bit", &data);
3164 if (ret) {
3165 SDEROT_DBG("default to A5X bank\n");
3166 ret = 0;
3167 hw_data->highest_bank = 2;
3168 } else {
3169 SDEROT_DBG("set highest bank bit to %d\n", data);
3170 hw_data->highest_bank = data;
3171 }
3172
Alan Kwong6bc64622017-02-04 17:36:03 -08003173 ret = of_property_read_u32(dev->dev.of_node,
Alan Kwongfb8eeb22017-02-06 15:00:03 -08003174 "qcom,sde-ubwc-malsize", &data);
3175 if (ret) {
3176 ret = 0;
3177 hw_data->ubwc_malsize = DEFAULT_UBWC_MALSIZE;
3178 } else {
3179 SDEROT_DBG("set ubwc malsize to %d\n", data);
3180 hw_data->ubwc_malsize = data;
3181 }
3182
3183 ret = of_property_read_u32(dev->dev.of_node,
3184 "qcom,sde-ubwc_swizzle", &data);
3185 if (ret) {
3186 ret = 0;
3187 hw_data->ubwc_swizzle = DEFAULT_UBWC_SWIZZLE;
3188 } else {
3189 SDEROT_DBG("set ubwc swizzle to %d\n", data);
3190 hw_data->ubwc_swizzle = data;
3191 }
3192
3193 ret = of_property_read_u32(dev->dev.of_node,
Alan Kwong6bc64622017-02-04 17:36:03 -08003194 "qcom,mdss-sbuf-headroom", &data);
3195 if (ret) {
3196 ret = 0;
3197 hw_data->sbuf_headroom = DEFAULT_SBUF_HEADROOM;
3198 } else {
3199 SDEROT_DBG("set sbuf headroom to %d\n", data);
3200 hw_data->sbuf_headroom = data;
3201 }
3202
Alan Kwongb6c049c2017-03-31 12:50:27 -07003203 ret = of_property_read_u32(dev->dev.of_node,
3204 "qcom,mdss-rot-linewidth", &data);
3205 if (ret) {
3206 ret = 0;
3207 hw_data->maxlinewidth = DEFAULT_MAXLINEWIDTH;
3208 } else {
3209 SDEROT_DBG("set mdss-rot-linewidth to %d\n", data);
3210 hw_data->maxlinewidth = data;
3211 }
3212
Alan Kwong9487de22016-01-16 22:06:36 -05003213 return ret;
3214}
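/*
 * For reference, a device tree fragment exercising the optional properties
 * parsed above could look like the following. The node name and all of the
 * values are illustrative only; every property is optional and falls back
 * to the default noted in the matching branch above when absent.
 *
 *	qcom,sde_rotator {
 *		qcom,mdss-rot-mode = <1>;
 *		qcom,mdss-highest-bank-bit = <2>;
 *		qcom,sde-ubwc-malsize = <0>;
 *		qcom,sde-ubwc_swizzle = <0>;
 *		qcom,mdss-sbuf-headroom = <20>;
 *		qcom,mdss-rot-linewidth = <4096>;
 *	};
 */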
3215
3216/*
3217 * sde_rotator_r3_init - initialize the r3 module
3218 * @mgr: Pointer to rotator manager
3219 *
3220 * This function setup r3 callback functions, parses r3 specific
3221 * device tree settings, installs r3 specific interrupt handler,
3222 * as well as initializes r3 internal data structure.
3223 */
3224int sde_rotator_r3_init(struct sde_rot_mgr *mgr)
3225{
3226 struct sde_hw_rotator *rot;
3227 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
3228 int i;
3229 int ret;
3230
3231 rot = devm_kzalloc(&mgr->pdev->dev, sizeof(*rot), GFP_KERNEL);
3232 if (!rot)
3233 return -ENOMEM;
3234
3235 mgr->hw_data = rot;
3236 mgr->queue_count = ROT_QUEUE_MAX;
3237
3238 rot->mdss_base = mdata->sde_io.base;
3239 rot->pdev = mgr->pdev;
Alan Kwong6bc64622017-02-04 17:36:03 -08003240 rot->koff_timeout = KOFF_TIMEOUT;
3241 rot->vid_trigger = ROTTOP_START_CTRL_TRIG_SEL_MDP;
3242 rot->cmd_trigger = ROTTOP_START_CTRL_TRIG_SEL_MDP;
Alan Kwong9487de22016-01-16 22:06:36 -05003243
3244 /* Assign ops */
3245 mgr->ops_hw_destroy = sde_hw_rotator_destroy;
3246 mgr->ops_hw_alloc = sde_hw_rotator_alloc_ext;
3247 mgr->ops_hw_free = sde_hw_rotator_free_ext;
3248 mgr->ops_config_hw = sde_hw_rotator_config;
3249 mgr->ops_kickoff_entry = sde_hw_rotator_kickoff;
3250 mgr->ops_wait_for_entry = sde_hw_rotator_wait4done;
3251 mgr->ops_hw_validate_entry = sde_hw_rotator_validate_entry;
3252 mgr->ops_hw_show_caps = sde_hw_rotator_show_caps;
3253 mgr->ops_hw_show_state = sde_hw_rotator_show_state;
3254 mgr->ops_hw_create_debugfs = sde_rotator_r3_create_debugfs;
Alan Kwongda16e442016-08-14 20:47:18 -04003255 mgr->ops_hw_get_pixfmt = sde_hw_rotator_get_pixfmt;
3256 mgr->ops_hw_is_valid_pixfmt = sde_hw_rotator_is_valid_pixfmt;
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04003257 mgr->ops_hw_pre_pmevent = sde_hw_rotator_pre_pmevent;
3258 mgr->ops_hw_post_pmevent = sde_hw_rotator_post_pmevent;
Alan Kwong6bc64622017-02-04 17:36:03 -08003259 mgr->ops_hw_get_downscale_caps = sde_hw_rotator_get_downscale_caps;
Alan Kwongb6c049c2017-03-31 12:50:27 -07003260 mgr->ops_hw_get_maxlinewidth = sde_hw_rotator_get_maxlinewidth;
Alan Kwong9487de22016-01-16 22:06:36 -05003261
3262 ret = sde_hw_rotator_parse_dt(mgr->hw_data, mgr->pdev);
3263 if (ret)
3264 goto error_parse_dt;
3265
3266 rot->irq_num = platform_get_irq(mgr->pdev, 0);
3267 if (rot->irq_num < 0) {
3268 SDEROT_ERR("fail to get rotator irq\n");
3269 } else {
3270 if (rot->mode == ROT_REGDMA_OFF)
3271 ret = devm_request_threaded_irq(&mgr->pdev->dev,
3272 rot->irq_num,
3273 sde_hw_rotator_rotirq_handler,
3274 NULL, 0, "sde_rotator_r3", rot);
3275 else
3276 ret = devm_request_threaded_irq(&mgr->pdev->dev,
3277 rot->irq_num,
3278 sde_hw_rotator_regdmairq_handler,
3279 NULL, 0, "sde_rotator_r3", rot);
3280 if (ret) {
3281 SDEROT_ERR("fail to request irq r:%d\n", ret);
3282 rot->irq_num = -1;
3283 } else {
3284 disable_irq(rot->irq_num);
3285 }
3286 }
Alan Kwong818b7fc2016-07-24 22:07:41 -04003287 atomic_set(&rot->irq_enabled, 0);
Alan Kwong9487de22016-01-16 22:06:36 -05003288
3289 setup_rotator_ops(&rot->ops, rot->mode);
3290
3291 spin_lock_init(&rot->rotctx_lock);
3292 spin_lock_init(&rot->rotisr_lock);
3293
3294 /* REGDMA initialization */
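	/*
	 * Command write pointer layout: with REGDMA off, every context is
	 * staged in the local cmd_queue[] and only queue 0 is used. With
	 * REGDMA on, the pointers aim directly into the REGDMA command RAM;
	 * the high priority queue takes the first SDE_HW_ROT_REGDMA_TOTAL_CTX
	 * segments and the low priority queue takes the block after it. The
	 * factor of 4 converts the per-context segment size from 32-bit words
	 * into a byte offset within the command RAM.
	 */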
3295 if (rot->mode == ROT_REGDMA_OFF) {
3296 for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
3297 rot->cmd_wr_ptr[0][i] = &rot->cmd_queue[
3298 SDE_HW_ROT_REGDMA_SEG_SIZE * i];
3299 } else {
3300 for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
3301 rot->cmd_wr_ptr[ROT_QUEUE_HIGH_PRIORITY][i] =
3302 (u32 *)(rot->mdss_base +
3303 REGDMA_RAM_REGDMA_CMD_RAM +
3304 SDE_HW_ROT_REGDMA_SEG_SIZE * 4 * i);
3305
3306 for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
3307 rot->cmd_wr_ptr[ROT_QUEUE_LOW_PRIORITY][i] =
3308 (u32 *)(rot->mdss_base +
3309 REGDMA_RAM_REGDMA_CMD_RAM +
3310 SDE_HW_ROT_REGDMA_SEG_SIZE * 4 *
3311 (i + SDE_HW_ROT_REGDMA_TOTAL_CTX));
3312 }
3313
Alan Kwong6bc64622017-02-04 17:36:03 -08003314 for (i = 0; i < ROT_QUEUE_MAX; i++) {
3315 atomic_set(&rot->timestamp[i], 0);
3316 INIT_LIST_HEAD(&rot->sbuf_ctx[i]);
3317 }
Alan Kwong9487de22016-01-16 22:06:36 -05003318
3319 ret = sde_rotator_hw_rev_init(rot);
3320 if (ret)
3321 goto error_hw_rev_init;
3322
Alan Kwong315cd772016-08-03 22:29:42 -04003323 /* set rotator CBCR to shutoff memory/periphery on clock off.*/
Clarence Ip77c053d2017-04-24 19:26:37 -07003324 clk_set_flags(mgr->rot_clk[SDE_ROTATOR_CLK_MDSS_ROT].clk,
Alan Kwong315cd772016-08-03 22:29:42 -04003325 CLKFLAG_NORETAIN_MEM);
Clarence Ip77c053d2017-04-24 19:26:37 -07003326 clk_set_flags(mgr->rot_clk[SDE_ROTATOR_CLK_MDSS_ROT].clk,
Alan Kwong315cd772016-08-03 22:29:42 -04003327 CLKFLAG_NORETAIN_PERIPH);
3328
Benjamin Chan53e3bce2016-08-31 14:43:29 -04003329 mdata->sde_rot_hw = rot;
Alan Kwong9487de22016-01-16 22:06:36 -05003330 return 0;
3331error_hw_rev_init:
3332 if (rot->irq_num >= 0)
3333		devm_free_irq(&mgr->pdev->dev, rot->irq_num, rot);
3334 devm_kfree(&mgr->pdev->dev, mgr->hw_data);
3335error_parse_dt:
3336 return ret;
3337}