blob: 17fa2cc25c5b45ef518d60b4bdd5635779aeef1f [file] [log] [blame]
Benjamin Chan99eb63b2016-12-21 15:45:26 -05001/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
Alan Kwong9487de22016-01-16 22:06:36 -05002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#define pr_fmt(fmt) "%s: " fmt, __func__
15
16#include <linux/platform_device.h>
17#include <linux/module.h>
18#include <linux/fs.h>
19#include <linux/file.h>
Alan Kwong9487de22016-01-16 22:06:36 -050020#include <linux/delay.h>
21#include <linux/debugfs.h>
22#include <linux/interrupt.h>
23#include <linux/dma-mapping.h>
24#include <linux/dma-buf.h>
25#include <linux/msm_ion.h>
Alan Kwong6ce448d2016-11-24 18:45:20 -080026#include <linux/clk.h>
27#include <linux/clk/qcom.h>
Alan Kwong9487de22016-01-16 22:06:36 -050028
29#include "sde_rotator_core.h"
30#include "sde_rotator_util.h"
31#include "sde_rotator_smmu.h"
32#include "sde_rotator_r3.h"
33#include "sde_rotator_r3_internal.h"
34#include "sde_rotator_r3_hwio.h"
35#include "sde_rotator_r3_debug.h"
36#include "sde_rotator_trace.h"
Benjamin Chan53e3bce2016-08-31 14:43:29 -040037#include "sde_rotator_debug.h"
Alan Kwong9487de22016-01-16 22:06:36 -050038
Benjamin Chan99eb63b2016-12-21 15:45:26 -050039#define RES_UHD (3840*2160)
40
41/* traffic shaping clock ticks = finish_time x 19.2MHz */
42#define TRAFFIC_SHAPE_CLKTICK_14MS 268800
43#define TRAFFIC_SHAPE_CLKTICK_12MS 230400
Alan Kwong498d59f2017-02-11 18:56:34 -080044#define TRAFFIC_SHAPE_VSYNC_CLK 19200000
Benjamin Chan99eb63b2016-12-21 15:45:26 -050045
Alan Kwong9487de22016-01-16 22:06:36 -050046/* XIN mapping */
47#define XIN_SSPP 0
48#define XIN_WRITEBACK 1
49
50/* wait for at most 2 vsync for lowest refresh rate (24hz) */
Alan Kwong9a11c452017-05-01 15:11:31 -070051#define KOFF_TIMEOUT (42 * 32)
Alan Kwong6bc64622017-02-04 17:36:03 -080052
53/* default stream buffer headroom in lines */
54#define DEFAULT_SBUF_HEADROOM 20
Clarence Ip37e013c2017-05-04 12:23:13 -070055#define DEFAULT_UBWC_MALSIZE 0
56#define DEFAULT_UBWC_SWIZZLE 0
Alan Kwong9487de22016-01-16 22:06:36 -050057
Alan Kwongb6c049c2017-03-31 12:50:27 -070058#define DEFAULT_MAXLINEWIDTH 4096
59
Alan Kwong9487de22016-01-16 22:06:36 -050060/* Macro for constructing the REGDMA command */
61#define SDE_REGDMA_WRITE(p, off, data) \
62 do { \
Alan Kwong6bc64622017-02-04 17:36:03 -080063 SDEROT_DBG("SDEREG.W:[%s:0x%X] <= 0x%X\n", #off, (off),\
64 (u32)(data));\
Alan Kwong9487de22016-01-16 22:06:36 -050065 *p++ = REGDMA_OP_REGWRITE | \
66 ((off) & REGDMA_ADDR_OFFSET_MASK); \
67 *p++ = (data); \
68 } while (0)
69
70#define SDE_REGDMA_MODIFY(p, off, mask, data) \
71 do { \
Alan Kwong6bc64622017-02-04 17:36:03 -080072 SDEROT_DBG("SDEREG.M:[%s:0x%X] <= 0x%X\n", #off, (off),\
73 (u32)(data));\
Alan Kwong9487de22016-01-16 22:06:36 -050074 *p++ = REGDMA_OP_REGMODIFY | \
75 ((off) & REGDMA_ADDR_OFFSET_MASK); \
76 *p++ = (mask); \
77 *p++ = (data); \
78 } while (0)
79
80#define SDE_REGDMA_BLKWRITE_INC(p, off, len) \
81 do { \
Alan Kwong6bc64622017-02-04 17:36:03 -080082 SDEROT_DBG("SDEREG.B:[%s:0x%X:0x%X]\n", #off, (off),\
83 (u32)(len));\
Alan Kwong9487de22016-01-16 22:06:36 -050084 *p++ = REGDMA_OP_BLKWRITE_INC | \
85 ((off) & REGDMA_ADDR_OFFSET_MASK); \
86 *p++ = (len); \
87 } while (0)
88
89#define SDE_REGDMA_BLKWRITE_DATA(p, data) \
90 do { \
Alan Kwong6bc64622017-02-04 17:36:03 -080091 SDEROT_DBG("SDEREG.I:[:] <= 0x%X\n", (u32)(data));\
Alan Kwong9487de22016-01-16 22:06:36 -050092 *(p) = (data); \
93 (p)++; \
94 } while (0)
95
96/* Macro for directly accessing mapped registers */
97#define SDE_ROTREG_WRITE(base, off, data) \
Alan Kwong6bc64622017-02-04 17:36:03 -080098 do { \
99 SDEROT_DBG("SDEREG.D:[%s:0x%X] <= 0x%X\n", #off, (off)\
100 , (u32)(data));\
101 writel_relaxed(data, (base + (off))); \
102 } while (0)
Alan Kwong9487de22016-01-16 22:06:36 -0500103
104#define SDE_ROTREG_READ(base, off) \
105 readl_relaxed(base + (off))
106
Alan Kwong6bc64622017-02-04 17:36:03 -0800107static u32 sde_hw_rotator_v3_inpixfmts[] = {
Alan Kwongda16e442016-08-14 20:47:18 -0400108 SDE_PIX_FMT_XRGB_8888,
109 SDE_PIX_FMT_ARGB_8888,
110 SDE_PIX_FMT_ABGR_8888,
111 SDE_PIX_FMT_RGBA_8888,
112 SDE_PIX_FMT_BGRA_8888,
113 SDE_PIX_FMT_RGBX_8888,
114 SDE_PIX_FMT_BGRX_8888,
115 SDE_PIX_FMT_XBGR_8888,
116 SDE_PIX_FMT_RGBA_5551,
117 SDE_PIX_FMT_ARGB_1555,
118 SDE_PIX_FMT_ABGR_1555,
119 SDE_PIX_FMT_BGRA_5551,
120 SDE_PIX_FMT_BGRX_5551,
121 SDE_PIX_FMT_RGBX_5551,
122 SDE_PIX_FMT_XBGR_1555,
123 SDE_PIX_FMT_XRGB_1555,
124 SDE_PIX_FMT_ARGB_4444,
125 SDE_PIX_FMT_RGBA_4444,
126 SDE_PIX_FMT_BGRA_4444,
127 SDE_PIX_FMT_ABGR_4444,
128 SDE_PIX_FMT_RGBX_4444,
129 SDE_PIX_FMT_XRGB_4444,
130 SDE_PIX_FMT_BGRX_4444,
131 SDE_PIX_FMT_XBGR_4444,
132 SDE_PIX_FMT_RGB_888,
133 SDE_PIX_FMT_BGR_888,
134 SDE_PIX_FMT_RGB_565,
135 SDE_PIX_FMT_BGR_565,
136 SDE_PIX_FMT_Y_CB_CR_H2V2,
137 SDE_PIX_FMT_Y_CR_CB_H2V2,
138 SDE_PIX_FMT_Y_CR_CB_GH2V2,
139 SDE_PIX_FMT_Y_CBCR_H2V2,
140 SDE_PIX_FMT_Y_CRCB_H2V2,
141 SDE_PIX_FMT_Y_CBCR_H1V2,
142 SDE_PIX_FMT_Y_CRCB_H1V2,
143 SDE_PIX_FMT_Y_CBCR_H2V1,
144 SDE_PIX_FMT_Y_CRCB_H2V1,
145 SDE_PIX_FMT_YCBYCR_H2V1,
146 SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
147 SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
148 SDE_PIX_FMT_RGBA_8888_UBWC,
149 SDE_PIX_FMT_RGBX_8888_UBWC,
150 SDE_PIX_FMT_RGB_565_UBWC,
151 SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
152 SDE_PIX_FMT_RGBA_1010102,
153 SDE_PIX_FMT_RGBX_1010102,
154 SDE_PIX_FMT_ARGB_2101010,
155 SDE_PIX_FMT_XRGB_2101010,
156 SDE_PIX_FMT_BGRA_1010102,
157 SDE_PIX_FMT_BGRX_1010102,
158 SDE_PIX_FMT_ABGR_2101010,
159 SDE_PIX_FMT_XBGR_2101010,
160 SDE_PIX_FMT_RGBA_1010102_UBWC,
161 SDE_PIX_FMT_RGBX_1010102_UBWC,
162 SDE_PIX_FMT_Y_CBCR_H2V2_P010,
163 SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
164 SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
165};
166
Alan Kwong6bc64622017-02-04 17:36:03 -0800167static u32 sde_hw_rotator_v3_outpixfmts[] = {
Alan Kwongda16e442016-08-14 20:47:18 -0400168 SDE_PIX_FMT_XRGB_8888,
169 SDE_PIX_FMT_ARGB_8888,
170 SDE_PIX_FMT_ABGR_8888,
171 SDE_PIX_FMT_RGBA_8888,
172 SDE_PIX_FMT_BGRA_8888,
173 SDE_PIX_FMT_RGBX_8888,
174 SDE_PIX_FMT_BGRX_8888,
175 SDE_PIX_FMT_XBGR_8888,
176 SDE_PIX_FMT_RGBA_5551,
177 SDE_PIX_FMT_ARGB_1555,
178 SDE_PIX_FMT_ABGR_1555,
179 SDE_PIX_FMT_BGRA_5551,
180 SDE_PIX_FMT_BGRX_5551,
181 SDE_PIX_FMT_RGBX_5551,
182 SDE_PIX_FMT_XBGR_1555,
183 SDE_PIX_FMT_XRGB_1555,
184 SDE_PIX_FMT_ARGB_4444,
185 SDE_PIX_FMT_RGBA_4444,
186 SDE_PIX_FMT_BGRA_4444,
187 SDE_PIX_FMT_ABGR_4444,
188 SDE_PIX_FMT_RGBX_4444,
189 SDE_PIX_FMT_XRGB_4444,
190 SDE_PIX_FMT_BGRX_4444,
191 SDE_PIX_FMT_XBGR_4444,
192 SDE_PIX_FMT_RGB_888,
193 SDE_PIX_FMT_BGR_888,
194 SDE_PIX_FMT_RGB_565,
195 SDE_PIX_FMT_BGR_565,
196 /* SDE_PIX_FMT_Y_CB_CR_H2V2 */
197 /* SDE_PIX_FMT_Y_CR_CB_H2V2 */
198 /* SDE_PIX_FMT_Y_CR_CB_GH2V2 */
199 SDE_PIX_FMT_Y_CBCR_H2V2,
200 SDE_PIX_FMT_Y_CRCB_H2V2,
201 SDE_PIX_FMT_Y_CBCR_H1V2,
202 SDE_PIX_FMT_Y_CRCB_H1V2,
203 SDE_PIX_FMT_Y_CBCR_H2V1,
204 SDE_PIX_FMT_Y_CRCB_H2V1,
205 /* SDE_PIX_FMT_YCBYCR_H2V1 */
206 SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
207 SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
208 SDE_PIX_FMT_RGBA_8888_UBWC,
209 SDE_PIX_FMT_RGBX_8888_UBWC,
210 SDE_PIX_FMT_RGB_565_UBWC,
211 SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
212 SDE_PIX_FMT_RGBA_1010102,
213 SDE_PIX_FMT_RGBX_1010102,
214 /* SDE_PIX_FMT_ARGB_2101010 */
215 /* SDE_PIX_FMT_XRGB_2101010 */
216 SDE_PIX_FMT_BGRA_1010102,
217 SDE_PIX_FMT_BGRX_1010102,
218 /* SDE_PIX_FMT_ABGR_2101010 */
219 /* SDE_PIX_FMT_XBGR_2101010 */
220 SDE_PIX_FMT_RGBA_1010102_UBWC,
221 SDE_PIX_FMT_RGBX_1010102_UBWC,
222 SDE_PIX_FMT_Y_CBCR_H2V2_P010,
223 SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
224 SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
225};
226
Alan Kwong6bc64622017-02-04 17:36:03 -0800227static u32 sde_hw_rotator_v4_inpixfmts[] = {
228 SDE_PIX_FMT_XRGB_8888,
229 SDE_PIX_FMT_ARGB_8888,
230 SDE_PIX_FMT_ABGR_8888,
231 SDE_PIX_FMT_RGBA_8888,
232 SDE_PIX_FMT_BGRA_8888,
233 SDE_PIX_FMT_RGBX_8888,
234 SDE_PIX_FMT_BGRX_8888,
235 SDE_PIX_FMT_XBGR_8888,
236 SDE_PIX_FMT_RGBA_5551,
237 SDE_PIX_FMT_ARGB_1555,
238 SDE_PIX_FMT_ABGR_1555,
239 SDE_PIX_FMT_BGRA_5551,
240 SDE_PIX_FMT_BGRX_5551,
241 SDE_PIX_FMT_RGBX_5551,
242 SDE_PIX_FMT_XBGR_1555,
243 SDE_PIX_FMT_XRGB_1555,
244 SDE_PIX_FMT_ARGB_4444,
245 SDE_PIX_FMT_RGBA_4444,
246 SDE_PIX_FMT_BGRA_4444,
247 SDE_PIX_FMT_ABGR_4444,
248 SDE_PIX_FMT_RGBX_4444,
249 SDE_PIX_FMT_XRGB_4444,
250 SDE_PIX_FMT_BGRX_4444,
251 SDE_PIX_FMT_XBGR_4444,
252 SDE_PIX_FMT_RGB_888,
253 SDE_PIX_FMT_BGR_888,
254 SDE_PIX_FMT_RGB_565,
255 SDE_PIX_FMT_BGR_565,
256 SDE_PIX_FMT_Y_CB_CR_H2V2,
257 SDE_PIX_FMT_Y_CR_CB_H2V2,
258 SDE_PIX_FMT_Y_CR_CB_GH2V2,
259 SDE_PIX_FMT_Y_CBCR_H2V2,
260 SDE_PIX_FMT_Y_CRCB_H2V2,
261 SDE_PIX_FMT_Y_CBCR_H1V2,
262 SDE_PIX_FMT_Y_CRCB_H1V2,
263 SDE_PIX_FMT_Y_CBCR_H2V1,
264 SDE_PIX_FMT_Y_CRCB_H2V1,
265 SDE_PIX_FMT_YCBYCR_H2V1,
266 SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
267 SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
268 SDE_PIX_FMT_RGBA_8888_UBWC,
269 SDE_PIX_FMT_RGBX_8888_UBWC,
270 SDE_PIX_FMT_RGB_565_UBWC,
271 SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
272 SDE_PIX_FMT_RGBA_1010102,
273 SDE_PIX_FMT_RGBX_1010102,
274 SDE_PIX_FMT_ARGB_2101010,
275 SDE_PIX_FMT_XRGB_2101010,
276 SDE_PIX_FMT_BGRA_1010102,
277 SDE_PIX_FMT_BGRX_1010102,
278 SDE_PIX_FMT_ABGR_2101010,
279 SDE_PIX_FMT_XBGR_2101010,
280 SDE_PIX_FMT_RGBA_1010102_UBWC,
281 SDE_PIX_FMT_RGBX_1010102_UBWC,
282 SDE_PIX_FMT_Y_CBCR_H2V2_P010,
283 SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
284 SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
Alan Kwong2ad00bc2017-02-06 23:32:17 -0800285 SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC,
286 SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE,
Alan Kwong6bc64622017-02-04 17:36:03 -0800287 SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
288 SDE_PIX_FMT_Y_CRCB_H2V2_TILE,
289 SDE_PIX_FMT_XRGB_8888_TILE,
290 SDE_PIX_FMT_ARGB_8888_TILE,
291 SDE_PIX_FMT_ABGR_8888_TILE,
292 SDE_PIX_FMT_XBGR_8888_TILE,
293 SDE_PIX_FMT_RGBA_8888_TILE,
294 SDE_PIX_FMT_BGRA_8888_TILE,
295 SDE_PIX_FMT_RGBX_8888_TILE,
296 SDE_PIX_FMT_BGRX_8888_TILE,
297 SDE_PIX_FMT_RGBA_1010102_TILE,
298 SDE_PIX_FMT_RGBX_1010102_TILE,
299 SDE_PIX_FMT_ARGB_2101010_TILE,
300 SDE_PIX_FMT_XRGB_2101010_TILE,
301 SDE_PIX_FMT_BGRA_1010102_TILE,
302 SDE_PIX_FMT_BGRX_1010102_TILE,
303 SDE_PIX_FMT_ABGR_2101010_TILE,
304 SDE_PIX_FMT_XBGR_2101010_TILE,
305};
306
307static u32 sde_hw_rotator_v4_outpixfmts[] = {
308 SDE_PIX_FMT_XRGB_8888,
309 SDE_PIX_FMT_ARGB_8888,
310 SDE_PIX_FMT_ABGR_8888,
311 SDE_PIX_FMT_RGBA_8888,
312 SDE_PIX_FMT_BGRA_8888,
313 SDE_PIX_FMT_RGBX_8888,
314 SDE_PIX_FMT_BGRX_8888,
315 SDE_PIX_FMT_XBGR_8888,
316 SDE_PIX_FMT_RGBA_5551,
317 SDE_PIX_FMT_ARGB_1555,
318 SDE_PIX_FMT_ABGR_1555,
319 SDE_PIX_FMT_BGRA_5551,
320 SDE_PIX_FMT_BGRX_5551,
321 SDE_PIX_FMT_RGBX_5551,
322 SDE_PIX_FMT_XBGR_1555,
323 SDE_PIX_FMT_XRGB_1555,
324 SDE_PIX_FMT_ARGB_4444,
325 SDE_PIX_FMT_RGBA_4444,
326 SDE_PIX_FMT_BGRA_4444,
327 SDE_PIX_FMT_ABGR_4444,
328 SDE_PIX_FMT_RGBX_4444,
329 SDE_PIX_FMT_XRGB_4444,
330 SDE_PIX_FMT_BGRX_4444,
331 SDE_PIX_FMT_XBGR_4444,
332 SDE_PIX_FMT_RGB_888,
333 SDE_PIX_FMT_BGR_888,
334 SDE_PIX_FMT_RGB_565,
335 SDE_PIX_FMT_BGR_565,
336 /* SDE_PIX_FMT_Y_CB_CR_H2V2 */
337 /* SDE_PIX_FMT_Y_CR_CB_H2V2 */
338 /* SDE_PIX_FMT_Y_CR_CB_GH2V2 */
339 SDE_PIX_FMT_Y_CBCR_H2V2,
340 SDE_PIX_FMT_Y_CRCB_H2V2,
341 SDE_PIX_FMT_Y_CBCR_H1V2,
342 SDE_PIX_FMT_Y_CRCB_H1V2,
343 SDE_PIX_FMT_Y_CBCR_H2V1,
344 SDE_PIX_FMT_Y_CRCB_H2V1,
345 /* SDE_PIX_FMT_YCBYCR_H2V1 */
346 SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
347 SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
348 SDE_PIX_FMT_RGBA_8888_UBWC,
349 SDE_PIX_FMT_RGBX_8888_UBWC,
350 SDE_PIX_FMT_RGB_565_UBWC,
351 SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
352 SDE_PIX_FMT_RGBA_1010102,
353 SDE_PIX_FMT_RGBX_1010102,
Alan Kwong7f59c872017-05-31 11:36:11 -0400354 SDE_PIX_FMT_ARGB_2101010,
355 SDE_PIX_FMT_XRGB_2101010,
Alan Kwong6bc64622017-02-04 17:36:03 -0800356 SDE_PIX_FMT_BGRA_1010102,
357 SDE_PIX_FMT_BGRX_1010102,
Alan Kwong7f59c872017-05-31 11:36:11 -0400358 SDE_PIX_FMT_ABGR_2101010,
359 SDE_PIX_FMT_XBGR_2101010,
Alan Kwong6bc64622017-02-04 17:36:03 -0800360 SDE_PIX_FMT_RGBA_1010102_UBWC,
361 SDE_PIX_FMT_RGBX_1010102_UBWC,
362 SDE_PIX_FMT_Y_CBCR_H2V2_P010,
363 SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
364 SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
Alan Kwong2ad00bc2017-02-06 23:32:17 -0800365 SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC,
366 SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE,
Alan Kwong6bc64622017-02-04 17:36:03 -0800367 SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
368 SDE_PIX_FMT_Y_CRCB_H2V2_TILE,
369 SDE_PIX_FMT_XRGB_8888_TILE,
370 SDE_PIX_FMT_ARGB_8888_TILE,
371 SDE_PIX_FMT_ABGR_8888_TILE,
372 SDE_PIX_FMT_XBGR_8888_TILE,
373 SDE_PIX_FMT_RGBA_8888_TILE,
374 SDE_PIX_FMT_BGRA_8888_TILE,
375 SDE_PIX_FMT_RGBX_8888_TILE,
376 SDE_PIX_FMT_BGRX_8888_TILE,
377 SDE_PIX_FMT_RGBA_1010102_TILE,
378 SDE_PIX_FMT_RGBX_1010102_TILE,
379 SDE_PIX_FMT_ARGB_2101010_TILE,
380 SDE_PIX_FMT_XRGB_2101010_TILE,
381 SDE_PIX_FMT_BGRA_1010102_TILE,
382 SDE_PIX_FMT_BGRX_1010102_TILE,
383 SDE_PIX_FMT_ABGR_2101010_TILE,
384 SDE_PIX_FMT_XBGR_2101010_TILE,
385};
386
Benjamin Chan53e3bce2016-08-31 14:43:29 -0400387static struct sde_rot_vbif_debug_bus nrt_vbif_dbg_bus_r3[] = {
Benjamin Chan2d6411a2017-03-28 18:01:53 -0400388 {0x214, 0x21c, 16, 1, 0x200}, /* arb clients main */
Benjamin Chan53e3bce2016-08-31 14:43:29 -0400389 {0x214, 0x21c, 0, 12, 0x13}, /* xin blocks - axi side */
390 {0x21c, 0x214, 0, 12, 0xc}, /* xin blocks - clock side */
391};
392
Benjamin Chan2d6411a2017-03-28 18:01:53 -0400393static struct sde_rot_debug_bus rot_dbgbus_r3[] = {
394 /*
395 * rottop - 0xA8850
396 */
397 /* REGDMA */
398 { 0XA8850, 0, 0 },
399 { 0XA8850, 0, 1 },
400 { 0XA8850, 0, 2 },
401 { 0XA8850, 0, 3 },
402 { 0XA8850, 0, 4 },
403
404 /* ROT_WB */
405 { 0XA8850, 1, 0 },
406 { 0XA8850, 1, 1 },
407 { 0XA8850, 1, 2 },
408 { 0XA8850, 1, 3 },
409 { 0XA8850, 1, 4 },
410 { 0XA8850, 1, 5 },
411 { 0XA8850, 1, 6 },
412 { 0XA8850, 1, 7 },
413
414 /* UBWC_DEC */
415 { 0XA8850, 2, 0 },
416
417 /* UBWC_ENC */
418 { 0XA8850, 3, 0 },
419
420 /* ROT_FETCH_0 */
421 { 0XA8850, 4, 0 },
422 { 0XA8850, 4, 1 },
423 { 0XA8850, 4, 2 },
424 { 0XA8850, 4, 3 },
425 { 0XA8850, 4, 4 },
426 { 0XA8850, 4, 5 },
427 { 0XA8850, 4, 6 },
428 { 0XA8850, 4, 7 },
429
430 /* ROT_FETCH_1 */
431 { 0XA8850, 5, 0 },
432 { 0XA8850, 5, 1 },
433 { 0XA8850, 5, 2 },
434 { 0XA8850, 5, 3 },
435 { 0XA8850, 5, 4 },
436 { 0XA8850, 5, 5 },
437 { 0XA8850, 5, 6 },
438 { 0XA8850, 5, 7 },
439
440 /* ROT_FETCH_2 */
441 { 0XA8850, 6, 0 },
442 { 0XA8850, 6, 1 },
443 { 0XA8850, 6, 2 },
444 { 0XA8850, 6, 3 },
445 { 0XA8850, 6, 4 },
446 { 0XA8850, 6, 5 },
447 { 0XA8850, 6, 6 },
448 { 0XA8850, 6, 7 },
449
450 /* ROT_FETCH_3 */
451 { 0XA8850, 7, 0 },
452 { 0XA8850, 7, 1 },
453 { 0XA8850, 7, 2 },
454 { 0XA8850, 7, 3 },
455 { 0XA8850, 7, 4 },
456 { 0XA8850, 7, 5 },
457 { 0XA8850, 7, 6 },
458 { 0XA8850, 7, 7 },
459
460 /* ROT_FETCH_4 */
461 { 0XA8850, 8, 0 },
462 { 0XA8850, 8, 1 },
463 { 0XA8850, 8, 2 },
464 { 0XA8850, 8, 3 },
465 { 0XA8850, 8, 4 },
466 { 0XA8850, 8, 5 },
467 { 0XA8850, 8, 6 },
468 { 0XA8850, 8, 7 },
469
470 /* ROT_UNPACK_0*/
471 { 0XA8850, 9, 0 },
472 { 0XA8850, 9, 1 },
473 { 0XA8850, 9, 2 },
474 { 0XA8850, 9, 3 },
475};
476
Benjamin Chan53e3bce2016-08-31 14:43:29 -0400477static struct sde_rot_regdump sde_rot_r3_regdump[] = {
478 { "SDEROT_ROTTOP", SDE_ROT_ROTTOP_OFFSET, 0x100, SDE_ROT_REGDUMP_READ },
479 { "SDEROT_SSPP", SDE_ROT_SSPP_OFFSET, 0x200, SDE_ROT_REGDUMP_READ },
480 { "SDEROT_WB", SDE_ROT_WB_OFFSET, 0x300, SDE_ROT_REGDUMP_READ },
481 { "SDEROT_REGDMA_CSR", SDE_ROT_REGDMA_OFFSET, 0x100,
482 SDE_ROT_REGDUMP_READ },
483 /*
484 * Need to perform a SW reset to REGDMA in order to access the
485 * REGDMA RAM especially if REGDMA is waiting for Rotator IDLE.
486 * REGDMA RAM should be dump at last.
487 */
488 { "SDEROT_REGDMA_RESET", ROTTOP_SW_RESET_OVERRIDE, 1,
489 SDE_ROT_REGDUMP_WRITE },
490 { "SDEROT_REGDMA_RAM", SDE_ROT_REGDMA_RAM_OFFSET, 0x2000,
491 SDE_ROT_REGDUMP_READ },
Benjamin Chan59a06052017-01-12 18:06:03 -0500492 { "SDEROT_VBIF_NRT", SDE_ROT_VBIF_NRT_OFFSET, 0x590,
493 SDE_ROT_REGDUMP_VBIF },
Benjamin Chan53e3bce2016-08-31 14:43:29 -0400494};
495
Veera Sundaram Sankaran3f0141e2017-05-10 18:19:29 -0700496struct sde_rot_cdp_params {
497 bool enable;
498 struct sde_mdp_format_params *fmt;
499 u32 offset;
500};
501
Alan Kwong818b7fc2016-07-24 22:07:41 -0400502/* Invalid software timestamp value for initialization */
503#define SDE_REGDMA_SWTS_INVALID (~0)
504
505/**
506 * sde_hw_rotator_elapsed_swts - Find difference of 2 software timestamps
507 * @ts_curr: current software timestamp
508 * @ts_prev: previous software timestamp
509 * @return: the amount ts_curr is ahead of ts_prev
510 */
511static int sde_hw_rotator_elapsed_swts(u32 ts_curr, u32 ts_prev)
512{
513 u32 diff = (ts_curr - ts_prev) & SDE_REGDMA_SWTS_MASK;
514
515 return sign_extend32(diff, (SDE_REGDMA_SWTS_SHIFT - 1));
516}
517
518/**
519 * sde_hw_rotator_pending_swts - Check if the given context is still pending
520 * @rot: Pointer to hw rotator
521 * @ctx: Pointer to rotator context
522 * @pswts: Pointer to returned reference software timestamp, optional
523 * @return: true if context has pending requests
524 */
525static int sde_hw_rotator_pending_swts(struct sde_hw_rotator *rot,
526 struct sde_hw_rotator_context *ctx, u32 *pswts)
527{
528 u32 swts;
529 int ts_diff;
530 bool pending;
531
532 if (ctx->last_regdma_timestamp == SDE_REGDMA_SWTS_INVALID)
533 swts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);
534 else
535 swts = ctx->last_regdma_timestamp;
536
537 if (ctx->q_id == ROT_QUEUE_LOW_PRIORITY)
538 swts >>= SDE_REGDMA_SWTS_SHIFT;
539
540 swts &= SDE_REGDMA_SWTS_MASK;
541
542 ts_diff = sde_hw_rotator_elapsed_swts(ctx->timestamp, swts);
543
544 if (pswts)
545 *pswts = swts;
546
547 pending = (ts_diff > 0) ? true : false;
548
549 SDEROT_DBG("ts:0x%x, queue_id:%d, swts:0x%x, pending:%d\n",
550 ctx->timestamp, ctx->q_id, swts, pending);
Benjamin Chan0f9e61d2016-09-16 16:01:09 -0400551 SDEROT_EVTLOG(ctx->timestamp, swts, ctx->q_id, ts_diff);
Alan Kwong818b7fc2016-07-24 22:07:41 -0400552 return pending;
553}
554
555/**
Alan Kwong6bc64622017-02-04 17:36:03 -0800556 * sde_hw_rotator_update_swts - update software timestamp with given value
557 * @rot: Pointer to hw rotator
558 * @ctx: Pointer to rotator contxt
559 * @swts: new software timestamp
560 * @return: new combined swts
561 */
562static u32 sde_hw_rotator_update_swts(struct sde_hw_rotator *rot,
563 struct sde_hw_rotator_context *ctx, u32 swts)
564{
565 u32 mask = SDE_REGDMA_SWTS_MASK;
566
567 swts &= SDE_REGDMA_SWTS_MASK;
568 if (ctx->q_id == ROT_QUEUE_LOW_PRIORITY) {
569 swts <<= SDE_REGDMA_SWTS_SHIFT;
570 mask <<= SDE_REGDMA_SWTS_SHIFT;
571 }
572
573 swts |= (SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG) & ~mask);
574 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG, swts);
575
576 return swts;
577}
578
579/**
Alan Kwong818b7fc2016-07-24 22:07:41 -0400580 * sde_hw_rotator_enable_irq - Enable hw rotator interrupt with ref. count
581 * Also, clear rotator/regdma irq status.
582 * @rot: Pointer to hw rotator
583 */
584static void sde_hw_rotator_enable_irq(struct sde_hw_rotator *rot)
585{
586 SDEROT_DBG("irq_num:%d enabled:%d\n", rot->irq_num,
587 atomic_read(&rot->irq_enabled));
588
589 if (!atomic_read(&rot->irq_enabled)) {
590 if (rot->mode == ROT_REGDMA_OFF)
591 SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
592 ROT_DONE_MASK);
593 else
594 SDE_ROTREG_WRITE(rot->mdss_base,
595 REGDMA_CSR_REGDMA_INT_CLEAR, REGDMA_INT_MASK);
596
597 enable_irq(rot->irq_num);
598 }
599 atomic_inc(&rot->irq_enabled);
600}
601
602/**
603 * sde_hw_rotator_disable_irq - Disable hw rotator interrupt with ref. count
604 * Also, clear rotator/regdma irq enable masks.
605 * @rot: Pointer to hw rotator
606 */
607static void sde_hw_rotator_disable_irq(struct sde_hw_rotator *rot)
608{
609 SDEROT_DBG("irq_num:%d enabled:%d\n", rot->irq_num,
610 atomic_read(&rot->irq_enabled));
611
612 if (!atomic_read(&rot->irq_enabled)) {
613 SDEROT_ERR("irq %d is already disabled\n", rot->irq_num);
614 return;
615 }
616
617 if (!atomic_dec_return(&rot->irq_enabled)) {
618 if (rot->mode == ROT_REGDMA_OFF)
619 SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_EN, 0);
620 else
621 SDE_ROTREG_WRITE(rot->mdss_base,
622 REGDMA_CSR_REGDMA_INT_EN, 0);
623 /* disable irq after last pending irq is handled, if any */
624 synchronize_irq(rot->irq_num);
625 disable_irq_nosync(rot->irq_num);
626 }
627}
628
629/**
630 * sde_hw_rotator_dump_status - Dump hw rotator status on error
631 * @rot: Pointer to hw rotator
632 */
633static void sde_hw_rotator_dump_status(struct sde_hw_rotator *rot)
634{
Benjamin Chan1b94f952017-01-23 17:42:30 -0500635 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
636
Alan Kwong818b7fc2016-07-24 22:07:41 -0400637 SDEROT_ERR(
638 "op_mode = %x, int_en = %x, int_status = %x\n",
639 SDE_ROTREG_READ(rot->mdss_base,
640 REGDMA_CSR_REGDMA_OP_MODE),
641 SDE_ROTREG_READ(rot->mdss_base,
642 REGDMA_CSR_REGDMA_INT_EN),
643 SDE_ROTREG_READ(rot->mdss_base,
644 REGDMA_CSR_REGDMA_INT_STATUS));
645
646 SDEROT_ERR(
647 "ts = %x, q0_status = %x, q1_status = %x, block_status = %x\n",
648 SDE_ROTREG_READ(rot->mdss_base,
649 REGDMA_TIMESTAMP_REG),
650 SDE_ROTREG_READ(rot->mdss_base,
651 REGDMA_CSR_REGDMA_QUEUE_0_STATUS),
652 SDE_ROTREG_READ(rot->mdss_base,
653 REGDMA_CSR_REGDMA_QUEUE_1_STATUS),
654 SDE_ROTREG_READ(rot->mdss_base,
655 REGDMA_CSR_REGDMA_BLOCK_STATUS));
656
657 SDEROT_ERR(
658 "invalid_cmd_offset = %x, fsm_state = %x\n",
659 SDE_ROTREG_READ(rot->mdss_base,
660 REGDMA_CSR_REGDMA_INVALID_CMD_RAM_OFFSET),
661 SDE_ROTREG_READ(rot->mdss_base,
662 REGDMA_CSR_REGDMA_FSM_STATE));
Benjamin Chan59a06052017-01-12 18:06:03 -0500663
664 SDEROT_ERR(
665 "UBWC decode status = %x, UBWC encode status = %x\n",
666 SDE_ROTREG_READ(rot->mdss_base, ROT_SSPP_UBWC_ERROR_STATUS),
667 SDE_ROTREG_READ(rot->mdss_base, ROT_WB_UBWC_ERROR_STATUS));
Benjamin Chan1b94f952017-01-23 17:42:30 -0500668
669 SDEROT_ERR("VBIF XIN HALT status = %x VBIF AXI HALT status = %x\n",
670 SDE_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL1),
671 SDE_VBIF_READ(mdata, MMSS_VBIF_AXI_HALT_CTRL1));
Alan Kwong6bc64622017-02-04 17:36:03 -0800672
673 SDEROT_ERR(
674 "sbuf_status_plane0 = %x, sbuf_status_plane1 = %x\n",
675 SDE_ROTREG_READ(rot->mdss_base,
676 ROT_WB_SBUF_STATUS_PLANE0),
677 SDE_ROTREG_READ(rot->mdss_base,
678 ROT_WB_SBUF_STATUS_PLANE1));
Alan Kwong818b7fc2016-07-24 22:07:41 -0400679}
680
Alan Kwong9487de22016-01-16 22:06:36 -0500681/**
682 * sde_hw_rotator_get_ctx(): Retrieve rotator context from rotator HW based
683 * on provided session_id. Each rotator has a different session_id.
684 */
685static struct sde_hw_rotator_context *sde_hw_rotator_get_ctx(
686 struct sde_hw_rotator *rot, u32 session_id,
687 enum sde_rot_queue_prio q_id)
688{
689 int i;
690 struct sde_hw_rotator_context *ctx = NULL;
691
692 for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++) {
693 ctx = rot->rotCtx[q_id][i];
694
695 if (ctx && (ctx->session_id == session_id)) {
696 SDEROT_DBG(
697 "rotCtx sloti[%d][%d] ==> ctx:%p | session-id:%d\n",
698 q_id, i, ctx, ctx->session_id);
699 return ctx;
700 }
701 }
702
703 return NULL;
704}
705
706/*
707 * sde_hw_rotator_map_vaddr - map the debug buffer to kernel space
708 * @dbgbuf: Pointer to debug buffer
709 * @buf: Pointer to layer buffer structure
710 * @data: Pointer to h/w mapped buffer structure
711 */
712static void sde_hw_rotator_map_vaddr(struct sde_dbg_buf *dbgbuf,
713 struct sde_layer_buffer *buf, struct sde_mdp_data *data)
714{
715 dbgbuf->dmabuf = data->p[0].srcp_dma_buf;
716 dbgbuf->buflen = data->p[0].srcp_dma_buf->size;
717
718 dbgbuf->vaddr = NULL;
719 dbgbuf->width = buf->width;
720 dbgbuf->height = buf->height;
721
722 if (dbgbuf->dmabuf && (dbgbuf->buflen > 0)) {
Alan Kwong6ce448d2016-11-24 18:45:20 -0800723 dma_buf_begin_cpu_access(dbgbuf->dmabuf, DMA_FROM_DEVICE);
Alan Kwong9487de22016-01-16 22:06:36 -0500724 dbgbuf->vaddr = dma_buf_kmap(dbgbuf->dmabuf, 0);
725 SDEROT_DBG("vaddr mapping: 0x%p/%ld w:%d/h:%d\n",
726 dbgbuf->vaddr, dbgbuf->buflen,
727 dbgbuf->width, dbgbuf->height);
728 }
729}
730
731/*
732 * sde_hw_rotator_unmap_vaddr - unmap the debug buffer from kernel space
733 * @dbgbuf: Pointer to debug buffer
734 */
735static void sde_hw_rotator_unmap_vaddr(struct sde_dbg_buf *dbgbuf)
736{
737 if (dbgbuf->vaddr) {
738 dma_buf_kunmap(dbgbuf->dmabuf, 0, dbgbuf->vaddr);
Alan Kwong6ce448d2016-11-24 18:45:20 -0800739 dma_buf_end_cpu_access(dbgbuf->dmabuf, DMA_FROM_DEVICE);
Alan Kwong9487de22016-01-16 22:06:36 -0500740 }
741
742 dbgbuf->vaddr = NULL;
743 dbgbuf->dmabuf = NULL;
744 dbgbuf->buflen = 0;
745 dbgbuf->width = 0;
746 dbgbuf->height = 0;
747}
748
749/*
Veera Sundaram Sankarane15dd222017-04-20 08:13:08 -0700750 * sde_hw_rotator_vbif_setting - helper function to set vbif QoS remapper
751 * levels, enable write gather enable and avoid clk gating setting for
752 * debug purpose.
753 *
754 * @rot: Pointer to rotator hw
755 */
756static void sde_hw_rotator_vbif_setting(struct sde_hw_rotator *rot)
757{
758 u32 i, mask, vbif_qos, reg_val = 0;
759 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
760
761 /* VBIF_ROT QoS remapper setting */
762 switch (mdata->npriority_lvl) {
763
764 case SDE_MDP_VBIF_4_LEVEL_REMAPPER:
765 for (i = 0; i < mdata->npriority_lvl; i++) {
766 reg_val = SDE_VBIF_READ(mdata,
767 MMSS_VBIF_NRT_VBIF_QOS_REMAP_00 + i*4);
768 mask = 0x3 << (XIN_SSPP * 2);
769 vbif_qos = mdata->vbif_nrt_qos[i];
770 reg_val |= vbif_qos << (XIN_SSPP * 2);
771 /* ensure write is issued after the read operation */
772 mb();
773 SDE_VBIF_WRITE(mdata,
774 MMSS_VBIF_NRT_VBIF_QOS_REMAP_00 + i*4,
775 reg_val);
776 }
777 break;
778
779 case SDE_MDP_VBIF_8_LEVEL_REMAPPER:
780 mask = mdata->npriority_lvl - 1;
781 for (i = 0; i < mdata->npriority_lvl; i++) {
782 /* RD and WR client */
783 reg_val |= (mdata->vbif_nrt_qos[i] & mask)
784 << (XIN_SSPP * 4);
785 reg_val |= (mdata->vbif_nrt_qos[i] & mask)
786 << (XIN_WRITEBACK * 4);
787
788 SDE_VBIF_WRITE(mdata,
789 MMSS_VBIF_NRT_VBIF_QOS_RP_REMAP_000 + i*8,
790 reg_val);
791 SDE_VBIF_WRITE(mdata,
792 MMSS_VBIF_NRT_VBIF_QOS_LVL_REMAP_000 + i*8,
793 reg_val);
794 }
795 break;
796
797 default:
798 SDEROT_DBG("invalid vbif remapper levels\n");
799 }
800
801 /* Enable write gather for writeback to remove write gaps, which
802 * may hang AXI/BIMC/SDE.
803 */
804 SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_WRITE_GATHTER_EN,
805 BIT(XIN_WRITEBACK));
806
807 /*
808 * For debug purpose, disable clock gating, i.e. Clocks always on
809 */
810 if (mdata->clk_always_on) {
811 SDE_VBIF_WRITE(mdata, MMSS_VBIF_CLKON, 0x3);
812 SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0, 0x3);
813 SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL1,
814 0xFFFF);
815 SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_CLK_CTRL, 1);
816 }
817}
818
819/*
Alan Kwong9487de22016-01-16 22:06:36 -0500820 * sde_hw_rotator_setup_timestamp_packet - setup timestamp writeback command
821 * @ctx: Pointer to rotator context
822 * @mask: Bit mask location of the timestamp
823 * @swts: Software timestamp
824 */
825static void sde_hw_rotator_setup_timestamp_packet(
826 struct sde_hw_rotator_context *ctx, u32 mask, u32 swts)
827{
828 u32 *wrptr;
829
830 wrptr = sde_hw_rotator_get_regdma_segment(ctx);
831
832 /*
833 * Create a dummy packet write out to 1 location for timestamp
834 * generation.
835 */
836 SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_SIZE, 6);
837 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x00010001);
838 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
839 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
840 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x00010001);
841 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
842 SDE_REGDMA_BLKWRITE_DATA(wrptr, ctx->ts_addr);
843 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_YSTRIDE0, 4);
844 SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_FORMAT, 4);
845 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x004037FF);
846 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x03020100);
847 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x80000000);
848 SDE_REGDMA_BLKWRITE_DATA(wrptr, ctx->timestamp);
Benjamin Chan15c93d82016-08-29 10:04:22 -0400849 /*
850 * Must clear secure buffer setting for SW timestamp because
851 * SW timstamp buffer allocation is always non-secure region.
852 */
853 if (ctx->is_secure) {
854 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_ADDR_SW_STATUS, 0);
855 SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ADDR_SW_STATUS, 0);
856 }
Alan Kwong9487de22016-01-16 22:06:36 -0500857 SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_WB_DST_FORMAT, 4);
858 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x000037FF);
859 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
860 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x03020100);
861 SDE_REGDMA_BLKWRITE_DATA(wrptr, ctx->ts_addr);
862 SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_YSTRIDE0, 4);
863 SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_SIZE, 0x00010001);
864 SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_IMG_SIZE, 0x00010001);
865 SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_XY, 0);
866 SDE_REGDMA_WRITE(wrptr, ROTTOP_DNSC, 0);
867 SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, 1);
868 SDE_REGDMA_MODIFY(wrptr, REGDMA_TIMESTAMP_REG, mask, swts);
869 SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, 1);
870
871 sde_hw_rotator_put_regdma_segment(ctx, wrptr);
872}
873
874/*
Veera Sundaram Sankaran3f0141e2017-05-10 18:19:29 -0700875 * sde_hw_rotator_cdp_configs - configures the CDP registers
876 * @ctx: Pointer to rotator context
877 * @params: Pointer to parameters needed for CDP configs
878 */
879static void sde_hw_rotator_cdp_configs(struct sde_hw_rotator_context *ctx,
880 struct sde_rot_cdp_params *params)
881{
882 int reg_val;
883 u32 *wrptr = sde_hw_rotator_get_regdma_segment(ctx);
884
885 if (!params->enable) {
886 SDE_REGDMA_WRITE(wrptr, params->offset, 0x0);
887 goto end;
888 }
889
890 reg_val = BIT(0); /* enable cdp */
891
892 if (sde_mdp_is_ubwc_format(params->fmt))
893 reg_val |= BIT(1); /* enable UBWC meta cdp */
894
895 if (sde_mdp_is_ubwc_format(params->fmt)
896 || sde_mdp_is_tilea4x_format(params->fmt)
897 || sde_mdp_is_tilea5x_format(params->fmt))
898 reg_val |= BIT(2); /* enable tile amortize */
899
900 reg_val |= BIT(3); /* enable preload addr ahead cnt 64 */
901
902 SDE_REGDMA_WRITE(wrptr, params->offset, reg_val);
903
904end:
905 sde_hw_rotator_put_regdma_segment(ctx, wrptr);
906}
907
908/*
Veera Sundaram Sankaranf13fb322017-05-11 15:42:28 -0700909 * sde_hw_rotator_setup_qos_lut_wr - Set QoS LUT/Danger LUT/Safe LUT configs
910 * for the WRITEBACK rotator for inline and offline rotation.
911 *
912 * @ctx: Pointer to rotator context
913 */
914static void sde_hw_rotator_setup_qos_lut_wr(struct sde_hw_rotator_context *ctx)
915{
916 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
917 u32 *wrptr = sde_hw_rotator_get_regdma_segment(ctx);
918
919 /* Offline rotation setting */
920 if (!ctx->sbuf_mode) {
921 /* QOS LUT WR setting */
922 if (test_bit(SDE_QOS_LUT, mdata->sde_qos_map)) {
923 SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_0,
924 mdata->lut_cfg[SDE_ROT_WR].creq_lut_0);
925 SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_1,
926 mdata->lut_cfg[SDE_ROT_WR].creq_lut_1);
927 }
928
929 /* Danger LUT WR setting */
930 if (test_bit(SDE_QOS_DANGER_LUT, mdata->sde_qos_map))
931 SDE_REGDMA_WRITE(wrptr, ROT_WB_DANGER_LUT,
932 mdata->lut_cfg[SDE_ROT_WR].danger_lut);
933
934 /* Safe LUT WR setting */
935 if (test_bit(SDE_QOS_SAFE_LUT, mdata->sde_qos_map))
936 SDE_REGDMA_WRITE(wrptr, ROT_WB_SAFE_LUT,
937 mdata->lut_cfg[SDE_ROT_WR].safe_lut);
938
939 /* Inline rotation setting */
940 } else {
941 /* QOS LUT WR setting */
942 if (test_bit(SDE_INLINE_QOS_LUT, mdata->sde_inline_qos_map)) {
943 SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_0,
944 mdata->inline_lut_cfg[SDE_ROT_WR].creq_lut_0);
945 SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_1,
946 mdata->inline_lut_cfg[SDE_ROT_WR].creq_lut_1);
947 }
948
949 /* Danger LUT WR setting */
950 if (test_bit(SDE_INLINE_QOS_DANGER_LUT,
951 mdata->sde_inline_qos_map))
952 SDE_REGDMA_WRITE(wrptr, ROT_WB_DANGER_LUT,
953 mdata->inline_lut_cfg[SDE_ROT_WR].danger_lut);
954
955 /* Safe LUT WR setting */
956 if (test_bit(SDE_INLINE_QOS_SAFE_LUT,
957 mdata->sde_inline_qos_map))
958 SDE_REGDMA_WRITE(wrptr, ROT_WB_SAFE_LUT,
959 mdata->inline_lut_cfg[SDE_ROT_WR].safe_lut);
960 }
961
962 /* Update command queue write ptr */
963 sde_hw_rotator_put_regdma_segment(ctx, wrptr);
964}
965
966/*
967 * sde_hw_rotator_setup_qos_lut_rd - Set QoS LUT/Danger LUT/Safe LUT configs
968 * for the SSPP rotator for inline and offline rotation.
969 *
970 * @ctx: Pointer to rotator context
971 */
972static void sde_hw_rotator_setup_qos_lut_rd(struct sde_hw_rotator_context *ctx)
973{
974 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
975 u32 *wrptr = sde_hw_rotator_get_regdma_segment(ctx);
976
977 /* Offline rotation setting */
978 if (!ctx->sbuf_mode) {
979 /* QOS LUT RD setting */
980 if (test_bit(SDE_QOS_LUT, mdata->sde_qos_map)) {
981 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_0,
982 mdata->lut_cfg[SDE_ROT_RD].creq_lut_0);
983 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_1,
984 mdata->lut_cfg[SDE_ROT_RD].creq_lut_1);
985 }
986
987 /* Danger LUT RD setting */
988 if (test_bit(SDE_QOS_DANGER_LUT, mdata->sde_qos_map))
989 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_DANGER_LUT,
990 mdata->lut_cfg[SDE_ROT_RD].danger_lut);
991
992 /* Safe LUT RD setting */
993 if (test_bit(SDE_QOS_SAFE_LUT, mdata->sde_qos_map))
994 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SAFE_LUT,
995 mdata->lut_cfg[SDE_ROT_RD].safe_lut);
996
997 /* inline rotation setting */
998 } else {
999 /* QOS LUT RD setting */
1000 if (test_bit(SDE_INLINE_QOS_LUT, mdata->sde_inline_qos_map)) {
1001 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_0,
1002 mdata->inline_lut_cfg[SDE_ROT_RD].creq_lut_0);
1003 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_1,
1004 mdata->inline_lut_cfg[SDE_ROT_RD].creq_lut_1);
1005 }
1006
1007 /* Danger LUT RD setting */
1008 if (test_bit(SDE_INLINE_QOS_DANGER_LUT,
1009 mdata->sde_inline_qos_map))
1010 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_DANGER_LUT,
1011 mdata->inline_lut_cfg[SDE_ROT_RD].danger_lut);
1012
1013 /* Safe LUT RD setting */
1014 if (test_bit(SDE_INLINE_QOS_SAFE_LUT,
1015 mdata->sde_inline_qos_map))
1016 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SAFE_LUT,
1017 mdata->inline_lut_cfg[SDE_ROT_RD].safe_lut);
1018 }
1019
1020 /* Update command queue write ptr */
1021 sde_hw_rotator_put_regdma_segment(ctx, wrptr);
1022}
1023
1024/*
Alan Kwong9487de22016-01-16 22:06:36 -05001025 * sde_hw_rotator_setup_fetchengine - setup fetch engine
1026 * @ctx: Pointer to rotator context
1027 * @queue_id: Priority queue identifier
1028 * @cfg: Fetch configuration
1029 * @danger_lut: real-time QoS LUT for danger setting (not used)
1030 * @safe_lut: real-time QoS LUT for safe setting (not used)
Benjamin Chanfb6faa32016-08-16 17:21:01 -04001031 * @dnsc_factor_w: downscale factor for width
1032 * @dnsc_factor_h: downscale factor for height
Alan Kwong9487de22016-01-16 22:06:36 -05001033 * @flags: Control flag
1034 */
1035static void sde_hw_rotator_setup_fetchengine(struct sde_hw_rotator_context *ctx,
1036 enum sde_rot_queue_prio queue_id,
1037 struct sde_hw_rot_sspp_cfg *cfg, u32 danger_lut, u32 safe_lut,
Benjamin Chanfb6faa32016-08-16 17:21:01 -04001038 u32 dnsc_factor_w, u32 dnsc_factor_h, u32 flags)
Alan Kwong9487de22016-01-16 22:06:36 -05001039{
1040 struct sde_hw_rotator *rot = ctx->rot;
1041 struct sde_mdp_format_params *fmt;
1042 struct sde_mdp_data *data;
Veera Sundaram Sankaran3f0141e2017-05-10 18:19:29 -07001043 struct sde_rot_cdp_params cdp_params = {0};
Benjamin Chanfb6faa32016-08-16 17:21:01 -04001044 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
Alan Kwong9487de22016-01-16 22:06:36 -05001045 u32 *wrptr;
1046 u32 opmode = 0;
1047 u32 chroma_samp = 0;
1048 u32 src_format = 0;
1049 u32 unpack = 0;
1050 u32 width = cfg->img_width;
1051 u32 height = cfg->img_height;
1052 u32 fetch_blocksize = 0;
1053 int i;
1054
1055 if (ctx->rot->mode == ROT_REGDMA_ON) {
1056 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_INT_EN,
1057 REGDMA_INT_MASK);
1058 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_OP_MODE,
1059 REGDMA_EN);
1060 }
1061
1062 wrptr = sde_hw_rotator_get_regdma_segment(ctx);
1063
Alan Kwong5b4d71b2017-02-10 20:52:59 -08001064 /*
1065 * initialize start control trigger selection first
1066 */
1067 if (test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map)) {
1068 if (ctx->sbuf_mode)
1069 SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL,
1070 ctx->start_ctrl);
1071 else
1072 SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, 0);
1073 }
1074
Alan Kwong9487de22016-01-16 22:06:36 -05001075 /* source image setup */
1076 if ((flags & SDE_ROT_FLAG_DEINTERLACE)
1077 && !(flags & SDE_ROT_FLAG_SOURCE_ROTATED_90)) {
1078 for (i = 0; i < cfg->src_plane.num_planes; i++)
1079 cfg->src_plane.ystride[i] *= 2;
1080 width *= 2;
1081 height /= 2;
1082 }
1083
1084 /*
1085 * REGDMA BLK write from SRC_SIZE to OP_MODE, total 15 registers
1086 */
1087 SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_SIZE, 15);
1088
1089 /* SRC_SIZE, SRC_IMG_SIZE, SRC_XY, OUT_SIZE, OUT_XY */
1090 SDE_REGDMA_BLKWRITE_DATA(wrptr,
1091 cfg->src_rect->w | (cfg->src_rect->h << 16));
1092 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0); /* SRC_IMG_SIZE unused */
1093 SDE_REGDMA_BLKWRITE_DATA(wrptr,
1094 cfg->src_rect->x | (cfg->src_rect->y << 16));
1095 SDE_REGDMA_BLKWRITE_DATA(wrptr,
1096 cfg->src_rect->w | (cfg->src_rect->h << 16));
1097 SDE_REGDMA_BLKWRITE_DATA(wrptr,
1098 cfg->src_rect->x | (cfg->src_rect->y << 16));
1099
1100 /* SRC_ADDR [0-3], SRC_YSTRIDE [0-1] */
1101 data = cfg->data;
1102 for (i = 0; i < SDE_ROT_MAX_PLANES; i++)
1103 SDE_REGDMA_BLKWRITE_DATA(wrptr, data->p[i].addr);
1104 SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->src_plane.ystride[0] |
1105 (cfg->src_plane.ystride[1] << 16));
1106 SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->src_plane.ystride[2] |
1107 (cfg->src_plane.ystride[3] << 16));
1108
1109 /* UNUSED, write 0 */
1110 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
1111
1112 /* setup source format */
1113 fmt = cfg->fmt;
1114
1115 chroma_samp = fmt->chroma_sample;
1116 if (flags & SDE_ROT_FLAG_SOURCE_ROTATED_90) {
1117 if (chroma_samp == SDE_MDP_CHROMA_H2V1)
1118 chroma_samp = SDE_MDP_CHROMA_H1V2;
1119 else if (chroma_samp == SDE_MDP_CHROMA_H1V2)
1120 chroma_samp = SDE_MDP_CHROMA_H2V1;
1121 }
1122
1123 src_format = (chroma_samp << 23) |
1124 (fmt->fetch_planes << 19) |
1125 (fmt->bits[C3_ALPHA] << 6) |
1126 (fmt->bits[C2_R_Cr] << 4) |
1127 (fmt->bits[C1_B_Cb] << 2) |
1128 (fmt->bits[C0_G_Y] << 0);
1129
1130 if (fmt->alpha_enable &&
1131 (fmt->fetch_planes == SDE_MDP_PLANE_INTERLEAVED))
1132 src_format |= BIT(8); /* SRCC3_EN */
1133
1134 src_format |= ((fmt->unpack_count - 1) << 12) |
1135 (fmt->unpack_tight << 17) |
1136 (fmt->unpack_align_msb << 18) |
1137 ((fmt->bpp - 1) << 9) |
1138 ((fmt->frame_format & 3) << 30);
1139
1140 if (flags & SDE_ROT_FLAG_ROT_90)
1141 src_format |= BIT(11); /* ROT90 */
1142
1143 if (sde_mdp_is_ubwc_format(fmt))
1144 opmode |= BIT(0); /* BWC_DEC_EN */
1145
1146 /* if this is YUV pixel format, enable CSC */
1147 if (sde_mdp_is_yuv_format(fmt))
1148 src_format |= BIT(15); /* SRC_COLOR_SPACE */
1149
1150 if (fmt->pixel_mode == SDE_MDP_PIXEL_10BIT)
1151 src_format |= BIT(14); /* UNPACK_DX_FORMAT */
1152
Alan Kwong3bef26f2017-02-26 15:38:09 -08001153 if (rot->solid_fill)
1154 src_format |= BIT(22); /* SOLID_FILL */
1155
Alan Kwong9487de22016-01-16 22:06:36 -05001156 /* SRC_FORMAT */
1157 SDE_REGDMA_BLKWRITE_DATA(wrptr, src_format);
1158
1159 /* setup source unpack pattern */
1160 unpack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
1161 (fmt->element[1] << 8) | (fmt->element[0] << 0);
1162
1163 /* SRC_UNPACK_PATTERN */
1164 SDE_REGDMA_BLKWRITE_DATA(wrptr, unpack);
1165
1166 /* setup source op mode */
1167 if (flags & SDE_ROT_FLAG_FLIP_LR)
1168 opmode |= BIT(13); /* FLIP_MODE L/R horizontal flip */
1169 if (flags & SDE_ROT_FLAG_FLIP_UD)
1170 opmode |= BIT(14); /* FLIP_MODE U/D vertical flip */
1171 opmode |= BIT(31); /* MDSS_MDP_OP_PE_OVERRIDE */
1172
1173 /* SRC_OP_MODE */
1174 SDE_REGDMA_BLKWRITE_DATA(wrptr, opmode);
1175
1176 /* setup source fetch config, TP10 uses different block size */
Benjamin Chanfb6faa32016-08-16 17:21:01 -04001177 if (test_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map) &&
1178 (dnsc_factor_w == 1) && (dnsc_factor_h == 1)) {
1179 if (sde_mdp_is_tp10_format(fmt))
1180 fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_144_EXT;
1181 else
1182 fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_192_EXT;
1183 } else {
1184 if (sde_mdp_is_tp10_format(fmt))
1185 fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_96;
1186 else
1187 fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_128;
1188 }
1189
Alan Kwong3bef26f2017-02-26 15:38:09 -08001190 if (rot->solid_fill)
1191 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_CONSTANT_COLOR,
1192 rot->constant_color);
1193
Alan Kwong9487de22016-01-16 22:06:36 -05001194 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_FETCH_CONFIG,
1195 fetch_blocksize |
1196 SDE_ROT_SSPP_FETCH_CONFIG_RESET_VALUE |
1197 ((rot->highest_bank & 0x3) << 18));
1198
Alan Kwongfb8eeb22017-02-06 15:00:03 -08001199 if (test_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map))
1200 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_UBWC_STATIC_CTRL, BIT(31) |
1201 ((ctx->rot->ubwc_malsize & 0x3) << 8) |
1202 ((ctx->rot->highest_bank & 0x3) << 4) |
1203 ((ctx->rot->ubwc_swizzle & 0x1) << 0));
1204
Alan Kwong9487de22016-01-16 22:06:36 -05001205 /* setup source buffer plane security status */
Abhijit Kulkarni298c8232016-09-26 22:32:10 -07001206 if (flags & (SDE_ROT_FLAG_SECURE_OVERLAY_SESSION |
1207 SDE_ROT_FLAG_SECURE_CAMERA_SESSION)) {
Alan Kwong9487de22016-01-16 22:06:36 -05001208 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_ADDR_SW_STATUS, 0xF);
1209 ctx->is_secure = true;
Benjamin Chan15c93d82016-08-29 10:04:22 -04001210 } else {
1211 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_ADDR_SW_STATUS, 0);
1212 ctx->is_secure = false;
Alan Kwong9487de22016-01-16 22:06:36 -05001213 }
1214
Veera Sundaram Sankaranf13fb322017-05-11 15:42:28 -07001215 /* Update command queue write ptr */
Veera Sundaram Sankaran3f0141e2017-05-10 18:19:29 -07001216 sde_hw_rotator_put_regdma_segment(ctx, wrptr);
1217
1218 /* CDP register RD setting */
1219 cdp_params.enable = test_bit(SDE_QOS_CDP, mdata->sde_qos_map) ?
1220 mdata->enable_cdp[SDE_ROT_RD] : false;
1221 cdp_params.fmt = fmt;
1222 cdp_params.offset = ROT_SSPP_CDP_CNTL;
1223 sde_hw_rotator_cdp_configs(ctx, &cdp_params);
1224
Veera Sundaram Sankaranf13fb322017-05-11 15:42:28 -07001225 /* QOS LUT/ Danger LUT/ Safe Lut WR setting */
1226 sde_hw_rotator_setup_qos_lut_rd(ctx);
1227
Veera Sundaram Sankaran3f0141e2017-05-10 18:19:29 -07001228 wrptr = sde_hw_rotator_get_regdma_segment(ctx);
1229
Benjamin Chan99eb63b2016-12-21 15:45:26 -05001230 /*
1231 * Determine if traffic shaping is required. Only enable traffic
1232 * shaping when content is 4k@30fps. The actual traffic shaping
1233 * bandwidth calculation is done in output setup.
1234 */
Veera Sundaram Sankaranf13fb322017-05-11 15:42:28 -07001235 if (((!ctx->sbuf_mode)
1236 && (cfg->src_rect->w * cfg->src_rect->h) >= RES_UHD)
1237 && (cfg->fps <= 30)) {
Benjamin Chan99eb63b2016-12-21 15:45:26 -05001238 SDEROT_DBG("Enable Traffic Shaper\n");
1239 ctx->is_traffic_shaping = true;
1240 } else {
1241 SDEROT_DBG("Disable Traffic Shaper\n");
1242 ctx->is_traffic_shaping = false;
1243 }
1244
Alan Kwong9487de22016-01-16 22:06:36 -05001245 /* Update command queue write ptr */
1246 sde_hw_rotator_put_regdma_segment(ctx, wrptr);
1247}
1248
1249/*
1250 * sde_hw_rotator_setup_wbengine - setup writeback engine
1251 * @ctx: Pointer to rotator context
1252 * @queue_id: Priority queue identifier
1253 * @cfg: Writeback configuration
1254 * @flags: Control flag
1255 */
1256static void sde_hw_rotator_setup_wbengine(struct sde_hw_rotator_context *ctx,
1257 enum sde_rot_queue_prio queue_id,
1258 struct sde_hw_rot_wb_cfg *cfg,
1259 u32 flags)
1260{
Alan Kwong6bc64622017-02-04 17:36:03 -08001261 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
Alan Kwong9487de22016-01-16 22:06:36 -05001262 struct sde_mdp_format_params *fmt;
Veera Sundaram Sankaran3f0141e2017-05-10 18:19:29 -07001263 struct sde_rot_cdp_params cdp_params = {0};
Alan Kwong9487de22016-01-16 22:06:36 -05001264 u32 *wrptr;
1265 u32 pack = 0;
1266 u32 dst_format = 0;
Clarence Ip22fed4c2017-05-16 15:30:51 -04001267 u32 partial_write = 0;
Alan Kwong9487de22016-01-16 22:06:36 -05001268 int i;
1269
1270 wrptr = sde_hw_rotator_get_regdma_segment(ctx);
1271
1272 fmt = cfg->fmt;
1273
1274 /* setup WB DST format */
1275 dst_format |= (fmt->chroma_sample << 23) |
1276 (fmt->fetch_planes << 19) |
1277 (fmt->bits[C3_ALPHA] << 6) |
1278 (fmt->bits[C2_R_Cr] << 4) |
1279 (fmt->bits[C1_B_Cb] << 2) |
1280 (fmt->bits[C0_G_Y] << 0);
1281
1282 /* alpha control */
1283 if (fmt->bits[C3_ALPHA] || fmt->alpha_enable) {
1284 dst_format |= BIT(8);
1285 if (!fmt->alpha_enable) {
1286 dst_format |= BIT(14);
1287 SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ALPHA_X_VALUE, 0);
1288 }
1289 }
1290
1291 dst_format |= ((fmt->unpack_count - 1) << 12) |
1292 (fmt->unpack_tight << 17) |
1293 (fmt->unpack_align_msb << 18) |
1294 ((fmt->bpp - 1) << 9) |
1295 ((fmt->frame_format & 3) << 30);
1296
1297 if (sde_mdp_is_yuv_format(fmt))
1298 dst_format |= BIT(15);
1299
1300 if (fmt->pixel_mode == SDE_MDP_PIXEL_10BIT)
1301 dst_format |= BIT(21); /* PACK_DX_FORMAT */
1302
1303 /*
1304 * REGDMA BLK write, from DST_FORMAT to DST_YSTRIDE 1, total 9 regs
1305 */
1306 SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_WB_DST_FORMAT, 9);
1307
1308 /* DST_FORMAT */
1309 SDE_REGDMA_BLKWRITE_DATA(wrptr, dst_format);
1310
1311 /* DST_OP_MODE */
1312 if (sde_mdp_is_ubwc_format(fmt))
1313 SDE_REGDMA_BLKWRITE_DATA(wrptr, BIT(0));
1314 else
1315 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
1316
1317 /* DST_PACK_PATTERN */
1318 pack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
1319 (fmt->element[1] << 8) | (fmt->element[0] << 0);
1320 SDE_REGDMA_BLKWRITE_DATA(wrptr, pack);
1321
1322 /* DST_ADDR [0-3], DST_YSTRIDE [0-1] */
1323 for (i = 0; i < SDE_ROT_MAX_PLANES; i++)
1324 SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->data->p[i].addr);
1325 SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->dst_plane.ystride[0] |
1326 (cfg->dst_plane.ystride[1] << 16));
1327 SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->dst_plane.ystride[2] |
1328 (cfg->dst_plane.ystride[3] << 16));
1329
1330 /* setup WB out image size and ROI */
1331 SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_IMG_SIZE,
1332 cfg->img_width | (cfg->img_height << 16));
1333 SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_SIZE,
1334 cfg->dst_rect->w | (cfg->dst_rect->h << 16));
1335 SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_XY,
1336 cfg->dst_rect->x | (cfg->dst_rect->y << 16));
1337
Abhijit Kulkarni298c8232016-09-26 22:32:10 -07001338 if (flags & (SDE_ROT_FLAG_SECURE_OVERLAY_SESSION |
1339 SDE_ROT_FLAG_SECURE_CAMERA_SESSION))
Benjamin Chan15c93d82016-08-29 10:04:22 -04001340 SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ADDR_SW_STATUS, 0x1);
1341 else
1342 SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ADDR_SW_STATUS, 0);
1343
Alan Kwong9487de22016-01-16 22:06:36 -05001344 /*
1345 * setup Downscale factor
1346 */
1347 SDE_REGDMA_WRITE(wrptr, ROTTOP_DNSC,
1348 cfg->v_downscale_factor |
1349 (cfg->h_downscale_factor << 16));
1350
Clarence Ip22fed4c2017-05-16 15:30:51 -04001351 /* partial write check */
1352 if (test_bit(SDE_CAPS_PARTIALWR, mdata->sde_caps_map) &&
1353 !sde_mdp_is_ubwc_format(fmt))
1354 partial_write = BIT(10);
1355
Alan Kwong6bc64622017-02-04 17:36:03 -08001356 /* write config setup for bank configuration */
Clarence Ip22fed4c2017-05-16 15:30:51 -04001357 SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_WRITE_CONFIG, partial_write |
Alan Kwong9487de22016-01-16 22:06:36 -05001358 (ctx->rot->highest_bank & 0x3) << 8);
1359
Alan Kwongfb8eeb22017-02-06 15:00:03 -08001360 if (test_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map))
1361 SDE_REGDMA_WRITE(wrptr, ROT_WB_UBWC_STATIC_CTRL,
1362 ((ctx->rot->ubwc_malsize & 0x3) << 8) |
1363 ((ctx->rot->highest_bank & 0x3) << 4) |
1364 ((ctx->rot->ubwc_swizzle & 0x1) << 0));
1365
Alan Kwong6bc64622017-02-04 17:36:03 -08001366 if (test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map))
1367 SDE_REGDMA_WRITE(wrptr, ROT_WB_SYS_CACHE_MODE,
1368 ctx->sys_cache_mode);
1369
1370 SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, ctx->op_mode |
1371 (flags & SDE_ROT_FLAG_ROT_90 ? BIT(1) : 0) | BIT(0));
Alan Kwong9487de22016-01-16 22:06:36 -05001372
Veera Sundaram Sankaran3f0141e2017-05-10 18:19:29 -07001373 sde_hw_rotator_put_regdma_segment(ctx, wrptr);
1374
1375 /* CDP register WR setting */
1376 cdp_params.enable = test_bit(SDE_QOS_CDP, mdata->sde_qos_map) ?
1377 mdata->enable_cdp[SDE_ROT_WR] : false;
1378 cdp_params.fmt = fmt;
1379 cdp_params.offset = ROT_WB_CDP_CNTL;
1380 sde_hw_rotator_cdp_configs(ctx, &cdp_params);
1381
Veera Sundaram Sankaranf13fb322017-05-11 15:42:28 -07001382 /* QOS LUT/ Danger LUT/ Safe LUT WR setting */
1383 sde_hw_rotator_setup_qos_lut_wr(ctx);
1384
Veera Sundaram Sankaran3f0141e2017-05-10 18:19:29 -07001385 wrptr = sde_hw_rotator_get_regdma_segment(ctx);
1386
Alan Kwong498d59f2017-02-11 18:56:34 -08001387 /* setup traffic shaper for 4k 30fps content or if prefill_bw is set */
Veera Sundaram Sankaranf13fb322017-05-11 15:42:28 -07001388 if (!ctx->sbuf_mode &&
1389 (ctx->is_traffic_shaping || cfg->prefill_bw)) {
Benjamin Chan99eb63b2016-12-21 15:45:26 -05001390 u32 bw;
1391
1392 /*
1393 * Target to finish in 12ms, and we need to set number of bytes
1394 * per clock tick for traffic shaping.
1395 * Each clock tick run @ 19.2MHz, so we need we know total of
1396 * clock ticks in 14ms, i.e. 12ms/(1/19.2MHz) ==> 23040
1397 * Finally, calcualte the byte count per clock tick based on
1398 * resolution, bpp and compression ratio.
1399 */
1400 bw = cfg->dst_rect->w * cfg->dst_rect->h;
1401
1402 if (fmt->chroma_sample == SDE_MDP_CHROMA_420)
1403 bw = (bw * 3) / 2;
1404 else
1405 bw *= fmt->bpp;
1406
1407 bw /= TRAFFIC_SHAPE_CLKTICK_12MS;
Alan Kwong498d59f2017-02-11 18:56:34 -08001408
1409 /* use prefill bandwidth instead if specified */
1410 if (cfg->prefill_bw)
1411 bw = DIV_ROUND_UP(cfg->prefill_bw,
1412 TRAFFIC_SHAPE_VSYNC_CLK);
1413
Benjamin Chan99eb63b2016-12-21 15:45:26 -05001414 if (bw > 0xFF)
1415 bw = 0xFF;
1416 SDE_REGDMA_WRITE(wrptr, ROT_WB_TRAFFIC_SHAPER_WR_CLIENT,
Alan Kwong498d59f2017-02-11 18:56:34 -08001417 BIT(31) | (cfg->prefill_bw ? BIT(27) : 0) | bw);
Benjamin Chan99eb63b2016-12-21 15:45:26 -05001418 SDEROT_DBG("Enable ROT_WB Traffic Shaper:%d\n", bw);
1419 } else {
1420 SDE_REGDMA_WRITE(wrptr, ROT_WB_TRAFFIC_SHAPER_WR_CLIENT, 0);
1421 SDEROT_DBG("Disable ROT_WB Traffic Shaper\n");
1422 }
1423
Alan Kwong9487de22016-01-16 22:06:36 -05001424 /* Update command queue write ptr */
1425 sde_hw_rotator_put_regdma_segment(ctx, wrptr);
1426}
1427
1428/*
1429 * sde_hw_rotator_start_no_regdma - start non-regdma operation
1430 * @ctx: Pointer to rotator context
1431 * @queue_id: Priority queue identifier
1432 */
1433static u32 sde_hw_rotator_start_no_regdma(struct sde_hw_rotator_context *ctx,
1434 enum sde_rot_queue_prio queue_id)
1435{
1436 struct sde_hw_rotator *rot = ctx->rot;
1437 u32 *wrptr;
1438 u32 *rdptr;
1439 u8 *addr;
1440 u32 mask;
1441 u32 blksize;
1442
1443 rdptr = sde_hw_rotator_get_regdma_segment_base(ctx);
1444 wrptr = sde_hw_rotator_get_regdma_segment(ctx);
1445
1446 if (rot->irq_num >= 0) {
1447 SDE_REGDMA_WRITE(wrptr, ROTTOP_INTR_EN, 1);
1448 SDE_REGDMA_WRITE(wrptr, ROTTOP_INTR_CLEAR, 1);
1449 reinit_completion(&ctx->rot_comp);
Alan Kwong818b7fc2016-07-24 22:07:41 -04001450 sde_hw_rotator_enable_irq(rot);
Alan Kwong9487de22016-01-16 22:06:36 -05001451 }
1452
Alan Kwong6bc64622017-02-04 17:36:03 -08001453 SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, ctx->start_ctrl);
Alan Kwong9487de22016-01-16 22:06:36 -05001454
1455 /* Update command queue write ptr */
1456 sde_hw_rotator_put_regdma_segment(ctx, wrptr);
1457
1458 SDEROT_DBG("BEGIN %d\n", ctx->timestamp);
1459 /* Write all command stream to Rotator blocks */
1460 /* Rotator will start right away after command stream finish writing */
1461 while (rdptr < wrptr) {
1462 u32 op = REGDMA_OP_MASK & *rdptr;
1463
1464 switch (op) {
1465 case REGDMA_OP_NOP:
1466 SDEROT_DBG("NOP\n");
1467 rdptr++;
1468 break;
1469 case REGDMA_OP_REGWRITE:
1470 SDEROT_DBG("REGW %6.6x %8.8x\n",
1471 rdptr[0] & REGDMA_ADDR_OFFSET_MASK,
1472 rdptr[1]);
1473 addr = rot->mdss_base +
1474 (*rdptr++ & REGDMA_ADDR_OFFSET_MASK);
1475 writel_relaxed(*rdptr++, addr);
1476 break;
1477 case REGDMA_OP_REGMODIFY:
1478 SDEROT_DBG("REGM %6.6x %8.8x %8.8x\n",
1479 rdptr[0] & REGDMA_ADDR_OFFSET_MASK,
1480 rdptr[1], rdptr[2]);
1481 addr = rot->mdss_base +
1482 (*rdptr++ & REGDMA_ADDR_OFFSET_MASK);
1483 mask = *rdptr++;
1484 writel_relaxed((readl_relaxed(addr) & mask) | *rdptr++,
1485 addr);
1486 break;
1487 case REGDMA_OP_BLKWRITE_SINGLE:
1488 SDEROT_DBG("BLKWS %6.6x %6.6x\n",
1489 rdptr[0] & REGDMA_ADDR_OFFSET_MASK,
1490 rdptr[1]);
1491 addr = rot->mdss_base +
1492 (*rdptr++ & REGDMA_ADDR_OFFSET_MASK);
1493 blksize = *rdptr++;
1494 while (blksize--) {
1495 SDEROT_DBG("DATA %8.8x\n", rdptr[0]);
1496 writel_relaxed(*rdptr++, addr);
1497 }
1498 break;
1499 case REGDMA_OP_BLKWRITE_INC:
1500 SDEROT_DBG("BLKWI %6.6x %6.6x\n",
1501 rdptr[0] & REGDMA_ADDR_OFFSET_MASK,
1502 rdptr[1]);
1503 addr = rot->mdss_base +
1504 (*rdptr++ & REGDMA_ADDR_OFFSET_MASK);
1505 blksize = *rdptr++;
1506 while (blksize--) {
1507 SDEROT_DBG("DATA %8.8x\n", rdptr[0]);
1508 writel_relaxed(*rdptr++, addr);
1509 addr += 4;
1510 }
1511 break;
1512 default:
1513 /* Other not supported OP mode
1514 * Skip data for now for unregonized OP mode
1515 */
1516 SDEROT_DBG("UNDEFINED\n");
1517 rdptr++;
1518 break;
1519 }
1520 }
1521 SDEROT_DBG("END %d\n", ctx->timestamp);
1522
1523 return ctx->timestamp;
1524}
1525
1526/*
1527 * sde_hw_rotator_start_regdma - start regdma operation
1528 * @ctx: Pointer to rotator context
1529 * @queue_id: Priority queue identifier
1530 */
1531static u32 sde_hw_rotator_start_regdma(struct sde_hw_rotator_context *ctx,
1532 enum sde_rot_queue_prio queue_id)
1533{
1534 struct sde_hw_rotator *rot = ctx->rot;
1535 u32 *wrptr;
1536 u32 regdmaSlot;
1537 u32 offset;
1538 long length;
1539 long ts_length;
1540 u32 enableInt;
1541 u32 swts = 0;
1542 u32 mask = 0;
Alan Kwong6bc64622017-02-04 17:36:03 -08001543 u32 trig_sel;
Alan Kwong9487de22016-01-16 22:06:36 -05001544
1545 wrptr = sde_hw_rotator_get_regdma_segment(ctx);
1546
Alan Kwong9487de22016-01-16 22:06:36 -05001547 /*
1548 * Last ROT command must be ROT_START before REGDMA start
1549 */
Alan Kwong6bc64622017-02-04 17:36:03 -08001550 SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, ctx->start_ctrl);
1551
Alan Kwong9487de22016-01-16 22:06:36 -05001552 sde_hw_rotator_put_regdma_segment(ctx, wrptr);
1553
1554 /*
1555 * Start REGDMA with command offset and size
1556 */
1557 regdmaSlot = sde_hw_rotator_get_regdma_ctxidx(ctx);
1558 length = ((long)wrptr - (long)ctx->regdma_base) / 4;
1559 offset = (u32)(ctx->regdma_base - (u32 *)(rot->mdss_base +
1560 REGDMA_RAM_REGDMA_CMD_RAM));
1561 enableInt = ((ctx->timestamp & 1) + 1) << 30;
Alan Kwong6bc64622017-02-04 17:36:03 -08001562 trig_sel = ctx->sbuf_mode ? REGDMA_CMD_TRIG_SEL_MDP_FLUSH :
1563 REGDMA_CMD_TRIG_SEL_SW_START;
Alan Kwong9487de22016-01-16 22:06:36 -05001564
1565 SDEROT_DBG(
1566 "regdma(%d)[%d] <== INT:0x%X|length:%ld|offset:0x%X, ts:%X\n",
1567 queue_id, regdmaSlot, enableInt, length, offset,
1568 ctx->timestamp);
1569
1570 /* ensure the command packet is issued before the submit command */
1571 wmb();
1572
1573 /* REGDMA submission for current context */
1574 if (queue_id == ROT_QUEUE_HIGH_PRIORITY) {
1575 SDE_ROTREG_WRITE(rot->mdss_base,
1576 REGDMA_CSR_REGDMA_QUEUE_0_SUBMIT,
Alan Kwong6bc64622017-02-04 17:36:03 -08001577 (ctx->sbuf_mode ? enableInt : 0) | trig_sel |
1578 ((length & 0x3ff) << 14) | offset);
Alan Kwong9487de22016-01-16 22:06:36 -05001579 swts = ctx->timestamp;
1580 mask = ~SDE_REGDMA_SWTS_MASK;
1581 } else {
1582 SDE_ROTREG_WRITE(rot->mdss_base,
1583 REGDMA_CSR_REGDMA_QUEUE_1_SUBMIT,
Alan Kwong6bc64622017-02-04 17:36:03 -08001584 (ctx->sbuf_mode ? enableInt : 0) | trig_sel |
1585 ((length & 0x3ff) << 14) | offset);
Alan Kwong9487de22016-01-16 22:06:36 -05001586 swts = ctx->timestamp << SDE_REGDMA_SWTS_SHIFT;
1587 mask = ~(SDE_REGDMA_SWTS_MASK << SDE_REGDMA_SWTS_SHIFT);
1588 }
1589
Alan Kwong6bc64622017-02-04 17:36:03 -08001590 /* timestamp update can only be used in offline multi-context mode */
1591 if (!ctx->sbuf_mode) {
1592 /* Write timestamp after previous rotator job finished */
1593 sde_hw_rotator_setup_timestamp_packet(ctx, mask, swts);
1594 offset += length;
1595 ts_length = sde_hw_rotator_get_regdma_segment(ctx) - wrptr;
1596 WARN_ON((length + ts_length) > SDE_HW_ROT_REGDMA_SEG_SIZE);
Alan Kwong9487de22016-01-16 22:06:36 -05001597
Alan Kwong6bc64622017-02-04 17:36:03 -08001598 /* ensure command packet is issue before the submit command */
1599 wmb();
Alan Kwong9487de22016-01-16 22:06:36 -05001600
Alan Kwong6bc64622017-02-04 17:36:03 -08001601 if (queue_id == ROT_QUEUE_HIGH_PRIORITY) {
1602 SDE_ROTREG_WRITE(rot->mdss_base,
1603 REGDMA_CSR_REGDMA_QUEUE_0_SUBMIT,
1604 enableInt | (ts_length << 14) | offset);
1605 } else {
1606 SDE_ROTREG_WRITE(rot->mdss_base,
1607 REGDMA_CSR_REGDMA_QUEUE_1_SUBMIT,
1608 enableInt | (ts_length << 14) | offset);
1609 }
Alan Kwong9487de22016-01-16 22:06:36 -05001610 }
1611
Alan Kwong9487de22016-01-16 22:06:36 -05001612 /* Update command queue write ptr */
1613 sde_hw_rotator_put_regdma_segment(ctx, wrptr);
1614
1615 return ctx->timestamp;
1616}
1617
1618/*
1619 * sde_hw_rotator_wait_done_no_regdma - wait for non-regdma completion
1620 * @ctx: Pointer to rotator context
1621 * @queue_id: Priority queue identifier
1622 * @flags: Option flag
1623 */
1624static u32 sde_hw_rotator_wait_done_no_regdma(
1625 struct sde_hw_rotator_context *ctx,
1626 enum sde_rot_queue_prio queue_id, u32 flag)
1627{
1628 struct sde_hw_rotator *rot = ctx->rot;
1629 int rc = 0;
1630 u32 sts = 0;
1631 u32 status;
1632 unsigned long flags;
1633
1634 if (rot->irq_num >= 0) {
1635 SDEROT_DBG("Wait for Rotator completion\n");
1636 rc = wait_for_completion_timeout(&ctx->rot_comp,
Alan Kwong6bc64622017-02-04 17:36:03 -08001637 msecs_to_jiffies(rot->koff_timeout));
Alan Kwong9487de22016-01-16 22:06:36 -05001638
1639 spin_lock_irqsave(&rot->rotisr_lock, flags);
1640 status = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
1641 if (rc == 0) {
1642 /*
 1643			 * Timeout; there might be an error,
 1644			 * or the rotator is still busy
1645 */
1646 if (status & ROT_BUSY_BIT)
1647 SDEROT_ERR(
1648 "Timeout waiting for rotator done\n");
1649 else if (status & ROT_ERROR_BIT)
1650 SDEROT_ERR(
 1651					"Rotator reported error status\n");
1652 else
1653 SDEROT_WARN(
1654 "Timeout waiting, but rotator job is done!!\n");
1655
Alan Kwong818b7fc2016-07-24 22:07:41 -04001656 sde_hw_rotator_disable_irq(rot);
Alan Kwong9487de22016-01-16 22:06:36 -05001657 }
1658 spin_unlock_irqrestore(&rot->rotisr_lock, flags);
1659 } else {
1660 int cnt = 200;
1661
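		/* no rotator irq available; poll for up to ~100ms (200 x 500us) */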
1662 do {
1663 udelay(500);
1664 status = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
1665 cnt--;
1666 } while ((cnt > 0) && (status & ROT_BUSY_BIT)
1667 && ((status & ROT_ERROR_BIT) == 0));
1668
1669 if (status & ROT_ERROR_BIT)
1670 SDEROT_ERR("Rotator error\n");
1671 else if (status & ROT_BUSY_BIT)
1672 SDEROT_ERR("Rotator busy\n");
1673
1674 SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
1675 ROT_DONE_CLEAR);
1676 }
1677
1678 sts = (status & ROT_ERROR_BIT) ? -ENODEV : 0;
1679
1680 return sts;
1681}
1682
1683/*
1684 * sde_hw_rotator_wait_done_regdma - wait for regdma completion
1685 * @ctx: Pointer to rotator context
1686 * @queue_id: Priority queue identifier
1687 * @flags: Option flag
1688 */
1689static u32 sde_hw_rotator_wait_done_regdma(
1690 struct sde_hw_rotator_context *ctx,
1691 enum sde_rot_queue_prio queue_id, u32 flag)
1692{
1693 struct sde_hw_rotator *rot = ctx->rot;
1694 int rc = 0;
1695 u32 status;
1696 u32 last_isr;
1697 u32 last_ts;
1698 u32 int_id;
Alan Kwong818b7fc2016-07-24 22:07:41 -04001699 u32 swts;
Alan Kwong9487de22016-01-16 22:06:36 -05001700 u32 sts = 0;
Alan Kwong9487de22016-01-16 22:06:36 -05001701 unsigned long flags;
1702
1703 if (rot->irq_num >= 0) {
1704 SDEROT_DBG("Wait for REGDMA completion, ctx:%p, ts:%X\n",
1705 ctx, ctx->timestamp);
Alan Kwong818b7fc2016-07-24 22:07:41 -04001706 rc = wait_event_timeout(ctx->regdma_waitq,
1707 !sde_hw_rotator_pending_swts(rot, ctx, &swts),
Alan Kwong6bc64622017-02-04 17:36:03 -08001708 msecs_to_jiffies(rot->koff_timeout));
Alan Kwong9487de22016-01-16 22:06:36 -05001709
Benjamin Chane7ca72e2016-12-22 18:42:34 -05001710 ATRACE_INT("sde_rot_done", 0);
Alan Kwong9487de22016-01-16 22:06:36 -05001711 spin_lock_irqsave(&rot->rotisr_lock, flags);
1712
1713 last_isr = ctx->last_regdma_isr_status;
1714 last_ts = ctx->last_regdma_timestamp;
1715 status = last_isr & REGDMA_INT_MASK;
1716 int_id = last_ts & 1;
1717 SDEROT_DBG("INT status:0x%X, INT id:%d, timestamp:0x%X\n",
1718 status, int_id, last_ts);
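		/*
		 * int_id mirrors the interrupt select chosen at submit time,
		 * where enableInt is derived from the low bit of the context
		 * timestamp.
		 */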
1719
1720 if (rc == 0 || (status & REGDMA_INT_ERR_MASK)) {
Alan Kwong818b7fc2016-07-24 22:07:41 -04001721 bool pending;
1722
1723 pending = sde_hw_rotator_pending_swts(rot, ctx, &swts);
Alan Kwong9487de22016-01-16 22:06:36 -05001724 SDEROT_ERR(
Alan Kwong818b7fc2016-07-24 22:07:41 -04001725			"Timeout waiting for regdma interrupt status, ts:0x%X/0x%X pending:%d\n",
1726 ctx->timestamp, swts, pending);
Alan Kwong9487de22016-01-16 22:06:36 -05001727
1728 if (status & REGDMA_WATCHDOG_INT)
1729 SDEROT_ERR("REGDMA watchdog interrupt\n");
1730 else if (status & REGDMA_INVALID_DESCRIPTOR)
1731 SDEROT_ERR("REGDMA invalid descriptor\n");
1732 else if (status & REGDMA_INCOMPLETE_CMD)
1733 SDEROT_ERR("REGDMA incomplete command\n");
1734 else if (status & REGDMA_INVALID_CMD)
1735 SDEROT_ERR("REGDMA invalid command\n");
1736
Alan Kwong818b7fc2016-07-24 22:07:41 -04001737 sde_hw_rotator_dump_status(rot);
Alan Kwong9487de22016-01-16 22:06:36 -05001738 status = ROT_ERROR_BIT;
Alan Kwong818b7fc2016-07-24 22:07:41 -04001739 } else {
1740 if (rc == 1)
1741 SDEROT_WARN(
1742 "REGDMA done but no irq, ts:0x%X/0x%X\n",
1743 ctx->timestamp, swts);
Alan Kwong9487de22016-01-16 22:06:36 -05001744 status = 0;
1745 }
1746
Alan Kwong9487de22016-01-16 22:06:36 -05001747 spin_unlock_irqrestore(&rot->rotisr_lock, flags);
1748 } else {
1749 int cnt = 200;
Alan Kwongb0679602016-11-27 17:04:13 -08001750 bool pending;
Alan Kwong9487de22016-01-16 22:06:36 -05001751
1752 do {
1753 udelay(500);
Alan Kwongb0679602016-11-27 17:04:13 -08001754 last_isr = SDE_ROTREG_READ(rot->mdss_base,
1755 REGDMA_CSR_REGDMA_INT_STATUS);
1756 pending = sde_hw_rotator_pending_swts(rot, ctx, &swts);
Alan Kwong9487de22016-01-16 22:06:36 -05001757 cnt--;
Alan Kwongb0679602016-11-27 17:04:13 -08001758 } while ((cnt > 0) && pending &&
1759 ((last_isr & REGDMA_INT_ERR_MASK) == 0));
Alan Kwong9487de22016-01-16 22:06:36 -05001760
Alan Kwongb0679602016-11-27 17:04:13 -08001761 if (last_isr & REGDMA_INT_ERR_MASK) {
1762 SDEROT_ERR("Rotator error, ts:0x%X/0x%X status:%x\n",
1763 ctx->timestamp, swts, last_isr);
1764 sde_hw_rotator_dump_status(rot);
1765 status = ROT_ERROR_BIT;
1766 } else if (pending) {
1767 SDEROT_ERR("Rotator timeout, ts:0x%X/0x%X status:%x\n",
1768 ctx->timestamp, swts, last_isr);
1769 sde_hw_rotator_dump_status(rot);
1770 status = ROT_ERROR_BIT;
1771 } else {
1772 status = 0;
1773 }
Alan Kwong9487de22016-01-16 22:06:36 -05001774
1775 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_INT_CLEAR,
Alan Kwongb0679602016-11-27 17:04:13 -08001776 last_isr);
Alan Kwong9487de22016-01-16 22:06:36 -05001777 }
1778
1779 sts = (status & ROT_ERROR_BIT) ? -ENODEV : 0;
1780
Benjamin Chan4ec1f1d2016-09-15 22:49:49 -04001781 if (status & ROT_ERROR_BIT)
Benjamin Chan2d6411a2017-03-28 18:01:53 -04001782 SDEROT_EVTLOG_TOUT_HANDLER("rot", "rot_dbg_bus",
1783 "vbif_dbg_bus", "panic");
Benjamin Chan4ec1f1d2016-09-15 22:49:49 -04001784
Alan Kwong9487de22016-01-16 22:06:36 -05001785 return sts;
1786}
1787
1788/*
1789 * setup_rotator_ops - setup callback functions for the low-level HAL
1790 * @ops: Pointer to low-level ops callback
1791 * @mode: Operation mode (non-regdma or regdma)
1792 */
1793static void setup_rotator_ops(struct sde_hw_rotator_ops *ops,
1794 enum sde_rotator_regdma_mode mode)
1795{
1796 ops->setup_rotator_fetchengine = sde_hw_rotator_setup_fetchengine;
1797 ops->setup_rotator_wbengine = sde_hw_rotator_setup_wbengine;
1798 if (mode == ROT_REGDMA_ON) {
1799 ops->start_rotator = sde_hw_rotator_start_regdma;
1800 ops->wait_rotator_done = sde_hw_rotator_wait_done_regdma;
1801 } else {
1802 ops->start_rotator = sde_hw_rotator_start_no_regdma;
1803 ops->wait_rotator_done = sde_hw_rotator_wait_done_no_regdma;
1804 }
1805}
1806
1807/*
1808 * sde_hw_rotator_swts_create - create software timestamp buffer
1809 * @rot: Pointer to rotator hw
1810 *
1811 * This buffer is used by regdma to keep track of last completed command.
1812 */
1813static int sde_hw_rotator_swts_create(struct sde_hw_rotator *rot)
1814{
1815 int rc = 0;
1816 struct ion_handle *handle;
1817 struct sde_mdp_img_data *data;
Abhijit Kulkarni298c8232016-09-26 22:32:10 -07001818 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
Alan Kwong9487de22016-01-16 22:06:36 -05001819 u32 bufsize = sizeof(int) * SDE_HW_ROT_REGDMA_TOTAL_CTX * 2;
1820
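	/*
	 * One 32-bit timestamp slot is allocated per REGDMA context for
	 * each of the two priority queues, matching the ts_addr indexing
	 * in sde_hw_rotator_alloc_rotctx().
	 */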
Abhijit Kulkarni298c8232016-09-26 22:32:10 -07001821 rot->iclient = mdata->iclient;
Alan Kwong9487de22016-01-16 22:06:36 -05001822
1823 handle = ion_alloc(rot->iclient, bufsize, SZ_4K,
1824 ION_HEAP(ION_SYSTEM_HEAP_ID), 0);
1825 if (IS_ERR_OR_NULL(handle)) {
1826 SDEROT_ERR("ion memory allocation failed\n");
1827 return -ENOMEM;
1828 }
1829
1830 data = &rot->swts_buf;
1831 data->len = bufsize;
1832 data->srcp_dma_buf = ion_share_dma_buf(rot->iclient, handle);
1833 if (IS_ERR(data->srcp_dma_buf)) {
1834 SDEROT_ERR("ion_dma_buf setup failed\n");
1835 rc = -ENOMEM;
1836 goto imap_err;
1837 }
1838
1839 sde_smmu_ctrl(1);
1840
1841 data->srcp_attachment = sde_smmu_dma_buf_attach(data->srcp_dma_buf,
1842 &rot->pdev->dev, SDE_IOMMU_DOMAIN_ROT_UNSECURE);
1843 if (IS_ERR_OR_NULL(data->srcp_attachment)) {
1844 SDEROT_ERR("sde_smmu_dma_buf_attach error\n");
1845 rc = -ENOMEM;
1846 goto err_put;
1847 }
1848
1849 data->srcp_table = dma_buf_map_attachment(data->srcp_attachment,
1850 DMA_BIDIRECTIONAL);
1851 if (IS_ERR_OR_NULL(data->srcp_table)) {
1852 SDEROT_ERR("dma_buf_map_attachment error\n");
1853 rc = -ENOMEM;
1854 goto err_detach;
1855 }
1856
1857 rc = sde_smmu_map_dma_buf(data->srcp_dma_buf, data->srcp_table,
1858 SDE_IOMMU_DOMAIN_ROT_UNSECURE, &data->addr,
1859 &data->len, DMA_BIDIRECTIONAL);
Alan Kwong6ce448d2016-11-24 18:45:20 -08001860 if (rc < 0) {
Alan Kwong9487de22016-01-16 22:06:36 -05001861 SDEROT_ERR("smmu_map_dma_buf failed: (%d)\n", rc);
1862 goto err_unmap;
1863 }
1864
Alan Kwong6ce448d2016-11-24 18:45:20 -08001865 dma_buf_begin_cpu_access(data->srcp_dma_buf, DMA_FROM_DEVICE);
Alan Kwong9487de22016-01-16 22:06:36 -05001866 rot->swts_buffer = dma_buf_kmap(data->srcp_dma_buf, 0);
1867 if (IS_ERR_OR_NULL(rot->swts_buffer)) {
1868 SDEROT_ERR("ion kernel memory mapping failed\n");
 1869		rc = -ENOMEM;
1870 goto kmap_err;
1871 }
1872
1873 data->mapped = true;
1874 SDEROT_DBG("swts buffer mapped: %pad/%lx va:%p\n", &data->addr,
1875 data->len, rot->swts_buffer);
1876
1877 ion_free(rot->iclient, handle);
1878
1879 sde_smmu_ctrl(0);
1880
1881 return rc;
1882kmap_err:
1883 sde_smmu_unmap_dma_buf(data->srcp_table, SDE_IOMMU_DOMAIN_ROT_UNSECURE,
1884 DMA_FROM_DEVICE, data->srcp_dma_buf);
1885err_unmap:
1886 dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table,
1887 DMA_FROM_DEVICE);
1888err_detach:
1889 dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment);
1890err_put:
1891 dma_buf_put(data->srcp_dma_buf);
1892 data->srcp_dma_buf = NULL;
1893imap_err:
1894 ion_free(rot->iclient, handle);
1895
1896 return rc;
1897}
1898
1899/*
1900 * sde_hw_rotator_swtc_destroy - destroy software timestamp buffer
1901 * @rot: Pointer to rotator hw
1902 */
1903static void sde_hw_rotator_swtc_destroy(struct sde_hw_rotator *rot)
1904{
1905 struct sde_mdp_img_data *data;
1906
1907 data = &rot->swts_buf;
1908
Alan Kwong6ce448d2016-11-24 18:45:20 -08001909 dma_buf_end_cpu_access(data->srcp_dma_buf, DMA_FROM_DEVICE);
Alan Kwong9487de22016-01-16 22:06:36 -05001910 dma_buf_kunmap(data->srcp_dma_buf, 0, rot->swts_buffer);
1911
1912 sde_smmu_unmap_dma_buf(data->srcp_table, SDE_IOMMU_DOMAIN_ROT_UNSECURE,
1913 DMA_FROM_DEVICE, data->srcp_dma_buf);
1914 dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table,
1915 DMA_FROM_DEVICE);
1916 dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment);
1917 dma_buf_put(data->srcp_dma_buf);
1918 data->srcp_dma_buf = NULL;
1919}
1920
1921/*
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04001922 * sde_hw_rotator_pre_pmevent - SDE rotator core will call this before a
1923 * PM event occurs
1924 * @mgr: Pointer to rotator manager
1925 * @pmon: Boolean indicate an on/off power event
1926 */
1927void sde_hw_rotator_pre_pmevent(struct sde_rot_mgr *mgr, bool pmon)
1928{
1929 struct sde_hw_rotator *rot;
1930 u32 l_ts, h_ts, swts, hwts;
1931 u32 rotsts, regdmasts;
1932
1933 /*
1934 * Check last HW timestamp with SW timestamp before power off event.
 1935	 * If there is a mismatch, it is quite possible that the rotator HW
 1936	 * is either hung or has not finished the last submitted job. In that
 1937	 * case, it is best to trigger a timeout eventlog to capture useful
 1938	 * event log data for analysis.
1939 */
1940 if (!pmon && mgr && mgr->hw_data) {
1941 rot = mgr->hw_data;
1942 h_ts = atomic_read(&rot->timestamp[ROT_QUEUE_HIGH_PRIORITY]);
1943 l_ts = atomic_read(&rot->timestamp[ROT_QUEUE_LOW_PRIORITY]);
1944
 1945		/* construct the combined timestamp */
1946 swts = (h_ts & SDE_REGDMA_SWTS_MASK) |
1947 ((l_ts & SDE_REGDMA_SWTS_MASK) <<
1948 SDE_REGDMA_SWTS_SHIFT);
1949
1950 /* Need to turn on clock to access rotator register */
1951 sde_rotator_clk_ctrl(mgr, true);
1952 hwts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);
1953 regdmasts = SDE_ROTREG_READ(rot->mdss_base,
1954 REGDMA_CSR_REGDMA_BLOCK_STATUS);
1955 rotsts = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
1956
1957 SDEROT_DBG(
1958 "swts:0x%x, hwts:0x%x, regdma-sts:0x%x, rottop-sts:0x%x\n",
1959 swts, hwts, regdmasts, rotsts);
1960 SDEROT_EVTLOG(swts, hwts, regdmasts, rotsts);
1961
1962 if ((swts != hwts) && ((regdmasts & REGDMA_BUSY) ||
1963 (rotsts & ROT_STATUS_MASK))) {
1964 SDEROT_ERR(
1965 "Mismatch SWTS with HWTS: swts:0x%x, hwts:0x%x, regdma-sts:0x%x, rottop-sts:0x%x\n",
1966 swts, hwts, regdmasts, rotsts);
Benjamin Chan2d6411a2017-03-28 18:01:53 -04001967 SDEROT_EVTLOG_TOUT_HANDLER("rot", "rot_dbg_bus",
1968 "vbif_dbg_bus", "panic");
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04001969 }
1970
1971 /* Turn off rotator clock after checking rotator registers */
1972 sde_rotator_clk_ctrl(mgr, false);
1973 }
1974}
1975
1976/*
1977 * sde_hw_rotator_post_pmevent - SDE rotator core will call this after a
1978 * PM event occurs
1979 * @mgr: Pointer to rotator manager
1980 * @pmon: Boolean indicate an on/off power event
1981 */
1982void sde_hw_rotator_post_pmevent(struct sde_rot_mgr *mgr, bool pmon)
1983{
1984 struct sde_hw_rotator *rot;
1985 u32 l_ts, h_ts, swts;
1986
1987 /*
1988 * After a power on event, the rotator HW is reset to default setting.
1989 * It is necessary to synchronize the SW timestamp with the HW.
1990 */
1991 if (pmon && mgr && mgr->hw_data) {
1992 rot = mgr->hw_data;
1993 h_ts = atomic_read(&rot->timestamp[ROT_QUEUE_HIGH_PRIORITY]);
1994 l_ts = atomic_read(&rot->timestamp[ROT_QUEUE_LOW_PRIORITY]);
1995
 1996		/* construct the combined timestamp */
1997 swts = (h_ts & SDE_REGDMA_SWTS_MASK) |
1998 ((l_ts & SDE_REGDMA_SWTS_MASK) <<
1999 SDE_REGDMA_SWTS_SHIFT);
2000
 2001		SDEROT_DBG("swts:0x%x, h_ts:0x%x, l_ts:0x%x\n",
2002 swts, h_ts, l_ts);
2003 SDEROT_EVTLOG(swts, h_ts, l_ts);
2004 rot->reset_hw_ts = true;
2005 rot->last_hw_ts = swts;
2006 }
2007}
2008
2009/*
Alan Kwong9487de22016-01-16 22:06:36 -05002010 * sde_hw_rotator_destroy - Destroy hw rotator and free allocated resources
2011 * @mgr: Pointer to rotator manager
2012 */
2013static void sde_hw_rotator_destroy(struct sde_rot_mgr *mgr)
2014{
2015 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
2016 struct sde_hw_rotator *rot;
2017
2018 if (!mgr || !mgr->pdev || !mgr->hw_data) {
2019 SDEROT_ERR("null parameters\n");
2020 return;
2021 }
2022
2023 rot = mgr->hw_data;
2024 if (rot->irq_num >= 0)
2025 devm_free_irq(&mgr->pdev->dev, rot->irq_num, mdata);
2026
2027 if (rot->mode == ROT_REGDMA_ON)
2028 sde_hw_rotator_swtc_destroy(rot);
2029
2030 devm_kfree(&mgr->pdev->dev, mgr->hw_data);
2031 mgr->hw_data = NULL;
2032}
2033
2034/*
2035 * sde_hw_rotator_alloc_ext - allocate rotator resource from rotator hw
2036 * @mgr: Pointer to rotator manager
2037 * @pipe_id: pipe identifier (not used)
2038 * @wb_id: writeback identifier/priority queue identifier
2039 *
2040 * This function allocates a new hw rotator resource for the given priority.
2041 */
2042static struct sde_rot_hw_resource *sde_hw_rotator_alloc_ext(
2043 struct sde_rot_mgr *mgr, u32 pipe_id, u32 wb_id)
2044{
2045 struct sde_hw_rotator_resource_info *resinfo;
2046
2047 if (!mgr || !mgr->hw_data) {
2048 SDEROT_ERR("null parameters\n");
2049 return NULL;
2050 }
2051
2052 /*
2053 * Allocate rotator resource info. Each allocation is per
2054 * HW priority queue
2055 */
2056 resinfo = devm_kzalloc(&mgr->pdev->dev, sizeof(*resinfo), GFP_KERNEL);
2057 if (!resinfo) {
 2058		SDEROT_ERR("Failed to allocate HW rotator resource info\n");
2059 return NULL;
2060 }
2061
2062 resinfo->rot = mgr->hw_data;
2063 resinfo->hw.wb_id = wb_id;
2064 atomic_set(&resinfo->hw.num_active, 0);
2065 init_waitqueue_head(&resinfo->hw.wait_queue);
2066
2067 /* For non-regdma, only support one active session */
2068 if (resinfo->rot->mode == ROT_REGDMA_OFF)
2069 resinfo->hw.max_active = 1;
2070 else {
2071 resinfo->hw.max_active = SDE_HW_ROT_REGDMA_TOTAL_CTX - 1;
2072
2073 if (resinfo->rot->iclient == NULL)
2074 sde_hw_rotator_swts_create(resinfo->rot);
2075 }
2076
Alan Kwongf987ea32016-07-06 12:11:44 -04002077 if (resinfo->rot->irq_num >= 0)
Alan Kwong818b7fc2016-07-24 22:07:41 -04002078 sde_hw_rotator_enable_irq(resinfo->rot);
Alan Kwongf987ea32016-07-06 12:11:44 -04002079
Alan Kwong9487de22016-01-16 22:06:36 -05002080 SDEROT_DBG("New rotator resource:%p, priority:%d\n",
2081 resinfo, wb_id);
2082
2083 return &resinfo->hw;
2084}
2085
2086/*
2087 * sde_hw_rotator_free_ext - free the given rotator resource
2088 * @mgr: Pointer to rotator manager
2089 * @hw: Pointer to rotator resource
2090 */
2091static void sde_hw_rotator_free_ext(struct sde_rot_mgr *mgr,
2092 struct sde_rot_hw_resource *hw)
2093{
2094 struct sde_hw_rotator_resource_info *resinfo;
2095
2096 if (!mgr || !mgr->hw_data)
2097 return;
2098
2099 resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
2100
2101 SDEROT_DBG(
2102 "Free rotator resource:%p, priority:%d, active:%d, pending:%d\n",
2103 resinfo, hw->wb_id, atomic_read(&hw->num_active),
2104 hw->pending_count);
2105
Alan Kwongf987ea32016-07-06 12:11:44 -04002106 if (resinfo->rot->irq_num >= 0)
Alan Kwong818b7fc2016-07-24 22:07:41 -04002107 sde_hw_rotator_disable_irq(resinfo->rot);
Alan Kwongf987ea32016-07-06 12:11:44 -04002108
Alan Kwong9487de22016-01-16 22:06:36 -05002109 devm_kfree(&mgr->pdev->dev, resinfo);
2110}
2111
2112/*
2113 * sde_hw_rotator_alloc_rotctx - allocate rotator context
2114 * @rot: Pointer to rotator hw
2115 * @hw: Pointer to rotator resource
2116 * @session_id: Session identifier of this context
Alan Kwong6bc64622017-02-04 17:36:03 -08002117 * @sbuf_mode: true if stream buffer is requested
Alan Kwong9487de22016-01-16 22:06:36 -05002118 *
2119 * This function allocates a new rotator context for the given session id.
2120 */
2121static struct sde_hw_rotator_context *sde_hw_rotator_alloc_rotctx(
2122 struct sde_hw_rotator *rot,
2123 struct sde_rot_hw_resource *hw,
Alan Kwong6bc64622017-02-04 17:36:03 -08002124 u32 session_id,
2125 bool sbuf_mode)
Alan Kwong9487de22016-01-16 22:06:36 -05002126{
2127 struct sde_hw_rotator_context *ctx;
2128
2129 /* Allocate rotator context */
2130 ctx = devm_kzalloc(&rot->pdev->dev, sizeof(*ctx), GFP_KERNEL);
2131 if (!ctx) {
 2132		SDEROT_ERR("Failed to allocate HW rotator context\n");
2133 return NULL;
2134 }
2135
2136 ctx->rot = rot;
2137 ctx->q_id = hw->wb_id;
2138 ctx->session_id = session_id;
2139 ctx->hwres = hw;
2140 ctx->timestamp = atomic_add_return(1, &rot->timestamp[ctx->q_id]);
2141 ctx->timestamp &= SDE_REGDMA_SWTS_MASK;
2142 ctx->is_secure = false;
Alan Kwong6bc64622017-02-04 17:36:03 -08002143 ctx->sbuf_mode = sbuf_mode;
2144 INIT_LIST_HEAD(&ctx->list);
Alan Kwong9487de22016-01-16 22:06:36 -05002145
2146 ctx->regdma_base = rot->cmd_wr_ptr[ctx->q_id]
2147 [sde_hw_rotator_get_regdma_ctxidx(ctx)];
2148 ctx->regdma_wrptr = ctx->regdma_base;
2149 ctx->ts_addr = (dma_addr_t)((u32 *)rot->swts_buf.addr +
2150 ctx->q_id * SDE_HW_ROT_REGDMA_TOTAL_CTX +
2151 sde_hw_rotator_get_regdma_ctxidx(ctx));
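	/*
	 * ts_addr is this context's slot in the software timestamp buffer
	 * (see sde_hw_rotator_swts_create()), indexed by priority queue and
	 * REGDMA context index.
	 */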
2152
Alan Kwong818b7fc2016-07-24 22:07:41 -04002153 ctx->last_regdma_timestamp = SDE_REGDMA_SWTS_INVALID;
2154
Alan Kwong9487de22016-01-16 22:06:36 -05002155 init_completion(&ctx->rot_comp);
Alan Kwong818b7fc2016-07-24 22:07:41 -04002156 init_waitqueue_head(&ctx->regdma_waitq);
Alan Kwong9487de22016-01-16 22:06:36 -05002157
 2158	/* Store rotator context for lookup purposes */
2159 sde_hw_rotator_put_ctx(ctx);
2160
2161 SDEROT_DBG(
Alan Kwong6bc64622017-02-04 17:36:03 -08002162 "New rot CTX:%p, ctxidx:%d, session-id:%d, prio:%d, timestamp:%X, active:%d sbuf:%d\n",
Alan Kwong9487de22016-01-16 22:06:36 -05002163 ctx, sde_hw_rotator_get_regdma_ctxidx(ctx), ctx->session_id,
2164 ctx->q_id, ctx->timestamp,
Alan Kwong6bc64622017-02-04 17:36:03 -08002165 atomic_read(&ctx->hwres->num_active),
2166 ctx->sbuf_mode);
Alan Kwong9487de22016-01-16 22:06:36 -05002167
2168 return ctx;
2169}
2170
2171/*
2172 * sde_hw_rotator_free_rotctx - free the given rotator context
2173 * @rot: Pointer to rotator hw
2174 * @ctx: Pointer to rotator context
2175 */
2176static void sde_hw_rotator_free_rotctx(struct sde_hw_rotator *rot,
2177 struct sde_hw_rotator_context *ctx)
2178{
2179 if (!rot || !ctx)
2180 return;
2181
2182 SDEROT_DBG(
Alan Kwong6bc64622017-02-04 17:36:03 -08002183 "Free rot CTX:%p, ctxidx:%d, session-id:%d, prio:%d, timestamp:%X, active:%d sbuf:%d\n",
Alan Kwong9487de22016-01-16 22:06:36 -05002184 ctx, sde_hw_rotator_get_regdma_ctxidx(ctx), ctx->session_id,
2185 ctx->q_id, ctx->timestamp,
Alan Kwong6bc64622017-02-04 17:36:03 -08002186 atomic_read(&ctx->hwres->num_active),
2187 ctx->sbuf_mode);
Alan Kwong9487de22016-01-16 22:06:36 -05002188
Benjamin Chanc3e185f2016-11-08 21:48:21 -05002189	/* Clear rotator context from the lookup table */
2190 sde_hw_rotator_clr_ctx(ctx);
Alan Kwong9487de22016-01-16 22:06:36 -05002191
2192 devm_kfree(&rot->pdev->dev, ctx);
2193}
2194
2195/*
2196 * sde_hw_rotator_config - configure hw for the given rotation entry
2197 * @hw: Pointer to rotator resource
2198 * @entry: Pointer to rotation entry
2199 *
2200 * This function setup the fetch/writeback/rotator blocks, as well as VBIF
2201 * based on the given rotation entry.
2202 */
2203static int sde_hw_rotator_config(struct sde_rot_hw_resource *hw,
2204 struct sde_rot_entry *entry)
2205{
2206 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
2207 struct sde_hw_rotator *rot;
2208 struct sde_hw_rotator_resource_info *resinfo;
2209 struct sde_hw_rotator_context *ctx;
2210 struct sde_hw_rot_sspp_cfg sspp_cfg;
2211 struct sde_hw_rot_wb_cfg wb_cfg;
2212 u32 danger_lut = 0; /* applicable for realtime client only */
2213 u32 safe_lut = 0; /* applicable for realtime client only */
2214 u32 flags = 0;
Benjamin Chana9dd3052017-02-14 17:39:32 -05002215 u32 rststs = 0;
Alan Kwong9487de22016-01-16 22:06:36 -05002216 struct sde_rotation_item *item;
Alan Kwong6bc64622017-02-04 17:36:03 -08002217 int ret;
Alan Kwong9487de22016-01-16 22:06:36 -05002218
2219 if (!hw || !entry) {
2220 SDEROT_ERR("null hw resource/entry\n");
2221 return -EINVAL;
2222 }
2223
2224 resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
2225 rot = resinfo->rot;
2226 item = &entry->item;
2227
Alan Kwong6bc64622017-02-04 17:36:03 -08002228 ctx = sde_hw_rotator_alloc_rotctx(rot, hw, item->session_id,
2229 item->output.sbuf);
Alan Kwong9487de22016-01-16 22:06:36 -05002230 if (!ctx) {
2231 SDEROT_ERR("Failed allocating rotator context!!\n");
2232 return -EINVAL;
2233 }
2234
Alan Kwong6bc64622017-02-04 17:36:03 -08002235 /* save entry for debugging purposes */
2236 ctx->last_entry = entry;
2237
2238 if (test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map)) {
2239 if (entry->dst_buf.sbuf) {
2240 u32 op_mode;
2241
2242 if (entry->item.trigger ==
2243 SDE_ROTATOR_TRIGGER_COMMAND)
2244 ctx->start_ctrl = (rot->cmd_trigger << 4);
2245 else if (entry->item.trigger ==
2246 SDE_ROTATOR_TRIGGER_VIDEO)
2247 ctx->start_ctrl = (rot->vid_trigger << 4);
2248 else
2249 ctx->start_ctrl = 0;
2250
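			/*
			 * Inline (sbuf) rotation setup: sys_cache_mode packs
			 * the system cache id (scid) and enable bits, and
			 * op_mode enables sbuf operation with the configured
			 * headroom in lines; bit positions follow the
			 * shifts/masks used in the assignments below.
			 */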
2251 ctx->sys_cache_mode = BIT(15) |
2252 ((item->output.scid & 0x1f) << 8) |
2253 (item->output.writeback ? 0x5 : 0);
2254
2255 ctx->op_mode = BIT(4) |
2256 ((ctx->rot->sbuf_headroom & 0xff) << 8);
2257
2258 /* detect transition to inline mode */
2259 op_mode = (SDE_ROTREG_READ(rot->mdss_base,
2260 ROTTOP_OP_MODE) >> 4) & 0x3;
2261 if (!op_mode) {
2262 u32 status;
2263
2264 status = SDE_ROTREG_READ(rot->mdss_base,
2265 ROTTOP_STATUS);
2266 if (status & BIT(0)) {
2267 SDEROT_ERR("rotator busy 0x%x\n",
2268 status);
2269 sde_hw_rotator_dump_status(rot);
2270 SDEROT_EVTLOG_TOUT_HANDLER("rot",
2271 "vbif_dbg_bus",
2272 "panic");
2273 }
2274 }
2275
2276 } else {
2277 ctx->start_ctrl = BIT(0);
2278 ctx->sys_cache_mode = 0;
2279 ctx->op_mode = 0;
2280 }
2281 } else {
2282 ctx->start_ctrl = BIT(0);
2283 }
2284
2285 SDEROT_EVTLOG(ctx->start_ctrl, ctx->sys_cache_mode, ctx->op_mode);
2286
Benjamin Chana9dd3052017-02-14 17:39:32 -05002287 /*
 2288	 * If the rotator HW was reset but the PM event notification was
 2289	 * missed, we need to initialize the SW timestamp automatically.
2290 */
2291 rststs = SDE_ROTREG_READ(rot->mdss_base, REGDMA_RESET_STATUS_REG);
2292 if (!rot->reset_hw_ts && rststs) {
2293 u32 l_ts, h_ts, swts;
2294
2295 swts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);
2296 h_ts = atomic_read(&rot->timestamp[ROT_QUEUE_HIGH_PRIORITY]);
2297 l_ts = atomic_read(&rot->timestamp[ROT_QUEUE_LOW_PRIORITY]);
2298 SDEROT_EVTLOG(0xbad0, rststs, swts, h_ts, l_ts);
2299
2300 if (ctx->q_id == ROT_QUEUE_HIGH_PRIORITY)
2301 h_ts = (h_ts - 1) & SDE_REGDMA_SWTS_MASK;
2302 else
2303 l_ts = (l_ts - 1) & SDE_REGDMA_SWTS_MASK;
2304
 2305		/* construct the combined timestamp */
2306 swts = (h_ts & SDE_REGDMA_SWTS_MASK) |
2307 ((l_ts & SDE_REGDMA_SWTS_MASK) <<
2308 SDE_REGDMA_SWTS_SHIFT);
2309
 2310		SDEROT_DBG("swts:0x%x, h_ts:0x%x, l_ts:0x%x\n",
2311 swts, h_ts, l_ts);
2312 SDEROT_EVTLOG(0x900d, swts, h_ts, l_ts);
2313 rot->last_hw_ts = swts;
2314
2315 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG,
2316 rot->last_hw_ts);
2317 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_RESET_STATUS_REG, 0);
2318 /* ensure write is issued to the rotator HW */
2319 wmb();
2320 }
2321
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04002322 if (rot->reset_hw_ts) {
2323 SDEROT_EVTLOG(rot->last_hw_ts);
2324 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG,
2325 rot->last_hw_ts);
Benjamin Chana9dd3052017-02-14 17:39:32 -05002326 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_RESET_STATUS_REG, 0);
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04002327 /* ensure write is issued to the rotator HW */
2328 wmb();
2329 rot->reset_hw_ts = false;
2330 }
2331
Alan Kwong9487de22016-01-16 22:06:36 -05002332 flags = (item->flags & SDE_ROTATION_FLIP_LR) ?
2333 SDE_ROT_FLAG_FLIP_LR : 0;
2334 flags |= (item->flags & SDE_ROTATION_FLIP_UD) ?
2335 SDE_ROT_FLAG_FLIP_UD : 0;
2336 flags |= (item->flags & SDE_ROTATION_90) ?
2337 SDE_ROT_FLAG_ROT_90 : 0;
2338 flags |= (item->flags & SDE_ROTATION_DEINTERLACE) ?
2339 SDE_ROT_FLAG_DEINTERLACE : 0;
2340 flags |= (item->flags & SDE_ROTATION_SECURE) ?
2341 SDE_ROT_FLAG_SECURE_OVERLAY_SESSION : 0;
Abhijit Kulkarni298c8232016-09-26 22:32:10 -07002342 flags |= (item->flags & SDE_ROTATION_SECURE_CAMERA) ?
2343 SDE_ROT_FLAG_SECURE_CAMERA_SESSION : 0;
2344
Alan Kwong9487de22016-01-16 22:06:36 -05002345
2346 sspp_cfg.img_width = item->input.width;
2347 sspp_cfg.img_height = item->input.height;
Benjamin Chan99eb63b2016-12-21 15:45:26 -05002348 sspp_cfg.fps = entry->perf->config.frame_rate;
2349 sspp_cfg.bw = entry->perf->bw;
Alan Kwong9487de22016-01-16 22:06:36 -05002350 sspp_cfg.fmt = sde_get_format_params(item->input.format);
2351 if (!sspp_cfg.fmt) {
2352 SDEROT_ERR("null format\n");
Alan Kwong6bc64622017-02-04 17:36:03 -08002353 ret = -EINVAL;
2354 goto error;
Alan Kwong9487de22016-01-16 22:06:36 -05002355 }
2356 sspp_cfg.src_rect = &item->src_rect;
2357 sspp_cfg.data = &entry->src_buf;
2358 sde_mdp_get_plane_sizes(sspp_cfg.fmt, item->input.width,
2359 item->input.height, &sspp_cfg.src_plane,
2360 0, /* No bwc_mode */
2361 (flags & SDE_ROT_FLAG_SOURCE_ROTATED_90) ?
2362 true : false);
2363
2364 rot->ops.setup_rotator_fetchengine(ctx, ctx->q_id,
Benjamin Chanfb6faa32016-08-16 17:21:01 -04002365 &sspp_cfg, danger_lut, safe_lut,
2366 entry->dnsc_factor_w, entry->dnsc_factor_h, flags);
Alan Kwong9487de22016-01-16 22:06:36 -05002367
2368 wb_cfg.img_width = item->output.width;
2369 wb_cfg.img_height = item->output.height;
Benjamin Chan99eb63b2016-12-21 15:45:26 -05002370 wb_cfg.fps = entry->perf->config.frame_rate;
2371 wb_cfg.bw = entry->perf->bw;
Alan Kwong9487de22016-01-16 22:06:36 -05002372 wb_cfg.fmt = sde_get_format_params(item->output.format);
2373 wb_cfg.dst_rect = &item->dst_rect;
2374 wb_cfg.data = &entry->dst_buf;
2375 sde_mdp_get_plane_sizes(wb_cfg.fmt, item->output.width,
2376 item->output.height, &wb_cfg.dst_plane,
2377 0, /* No bwc_mode */
2378 (flags & SDE_ROT_FLAG_ROT_90) ? true : false);
2379
2380 wb_cfg.v_downscale_factor = entry->dnsc_factor_h;
2381 wb_cfg.h_downscale_factor = entry->dnsc_factor_w;
Alan Kwong498d59f2017-02-11 18:56:34 -08002382 wb_cfg.prefill_bw = item->prefill_bw;
Alan Kwong9487de22016-01-16 22:06:36 -05002383
2384 rot->ops.setup_rotator_wbengine(ctx, ctx->q_id, &wb_cfg, flags);
2385
2386 /* setup VA mapping for debugfs */
2387 if (rot->dbgmem) {
2388 sde_hw_rotator_map_vaddr(&ctx->src_dbgbuf,
2389 &item->input,
2390 &entry->src_buf);
2391
2392 sde_hw_rotator_map_vaddr(&ctx->dst_dbgbuf,
2393 &item->output,
2394 &entry->dst_buf);
2395 }
2396
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04002397 SDEROT_EVTLOG(ctx->timestamp, flags,
2398 item->input.width, item->input.height,
Benjamin Chan53e3bce2016-08-31 14:43:29 -04002399 item->output.width, item->output.height,
Benjamin Chan59a06052017-01-12 18:06:03 -05002400 entry->src_buf.p[0].addr, entry->dst_buf.p[0].addr,
Benjamin Chan1b94f952017-01-23 17:42:30 -05002401 item->input.format, item->output.format,
2402 entry->perf->config.frame_rate);
Benjamin Chan53e3bce2016-08-31 14:43:29 -04002403
Veera Sundaram Sankaran3e539fe2017-05-10 17:03:32 -07002404 if (!ctx->sbuf_mode && mdata->default_ot_rd_limit) {
Alan Kwong9487de22016-01-16 22:06:36 -05002405 struct sde_mdp_set_ot_params ot_params;
2406
2407 memset(&ot_params, 0, sizeof(struct sde_mdp_set_ot_params));
2408 ot_params.xin_id = XIN_SSPP;
2409 ot_params.num = 0; /* not used */
Alan Kwongeffb5ee2016-03-12 19:47:45 -05002410 ot_params.width = entry->perf->config.input.width;
2411 ot_params.height = entry->perf->config.input.height;
2412 ot_params.fps = entry->perf->config.frame_rate;
Alan Kwong9487de22016-01-16 22:06:36 -05002413 ot_params.reg_off_vbif_lim_conf = MMSS_VBIF_RD_LIM_CONF;
2414 ot_params.reg_off_mdp_clk_ctrl =
2415 MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0;
2416 ot_params.bit_off_mdp_clk_ctrl =
2417 MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN0;
Benjamin Chan99eb63b2016-12-21 15:45:26 -05002418 ot_params.fmt = ctx->is_traffic_shaping ?
2419 SDE_PIX_FMT_ABGR_8888 :
2420 entry->perf->config.input.format;
Benjamin Chan1b94f952017-01-23 17:42:30 -05002421 ot_params.rotsts_base = rot->mdss_base + ROTTOP_STATUS;
2422 ot_params.rotsts_busy_mask = ROT_BUSY_BIT;
Alan Kwong9487de22016-01-16 22:06:36 -05002423 sde_mdp_set_ot_limit(&ot_params);
2424 }
2425
Veera Sundaram Sankaran3e539fe2017-05-10 17:03:32 -07002426 if (!ctx->sbuf_mode && mdata->default_ot_wr_limit) {
Alan Kwong9487de22016-01-16 22:06:36 -05002427 struct sde_mdp_set_ot_params ot_params;
2428
2429 memset(&ot_params, 0, sizeof(struct sde_mdp_set_ot_params));
2430 ot_params.xin_id = XIN_WRITEBACK;
2431 ot_params.num = 0; /* not used */
Alan Kwongeffb5ee2016-03-12 19:47:45 -05002432 ot_params.width = entry->perf->config.input.width;
2433 ot_params.height = entry->perf->config.input.height;
2434 ot_params.fps = entry->perf->config.frame_rate;
Alan Kwong9487de22016-01-16 22:06:36 -05002435 ot_params.reg_off_vbif_lim_conf = MMSS_VBIF_WR_LIM_CONF;
2436 ot_params.reg_off_mdp_clk_ctrl =
2437 MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0;
2438 ot_params.bit_off_mdp_clk_ctrl =
2439 MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN1;
Benjamin Chan99eb63b2016-12-21 15:45:26 -05002440 ot_params.fmt = ctx->is_traffic_shaping ?
2441 SDE_PIX_FMT_ABGR_8888 :
2442 entry->perf->config.input.format;
Benjamin Chan1b94f952017-01-23 17:42:30 -05002443 ot_params.rotsts_base = rot->mdss_base + ROTTOP_STATUS;
2444 ot_params.rotsts_busy_mask = ROT_BUSY_BIT;
Alan Kwong9487de22016-01-16 22:06:36 -05002445 sde_mdp_set_ot_limit(&ot_params);
2446 }
2447
2448 if (test_bit(SDE_QOS_PER_PIPE_LUT, mdata->sde_qos_map)) {
2449 u32 qos_lut = 0; /* low priority for nrt read client */
2450
2451 trace_rot_perf_set_qos_luts(XIN_SSPP, sspp_cfg.fmt->format,
2452 qos_lut, sde_mdp_is_linear_format(sspp_cfg.fmt));
2453
2454 SDE_ROTREG_WRITE(rot->mdss_base, ROT_SSPP_CREQ_LUT, qos_lut);
2455 }
2456
Veera Sundaram Sankarane15dd222017-04-20 08:13:08 -07002457 /* VBIF QoS and other settings */
Veera Sundaram Sankaran3e539fe2017-05-10 17:03:32 -07002458 if (!ctx->sbuf_mode)
2459 sde_hw_rotator_vbif_setting(rot);
Benjamin Chan2d6411a2017-03-28 18:01:53 -04002460
Alan Kwong9487de22016-01-16 22:06:36 -05002461 return 0;
Alan Kwong6bc64622017-02-04 17:36:03 -08002462
2463error:
2464 sde_hw_rotator_free_rotctx(rot, ctx);
2465 return ret;
Alan Kwong9487de22016-01-16 22:06:36 -05002466}
2467
2468/*
2469 * sde_hw_rotator_kickoff - kickoff processing on the given entry
2470 * @hw: Pointer to rotator resource
2471 * @entry: Pointer to rotation entry
2472 */
2473static int sde_hw_rotator_kickoff(struct sde_rot_hw_resource *hw,
2474 struct sde_rot_entry *entry)
2475{
2476 struct sde_hw_rotator *rot;
2477 struct sde_hw_rotator_resource_info *resinfo;
2478 struct sde_hw_rotator_context *ctx;
Alan Kwong9487de22016-01-16 22:06:36 -05002479
2480 if (!hw || !entry) {
2481 SDEROT_ERR("null hw resource/entry\n");
2482 return -EINVAL;
2483 }
2484
2485 resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
2486 rot = resinfo->rot;
2487
2488 /* Lookup rotator context from session-id */
2489 ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id, hw->wb_id);
2490 if (!ctx) {
 2491		SDEROT_ERR("Cannot locate rotator ctx from session id:%d\n",
2492 entry->item.session_id);
Benjamin Chan62b94ed2016-08-18 23:55:21 -04002493 return -EINVAL;
Alan Kwong9487de22016-01-16 22:06:36 -05002494 }
Alan Kwong9487de22016-01-16 22:06:36 -05002495
Alan Kwong9487de22016-01-16 22:06:36 -05002496 rot->ops.start_rotator(ctx, ctx->q_id);
2497
2498 return 0;
2499}
2500
2501/*
2502 * sde_hw_rotator_wait4done - wait for completion notification
2503 * @hw: Pointer to rotator resource
2504 * @entry: Pointer to rotation entry
2505 *
2506 * This function blocks until the given entry is complete, error
2507 * is detected, or timeout.
2508 */
2509static int sde_hw_rotator_wait4done(struct sde_rot_hw_resource *hw,
2510 struct sde_rot_entry *entry)
2511{
2512 struct sde_hw_rotator *rot;
2513 struct sde_hw_rotator_resource_info *resinfo;
2514 struct sde_hw_rotator_context *ctx;
2515 int ret;
2516
2517 if (!hw || !entry) {
2518 SDEROT_ERR("null hw resource/entry\n");
2519 return -EINVAL;
2520 }
2521
2522 resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
2523 rot = resinfo->rot;
2524
2525 /* Lookup rotator context from session-id */
2526 ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id, hw->wb_id);
2527 if (!ctx) {
 2528		SDEROT_ERR("Cannot locate rotator ctx from session id:%d\n",
2529 entry->item.session_id);
Benjamin Chan62b94ed2016-08-18 23:55:21 -04002530 return -EINVAL;
Alan Kwong9487de22016-01-16 22:06:36 -05002531 }
Alan Kwong9487de22016-01-16 22:06:36 -05002532
2533 ret = rot->ops.wait_rotator_done(ctx, ctx->q_id, 0);
2534
Alan Kwong9487de22016-01-16 22:06:36 -05002535 if (rot->dbgmem) {
2536 sde_hw_rotator_unmap_vaddr(&ctx->src_dbgbuf);
2537 sde_hw_rotator_unmap_vaddr(&ctx->dst_dbgbuf);
2538 }
2539
 2540	/* Current rotator context job is finished, time to free up */
2541 sde_hw_rotator_free_rotctx(rot, ctx);
2542
2543 return ret;
2544}
2545
2546/*
2547 * sde_rotator_hw_rev_init - setup feature and/or capability bitmask
2548 * @rot: Pointer to hw rotator
2549 *
2550 * This function initializes feature and/or capability bitmask based on
2551 * h/w version read from the device.
2552 */
2553static int sde_rotator_hw_rev_init(struct sde_hw_rotator *rot)
2554{
2555 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
2556 u32 hw_version;
2557
2558 if (!mdata) {
2559 SDEROT_ERR("null rotator data\n");
2560 return -EINVAL;
2561 }
2562
2563 hw_version = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_HW_VERSION);
2564 SDEROT_DBG("hw version %8.8x\n", hw_version);
2565
2566 clear_bit(SDE_QOS_PER_PIPE_IB, mdata->sde_qos_map);
2567 set_bit(SDE_QOS_OVERHEAD_FACTOR, mdata->sde_qos_map);
Alan Kwong9487de22016-01-16 22:06:36 -05002568 set_bit(SDE_QOS_OTLIM, mdata->sde_qos_map);
2569 set_bit(SDE_QOS_PER_PIPE_LUT, mdata->sde_qos_map);
2570 clear_bit(SDE_QOS_SIMPLIFIED_PREFILL, mdata->sde_qos_map);
2571
2572 set_bit(SDE_CAPS_R3_WB, mdata->sde_caps_map);
2573
Alan Kwong6bc64622017-02-04 17:36:03 -08002574 /* features exposed via rotator top h/w version */
Benjamin Chanfb6faa32016-08-16 17:21:01 -04002575 if (hw_version != SDE_ROT_TYPE_V1_0) {
2576 SDEROT_DBG("Supporting 1.5 downscale for SDE Rotator\n");
2577 set_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map);
2578 }
2579
Abhijit Kulkarni298c8232016-09-26 22:32:10 -07002580 set_bit(SDE_CAPS_SEC_ATTACH_DETACH_SMMU, mdata->sde_caps_map);
2581
Benjamin Chan53e3bce2016-08-31 14:43:29 -04002582 mdata->nrt_vbif_dbg_bus = nrt_vbif_dbg_bus_r3;
2583 mdata->nrt_vbif_dbg_bus_size =
2584 ARRAY_SIZE(nrt_vbif_dbg_bus_r3);
2585
Benjamin Chan2d6411a2017-03-28 18:01:53 -04002586 mdata->rot_dbg_bus = rot_dbgbus_r3;
2587 mdata->rot_dbg_bus_size = ARRAY_SIZE(rot_dbgbus_r3);
2588
Benjamin Chan53e3bce2016-08-31 14:43:29 -04002589 mdata->regdump = sde_rot_r3_regdump;
2590 mdata->regdump_size = ARRAY_SIZE(sde_rot_r3_regdump);
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04002591 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG, 0);
Alan Kwong6bc64622017-02-04 17:36:03 -08002592
2593 /* features exposed via mdss h/w version */
2594 if (IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version, SDE_MDP_HW_REV_400)) {
2595 SDEROT_DBG("Supporting sys cache inline rotation\n");
2596 set_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map);
Alan Kwongfb8eeb22017-02-06 15:00:03 -08002597 set_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map);
Clarence Ip22fed4c2017-05-16 15:30:51 -04002598 set_bit(SDE_CAPS_PARTIALWR, mdata->sde_caps_map);
Alan Kwong6bc64622017-02-04 17:36:03 -08002599 rot->inpixfmts = sde_hw_rotator_v4_inpixfmts;
2600 rot->num_inpixfmt = ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts);
2601 rot->outpixfmts = sde_hw_rotator_v4_outpixfmts;
2602 rot->num_outpixfmt = ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts);
2603 rot->downscale_caps =
2604 "LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2";
2605 } else {
2606 rot->inpixfmts = sde_hw_rotator_v3_inpixfmts;
2607 rot->num_inpixfmt = ARRAY_SIZE(sde_hw_rotator_v3_inpixfmts);
2608 rot->outpixfmts = sde_hw_rotator_v3_outpixfmts;
2609 rot->num_outpixfmt = ARRAY_SIZE(sde_hw_rotator_v3_outpixfmts);
2610 rot->downscale_caps = (hw_version == SDE_ROT_TYPE_V1_0) ?
2611 "LINEAR/2/4/8/16/32/64 TILE/2/4 TP10/2" :
2612 "LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2";
2613 }
2614
Alan Kwong9487de22016-01-16 22:06:36 -05002615 return 0;
2616}
2617
2618/*
2619 * sde_hw_rotator_rotirq_handler - non-regdma interrupt handler
2620 * @irq: Interrupt number
2621 * @ptr: Pointer to private handle provided during registration
2622 *
2623 * This function services rotator interrupt and wakes up waiting client
2624 * with pending rotation requests already submitted to h/w.
2625 */
2626static irqreturn_t sde_hw_rotator_rotirq_handler(int irq, void *ptr)
2627{
2628 struct sde_hw_rotator *rot = ptr;
2629 struct sde_hw_rotator_context *ctx;
2630 irqreturn_t ret = IRQ_NONE;
2631 u32 isr;
2632
2633 isr = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_INTR_STATUS);
2634
2635 SDEROT_DBG("intr_status = %8.8x\n", isr);
2636
2637 if (isr & ROT_DONE_MASK) {
2638 if (rot->irq_num >= 0)
Alan Kwong818b7fc2016-07-24 22:07:41 -04002639 sde_hw_rotator_disable_irq(rot);
Alan Kwong9487de22016-01-16 22:06:36 -05002640 SDEROT_DBG("Notify rotator complete\n");
2641
 2642		/* Normal rotator has only 1 session, no need to look up */
2643 ctx = rot->rotCtx[0][0];
2644 WARN_ON(ctx == NULL);
2645 complete_all(&ctx->rot_comp);
2646
2647 spin_lock(&rot->rotisr_lock);
2648 SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
2649 ROT_DONE_CLEAR);
2650 spin_unlock(&rot->rotisr_lock);
2651 ret = IRQ_HANDLED;
2652 }
2653
2654 return ret;
2655}
2656
2657/*
2658 * sde_hw_rotator_regdmairq_handler - regdma interrupt handler
2659 * @irq: Interrupt number
2660 * @ptr: Pointer to private handle provided during registration
2661 *
2662 * This function services rotator interrupt, decoding the source of
2663 * events (high/low priority queue), and wakes up all waiting clients
2664 * with pending rotation requests already submitted to h/w.
2665 */
2666static irqreturn_t sde_hw_rotator_regdmairq_handler(int irq, void *ptr)
2667{
2668 struct sde_hw_rotator *rot = ptr;
2669 struct sde_hw_rotator_context *ctx;
2670 irqreturn_t ret = IRQ_NONE;
2671 u32 isr;
2672 u32 ts;
2673 u32 q_id;
2674
2675 isr = SDE_ROTREG_READ(rot->mdss_base, REGDMA_CSR_REGDMA_INT_STATUS);
Alan Kwong818b7fc2016-07-24 22:07:41 -04002676 /* acknowledge interrupt before reading latest timestamp */
2677 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_INT_CLEAR, isr);
Alan Kwong9487de22016-01-16 22:06:36 -05002678 ts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);
2679
2680 SDEROT_DBG("intr_status = %8.8x, sw_TS:%X\n", isr, ts);
2681
 2682	/* Any REGDMA status, including error and watchdog timer events,
 2683	 * should trigger and wake up the waiting thread
2684 */
2685 if (isr & (REGDMA_INT_HIGH_MASK | REGDMA_INT_LOW_MASK)) {
2686 spin_lock(&rot->rotisr_lock);
2687
2688 /*
2689 * Obtain rotator context based on timestamp from regdma
2690 * and low/high interrupt status
2691 */
2692 if (isr & REGDMA_INT_HIGH_MASK) {
2693 q_id = ROT_QUEUE_HIGH_PRIORITY;
2694 ts = ts & SDE_REGDMA_SWTS_MASK;
2695 } else if (isr & REGDMA_INT_LOW_MASK) {
2696 q_id = ROT_QUEUE_LOW_PRIORITY;
2697 ts = (ts >> SDE_REGDMA_SWTS_SHIFT) &
2698 SDE_REGDMA_SWTS_MASK;
Benjamin Chan62b94ed2016-08-18 23:55:21 -04002699 } else {
2700 SDEROT_ERR("unknown ISR status: isr=0x%X\n", isr);
2701 goto done_isr_handle;
Alan Kwong9487de22016-01-16 22:06:36 -05002702 }
Alan Kwong6bc64622017-02-04 17:36:03 -08002703
2704 /*
2705 * Timestamp packet is not available in sbuf mode.
2706 * Simulate timestamp update in the handler instead.
2707 */
2708 if (!list_empty(&rot->sbuf_ctx[q_id])) {
2709 ctx = list_first_entry_or_null(&rot->sbuf_ctx[q_id],
2710 struct sde_hw_rotator_context, list);
2711 if (ctx) {
2712 ts = ctx->timestamp;
2713 sde_hw_rotator_update_swts(rot, ctx, ts);
2714 SDEROT_DBG("update swts:0x%X\n", ts);
2715 } else {
2716 SDEROT_ERR("invalid swts ctx\n");
2717 }
2718 }
2719
Alan Kwong9487de22016-01-16 22:06:36 -05002720 ctx = rot->rotCtx[q_id][ts & SDE_HW_ROT_REGDMA_SEG_MASK];
Alan Kwong9487de22016-01-16 22:06:36 -05002721
2722 /*
2723 * Wake up all waiting context from the current and previous
2724 * SW Timestamp.
2725 */
Alan Kwong818b7fc2016-07-24 22:07:41 -04002726 while (ctx &&
2727 sde_hw_rotator_elapsed_swts(ctx->timestamp, ts) >= 0) {
Alan Kwong9487de22016-01-16 22:06:36 -05002728 ctx->last_regdma_isr_status = isr;
2729 ctx->last_regdma_timestamp = ts;
2730 SDEROT_DBG(
Alan Kwongf987ea32016-07-06 12:11:44 -04002731 "regdma complete: ctx:%p, ts:%X\n", ctx, ts);
Alan Kwong818b7fc2016-07-24 22:07:41 -04002732 wake_up_all(&ctx->regdma_waitq);
Alan Kwong9487de22016-01-16 22:06:36 -05002733
2734 ts = (ts - 1) & SDE_REGDMA_SWTS_MASK;
2735 ctx = rot->rotCtx[q_id]
2736 [ts & SDE_HW_ROT_REGDMA_SEG_MASK];
Alan Kwong818b7fc2016-07-24 22:07:41 -04002737		}
Alan Kwong9487de22016-01-16 22:06:36 -05002738
Benjamin Chan62b94ed2016-08-18 23:55:21 -04002739done_isr_handle:
Alan Kwong9487de22016-01-16 22:06:36 -05002740 spin_unlock(&rot->rotisr_lock);
2741 ret = IRQ_HANDLED;
2742 } else if (isr & REGDMA_INT_ERR_MASK) {
2743 /*
2744 * For REGDMA Err, we save the isr info and wake up
2745 * all waiting contexts
2746 */
2747 int i, j;
2748
2749 SDEROT_ERR(
2750 "regdma err isr:%X, wake up all waiting contexts\n",
2751 isr);
2752
2753 spin_lock(&rot->rotisr_lock);
2754
2755 for (i = 0; i < ROT_QUEUE_MAX; i++) {
2756 for (j = 0; j < SDE_HW_ROT_REGDMA_TOTAL_CTX; j++) {
2757 ctx = rot->rotCtx[i][j];
2758 if (ctx && ctx->last_regdma_isr_status == 0) {
2759 ctx->last_regdma_isr_status = isr;
2760 ctx->last_regdma_timestamp = ts;
Alan Kwong818b7fc2016-07-24 22:07:41 -04002761 wake_up_all(&ctx->regdma_waitq);
Alan Kwong9487de22016-01-16 22:06:36 -05002762 SDEROT_DBG("Wakeup rotctx[%d][%d]:%p\n",
2763 i, j, ctx);
2764 }
2765 }
2766 }
2767
Alan Kwong9487de22016-01-16 22:06:36 -05002768 spin_unlock(&rot->rotisr_lock);
2769 ret = IRQ_HANDLED;
2770 }
2771
2772 return ret;
2773}
2774
2775/*
2776 * sde_hw_rotator_validate_entry - validate rotation entry
2777 * @mgr: Pointer to rotator manager
2778 * @entry: Pointer to rotation entry
2779 *
2780 * This function validates the given rotation entry and provides possible
2781 * fixup (future improvement) if available. This function returns 0 if
2782 * the entry is valid, and returns error code otherwise.
2783 */
2784static int sde_hw_rotator_validate_entry(struct sde_rot_mgr *mgr,
2785 struct sde_rot_entry *entry)
2786{
Benjamin Chanfb6faa32016-08-16 17:21:01 -04002787 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
Alan Kwongb6c049c2017-03-31 12:50:27 -07002788 struct sde_hw_rotator *hw_data;
Alan Kwong9487de22016-01-16 22:06:36 -05002789 int ret = 0;
2790 u16 src_w, src_h, dst_w, dst_h;
2791 struct sde_rotation_item *item = &entry->item;
2792 struct sde_mdp_format_params *fmt;
2793
Alan Kwongb6c049c2017-03-31 12:50:27 -07002794 if (!mgr || !entry || !mgr->hw_data) {
2795 SDEROT_ERR("invalid parameters\n");
2796 return -EINVAL;
2797 }
2798
2799 hw_data = mgr->hw_data;
2800
2801 if (hw_data->maxlinewidth < item->src_rect.w) {
2802 SDEROT_ERR("invalid src width %u\n", item->src_rect.w);
2803 return -EINVAL;
2804 }
2805
Alan Kwong9487de22016-01-16 22:06:36 -05002806 src_w = item->src_rect.w;
2807 src_h = item->src_rect.h;
2808
2809 if (item->flags & SDE_ROTATION_90) {
2810 dst_w = item->dst_rect.h;
2811 dst_h = item->dst_rect.w;
2812 } else {
2813 dst_w = item->dst_rect.w;
2814 dst_h = item->dst_rect.h;
2815 }
2816
2817 entry->dnsc_factor_w = 0;
2818 entry->dnsc_factor_h = 0;
2819
Alan Kwong6bc64622017-02-04 17:36:03 -08002820 if (item->output.sbuf &&
2821 !test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map)) {
2822 SDEROT_ERR("stream buffer not supported\n");
2823 return -EINVAL;
2824 }
2825
Alan Kwong9487de22016-01-16 22:06:36 -05002826 if ((src_w != dst_w) || (src_h != dst_h)) {
Clarence Ip4db1ea82017-05-01 12:18:55 -07002827 if (!dst_w || !dst_h) {
 2828			SDEROT_DBG("zero output width/height not supported\n");
2829 ret = -EINVAL;
2830 goto dnsc_err;
2831 }
Alan Kwong9487de22016-01-16 22:06:36 -05002832 if ((src_w % dst_w) || (src_h % dst_h)) {
 2833			SDEROT_DBG("non-integral scale not supported\n");
2834 ret = -EINVAL;
Benjamin Chanfb6faa32016-08-16 17:21:01 -04002835 goto dnsc_1p5_check;
Alan Kwong9487de22016-01-16 22:06:36 -05002836 }
2837 entry->dnsc_factor_w = src_w / dst_w;
2838 if ((entry->dnsc_factor_w & (entry->dnsc_factor_w - 1)) ||
2839 (entry->dnsc_factor_w > 64)) {
 2840			SDEROT_DBG("non power-of-2 w_scale not supported\n");
2841 ret = -EINVAL;
2842 goto dnsc_err;
2843 }
2844 entry->dnsc_factor_h = src_h / dst_h;
2845 if ((entry->dnsc_factor_h & (entry->dnsc_factor_h - 1)) ||
2846 (entry->dnsc_factor_h > 64)) {
 2847			SDEROT_DBG("non power-of-2 h_scale not supported\n");
2848 ret = -EINVAL;
2849 goto dnsc_err;
2850 }
2851 }
2852
Benjamin Chan0e96afd2017-01-17 16:49:12 -05002853 fmt = sde_get_format_params(item->output.format);
Benjamin Chan886ff672016-11-07 15:23:17 -05002854 /*
 2855	 * Rotator downscale supports at most 4x for UBWC formats and
 2856	 * at most 2x for TP10/TP10_UBWC formats
2857 */
2858 if (sde_mdp_is_ubwc_format(fmt) && (entry->dnsc_factor_h > 4)) {
2859 SDEROT_DBG("max downscale for UBWC format is 4\n");
Alan Kwong9487de22016-01-16 22:06:36 -05002860 ret = -EINVAL;
2861 goto dnsc_err;
2862 }
Benjamin Chan886ff672016-11-07 15:23:17 -05002863 if (sde_mdp_is_tp10_format(fmt) && (entry->dnsc_factor_h > 2)) {
2864 SDEROT_DBG("downscale with TP10 cannot be more than 2\n");
Alan Kwong9487de22016-01-16 22:06:36 -05002865 ret = -EINVAL;
2866 }
Benjamin Chanfb6faa32016-08-16 17:21:01 -04002867 goto dnsc_err;
2868
2869dnsc_1p5_check:
2870 /* Check for 1.5 downscale that only applies to V2 HW */
2871 if (test_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map)) {
2872 entry->dnsc_factor_w = src_w / dst_w;
2873 if ((entry->dnsc_factor_w != 1) ||
2874 ((dst_w * 3) != (src_w * 2))) {
2875 SDEROT_DBG(
 2876			"Not supporting non-1.5 downscale width ratio, src_w:%d, dst_w:%d\n",
2877 src_w, dst_w);
2878 ret = -EINVAL;
2879 goto dnsc_err;
2880 }
2881
2882 entry->dnsc_factor_h = src_h / dst_h;
2883 if ((entry->dnsc_factor_h != 1) ||
2884 ((dst_h * 3) != (src_h * 2))) {
2885 SDEROT_DBG(
 2886			"Not supporting non-1.5 downscale height ratio, src_h:%d, dst_h:%d\n",
2887 src_h, dst_h);
2888 ret = -EINVAL;
2889 goto dnsc_err;
2890 }
2891 ret = 0;
2892 }
Alan Kwong9487de22016-01-16 22:06:36 -05002893
2894dnsc_err:
2895 /* Downscaler does not support asymmetrical dnsc */
2896 if (entry->dnsc_factor_w != entry->dnsc_factor_h) {
 2897		SDEROT_DBG("asymmetric downscale not supported\n");
2898 ret = -EINVAL;
2899 }
2900
2901 if (ret) {
2902 entry->dnsc_factor_w = 0;
2903 entry->dnsc_factor_h = 0;
2904 }
2905 return ret;
2906}
2907
2908/*
2909 * sde_hw_rotator_show_caps - output capability info to sysfs 'caps' file
2910 * @mgr: Pointer to rotator manager
2911 * @attr: Pointer to device attribute interface
2912 * @buf: Pointer to output buffer
2913 * @len: Length of output buffer
2914 */
2915static ssize_t sde_hw_rotator_show_caps(struct sde_rot_mgr *mgr,
2916 struct device_attribute *attr, char *buf, ssize_t len)
2917{
2918 struct sde_hw_rotator *hw_data;
Benjamin Chan886ff672016-11-07 15:23:17 -05002919 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
Alan Kwong9487de22016-01-16 22:06:36 -05002920 int cnt = 0;
2921
2922 if (!mgr || !buf)
2923 return 0;
2924
2925 hw_data = mgr->hw_data;
2926
2927#define SPRINT(fmt, ...) \
2928 (cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
2929
2930 /* insert capabilities here */
Benjamin Chan886ff672016-11-07 15:23:17 -05002931 if (test_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map))
2932 SPRINT("min_downscale=1.5\n");
2933 else
2934 SPRINT("min_downscale=2.0\n");
Alan Kwong9487de22016-01-16 22:06:36 -05002935
Benjamin Chan42db2c92016-11-22 22:50:01 -05002936 SPRINT("downscale_compression=1\n");
2937
Alan Kwong6bc64622017-02-04 17:36:03 -08002938 if (hw_data->downscale_caps)
2939 SPRINT("downscale_ratios=%s\n", hw_data->downscale_caps);
2940
Alan Kwong9487de22016-01-16 22:06:36 -05002941#undef SPRINT
2942 return cnt;
2943}
2944
2945/*
2946 * sde_hw_rotator_show_state - output state info to sysfs 'state' file
2947 * @mgr: Pointer to rotator manager
2948 * @attr: Pointer to device attribute interface
2949 * @buf: Pointer to output buffer
2950 * @len: Length of output buffer
2951 */
2952static ssize_t sde_hw_rotator_show_state(struct sde_rot_mgr *mgr,
2953 struct device_attribute *attr, char *buf, ssize_t len)
2954{
2955 struct sde_hw_rotator *rot;
2956 struct sde_hw_rotator_context *ctx;
2957 int cnt = 0;
2958 int num_active = 0;
2959 int i, j;
2960
2961 if (!mgr || !buf) {
2962 SDEROT_ERR("null parameters\n");
2963 return 0;
2964 }
2965
2966 rot = mgr->hw_data;
2967
2968#define SPRINT(fmt, ...) \
2969 (cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
2970
2971 if (rot) {
2972 SPRINT("rot_mode=%d\n", rot->mode);
2973 SPRINT("irq_num=%d\n", rot->irq_num);
2974
2975 if (rot->mode == ROT_REGDMA_OFF) {
2976 SPRINT("max_active=1\n");
2977 SPRINT("num_active=%d\n", rot->rotCtx[0][0] ? 1 : 0);
2978 } else {
2979 for (i = 0; i < ROT_QUEUE_MAX; i++) {
2980 for (j = 0; j < SDE_HW_ROT_REGDMA_TOTAL_CTX;
2981 j++) {
2982 ctx = rot->rotCtx[i][j];
2983
2984 if (ctx) {
2985 SPRINT(
2986 "rotCtx[%d][%d]:%p\n",
2987 i, j, ctx);
2988 ++num_active;
2989 }
2990 }
2991 }
2992
2993 SPRINT("max_active=%d\n", SDE_HW_ROT_REGDMA_TOTAL_CTX);
2994 SPRINT("num_active=%d\n", num_active);
2995 }
2996 }
2997
2998#undef SPRINT
2999 return cnt;
3000}
3001
3002/*
Alan Kwongda16e442016-08-14 20:47:18 -04003003 * sde_hw_rotator_get_pixfmt - get the indexed pixel format
3004 * @mgr: Pointer to rotator manager
3005 * @index: index of pixel format
3006 * @input: true for input port; false for output port
3007 */
3008static u32 sde_hw_rotator_get_pixfmt(struct sde_rot_mgr *mgr,
3009 int index, bool input)
3010{
Alan Kwong6bc64622017-02-04 17:36:03 -08003011 struct sde_hw_rotator *rot;
3012
3013 if (!mgr || !mgr->hw_data) {
3014 SDEROT_ERR("null parameters\n");
3015 return 0;
3016 }
3017
3018 rot = mgr->hw_data;
3019
Alan Kwongda16e442016-08-14 20:47:18 -04003020 if (input) {
Alan Kwong6bc64622017-02-04 17:36:03 -08003021 if ((index < rot->num_inpixfmt) && rot->inpixfmts)
3022 return rot->inpixfmts[index];
Alan Kwongda16e442016-08-14 20:47:18 -04003023 else
3024 return 0;
3025 } else {
Alan Kwong6bc64622017-02-04 17:36:03 -08003026 if ((index < rot->num_outpixfmt) && rot->outpixfmts)
3027 return rot->outpixfmts[index];
Alan Kwongda16e442016-08-14 20:47:18 -04003028 else
3029 return 0;
3030 }
3031}
3032
3033/*
3034 * sde_hw_rotator_is_valid_pixfmt - verify if the given pixel format is valid
3035 * @mgr: Pointer to rotator manager
3036 * @pixfmt: pixel format to be verified
3037 * @input: true for input port; false for output port
3038 */
3039static int sde_hw_rotator_is_valid_pixfmt(struct sde_rot_mgr *mgr, u32 pixfmt,
3040 bool input)
3041{
Alan Kwong6bc64622017-02-04 17:36:03 -08003042 struct sde_hw_rotator *rot;
3043 u32 *pixfmts;
3044 u32 num_pixfmt;
Alan Kwongda16e442016-08-14 20:47:18 -04003045 int i;
3046
Alan Kwong6bc64622017-02-04 17:36:03 -08003047 if (!mgr || !mgr->hw_data) {
3048 SDEROT_ERR("null parameters\n");
3049 return false;
Alan Kwongda16e442016-08-14 20:47:18 -04003050 }
3051
Alan Kwong6bc64622017-02-04 17:36:03 -08003052 rot = mgr->hw_data;
3053
3054 if (input) {
3055 pixfmts = rot->inpixfmts;
3056 num_pixfmt = rot->num_inpixfmt;
3057 } else {
3058 pixfmts = rot->outpixfmts;
3059 num_pixfmt = rot->num_outpixfmt;
3060 }
3061
3062 if (!pixfmts || !num_pixfmt) {
3063 SDEROT_ERR("invalid pixel format tables\n");
3064 return false;
3065 }
3066
3067 for (i = 0; i < num_pixfmt; i++)
3068 if (pixfmts[i] == pixfmt)
3069 return true;
3070
Alan Kwongda16e442016-08-14 20:47:18 -04003071 return false;
3072}
3073
3074/*
Alan Kwong6bc64622017-02-04 17:36:03 -08003075 * sde_hw_rotator_get_downscale_caps - get scaling capability string
3076 * @mgr: Pointer to rotator manager
3077 * @caps: Pointer to capability string buffer; NULL to return maximum length
3078 * @len: length of capability string buffer
3079 * return: length of capability string
3080 */
3081static int sde_hw_rotator_get_downscale_caps(struct sde_rot_mgr *mgr,
3082 char *caps, int len)
3083{
3084 struct sde_hw_rotator *rot;
3085 int rc = 0;
3086
3087 if (!mgr || !mgr->hw_data) {
3088 SDEROT_ERR("null parameters\n");
3089 return -EINVAL;
3090 }
3091
3092 rot = mgr->hw_data;
3093
3094 if (rot->downscale_caps) {
3095 if (caps)
3096 rc = snprintf(caps, len, "%s", rot->downscale_caps);
3097 else
3098 rc = strlen(rot->downscale_caps);
3099 }
3100
3101 return rc;
3102}
3103
3104/*
Alan Kwongb6c049c2017-03-31 12:50:27 -07003105 * sde_hw_rotator_get_maxlinewidth - get maximum line width supported
3106 * @mgr: Pointer to rotator manager
3107 * return: maximum line width supported by hardware
3108 */
3109static int sde_hw_rotator_get_maxlinewidth(struct sde_rot_mgr *mgr)
3110{
3111 struct sde_hw_rotator *rot;
3112
3113 if (!mgr || !mgr->hw_data) {
3114 SDEROT_ERR("null parameters\n");
3115 return -EINVAL;
3116 }
3117
3118 rot = mgr->hw_data;
3119
3120 return rot->maxlinewidth;
3121}
3122
/*
 * sde_hw_rotator_parse_dt - parse r3 specific device tree settings
 * @hw_data: Pointer to rotator hw
 * @dev: Pointer to platform device
 */
static int sde_hw_rotator_parse_dt(struct sde_hw_rotator *hw_data,
		struct platform_device *dev)
{
	int ret = 0;
	u32 data;

	if (!hw_data || !dev)
		return -EINVAL;

	ret = of_property_read_u32(dev->dev.of_node, "qcom,mdss-rot-mode",
			&data);
	if (ret) {
		SDEROT_DBG("default to regdma off\n");
		ret = 0;
		hw_data->mode = ROT_REGDMA_OFF;
	} else if (data < ROT_REGDMA_MAX) {
		SDEROT_DBG("set to regdma mode %d\n", data);
		hw_data->mode = data;
	} else {
		SDEROT_ERR("regdma mode out of range. default to regdma off\n");
		hw_data->mode = ROT_REGDMA_OFF;
	}

	ret = of_property_read_u32(dev->dev.of_node,
			"qcom,mdss-highest-bank-bit", &data);
	if (ret) {
		SDEROT_DBG("default to A5X bank\n");
		ret = 0;
		hw_data->highest_bank = 2;
	} else {
		SDEROT_DBG("set highest bank bit to %d\n", data);
		hw_data->highest_bank = data;
	}

	ret = of_property_read_u32(dev->dev.of_node,
			"qcom,sde-ubwc-malsize", &data);
	if (ret) {
		ret = 0;
		hw_data->ubwc_malsize = DEFAULT_UBWC_MALSIZE;
	} else {
		SDEROT_DBG("set ubwc malsize to %d\n", data);
		hw_data->ubwc_malsize = data;
	}

	ret = of_property_read_u32(dev->dev.of_node,
			"qcom,sde-ubwc_swizzle", &data);
	if (ret) {
		ret = 0;
		hw_data->ubwc_swizzle = DEFAULT_UBWC_SWIZZLE;
	} else {
		SDEROT_DBG("set ubwc swizzle to %d\n", data);
		hw_data->ubwc_swizzle = data;
	}

	ret = of_property_read_u32(dev->dev.of_node,
			"qcom,mdss-sbuf-headroom", &data);
	if (ret) {
		ret = 0;
		hw_data->sbuf_headroom = DEFAULT_SBUF_HEADROOM;
	} else {
		SDEROT_DBG("set sbuf headroom to %d\n", data);
		hw_data->sbuf_headroom = data;
	}

	ret = of_property_read_u32(dev->dev.of_node,
			"qcom,mdss-rot-linewidth", &data);
	if (ret) {
		ret = 0;
		hw_data->maxlinewidth = DEFAULT_MAXLINEWIDTH;
	} else {
		SDEROT_DBG("set mdss-rot-linewidth to %d\n", data);
		hw_data->maxlinewidth = data;
	}

	return ret;
}

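/*
 * Example (illustrative only): a device tree fragment exercising the
 * optional properties parsed above. The values are placeholders, not a
 * recommendation for any target; any property left out falls back to the
 * default chosen in sde_hw_rotator_parse_dt(), and qcom,mdss-rot-mode
 * must stay below ROT_REGDMA_MAX to take effect.
 *
 *	qcom,mdss-rot-mode = <1>;
 *	qcom,mdss-highest-bank-bit = <2>;
 *	qcom,sde-ubwc-malsize = <0>;
 *	qcom,sde-ubwc_swizzle = <0>;
 *	qcom,mdss-sbuf-headroom = <20>;
 *	qcom,mdss-rot-linewidth = <4096>;
 */
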
/*
 * sde_rotator_r3_init - initialize the r3 module
 * @mgr: Pointer to rotator manager
 *
 * This function sets up the r3 callback functions, parses r3 specific
 * device tree settings, installs the r3 specific interrupt handler,
 * and initializes the r3 internal data structures.
 */
int sde_rotator_r3_init(struct sde_rot_mgr *mgr)
{
	struct sde_hw_rotator *rot;
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	int i;
	int ret;

	rot = devm_kzalloc(&mgr->pdev->dev, sizeof(*rot), GFP_KERNEL);
	if (!rot)
		return -ENOMEM;

	mgr->hw_data = rot;
	mgr->queue_count = ROT_QUEUE_MAX;

	rot->mdss_base = mdata->sde_io.base;
	rot->pdev = mgr->pdev;
	rot->koff_timeout = KOFF_TIMEOUT;
	rot->vid_trigger = ROTTOP_START_CTRL_TRIG_SEL_MDP;
	rot->cmd_trigger = ROTTOP_START_CTRL_TRIG_SEL_MDP;

	/* Assign ops */
	mgr->ops_hw_destroy = sde_hw_rotator_destroy;
	mgr->ops_hw_alloc = sde_hw_rotator_alloc_ext;
	mgr->ops_hw_free = sde_hw_rotator_free_ext;
	mgr->ops_config_hw = sde_hw_rotator_config;
	mgr->ops_kickoff_entry = sde_hw_rotator_kickoff;
	mgr->ops_wait_for_entry = sde_hw_rotator_wait4done;
	mgr->ops_hw_validate_entry = sde_hw_rotator_validate_entry;
	mgr->ops_hw_show_caps = sde_hw_rotator_show_caps;
	mgr->ops_hw_show_state = sde_hw_rotator_show_state;
	mgr->ops_hw_create_debugfs = sde_rotator_r3_create_debugfs;
	mgr->ops_hw_get_pixfmt = sde_hw_rotator_get_pixfmt;
	mgr->ops_hw_is_valid_pixfmt = sde_hw_rotator_is_valid_pixfmt;
	mgr->ops_hw_pre_pmevent = sde_hw_rotator_pre_pmevent;
	mgr->ops_hw_post_pmevent = sde_hw_rotator_post_pmevent;
	mgr->ops_hw_get_downscale_caps = sde_hw_rotator_get_downscale_caps;
	mgr->ops_hw_get_maxlinewidth = sde_hw_rotator_get_maxlinewidth;

	ret = sde_hw_rotator_parse_dt(mgr->hw_data, mgr->pdev);
	if (ret)
		goto error_parse_dt;

	rot->irq_num = platform_get_irq(mgr->pdev, 0);
	if (rot->irq_num < 0) {
		SDEROT_ERR("fail to get rotator irq\n");
	} else {
		if (rot->mode == ROT_REGDMA_OFF)
			ret = devm_request_threaded_irq(&mgr->pdev->dev,
					rot->irq_num,
					sde_hw_rotator_rotirq_handler,
					NULL, 0, "sde_rotator_r3", rot);
		else
			ret = devm_request_threaded_irq(&mgr->pdev->dev,
					rot->irq_num,
					sde_hw_rotator_regdmairq_handler,
					NULL, 0, "sde_rotator_r3", rot);
		if (ret) {
			SDEROT_ERR("fail to request irq r:%d\n", ret);
			rot->irq_num = -1;
		} else {
			disable_irq(rot->irq_num);
		}
	}
	atomic_set(&rot->irq_enabled, 0);

	setup_rotator_ops(&rot->ops, rot->mode);

	spin_lock_init(&rot->rotctx_lock);
	spin_lock_init(&rot->rotisr_lock);

	/* REGDMA initialization */
	if (rot->mode == ROT_REGDMA_OFF) {
		for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
			rot->cmd_wr_ptr[0][i] = &rot->cmd_queue[
				SDE_HW_ROT_REGDMA_SEG_SIZE * i];
	} else {
		for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
			rot->cmd_wr_ptr[ROT_QUEUE_HIGH_PRIORITY][i] =
				(u32 *)(rot->mdss_base +
					REGDMA_RAM_REGDMA_CMD_RAM +
					SDE_HW_ROT_REGDMA_SEG_SIZE * 4 * i);

		for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
			rot->cmd_wr_ptr[ROT_QUEUE_LOW_PRIORITY][i] =
				(u32 *)(rot->mdss_base +
					REGDMA_RAM_REGDMA_CMD_RAM +
					SDE_HW_ROT_REGDMA_SEG_SIZE * 4 *
					(i + SDE_HW_ROT_REGDMA_TOTAL_CTX));
	}

	for (i = 0; i < ROT_QUEUE_MAX; i++) {
		atomic_set(&rot->timestamp[i], 0);
		INIT_LIST_HEAD(&rot->sbuf_ctx[i]);
	}

	ret = sde_rotator_hw_rev_init(rot);
	if (ret)
		goto error_hw_rev_init;

	/* set rotator CBCR to shut off memory/periphery on clock off. */
	clk_set_flags(mgr->rot_clk[SDE_ROTATOR_CLK_MDSS_ROT].clk,
			CLKFLAG_NORETAIN_MEM);
	clk_set_flags(mgr->rot_clk[SDE_ROTATOR_CLK_MDSS_ROT].clk,
			CLKFLAG_NORETAIN_PERIPH);

	mdata->sde_rot_hw = rot;
	return 0;
error_hw_rev_init:
	if (rot->irq_num >= 0)
		devm_free_irq(&mgr->pdev->dev, rot->irq_num, rot);
	devm_kfree(&mgr->pdev->dev, mgr->hw_data);
error_parse_dt:
	return ret;
}
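
/*
 * Layout note (derived from the regdma setup in sde_rotator_r3_init()
 * above): in regdma mode the command RAM is carved into
 * 2 * SDE_HW_ROT_REGDMA_TOTAL_CTX segments of
 * SDE_HW_ROT_REGDMA_SEG_SIZE 32-bit words each; the high priority queue
 * owns the first SDE_HW_ROT_REGDMA_TOTAL_CTX segments and the low
 * priority queue the remainder. The "* 4" converts the word count into a
 * byte offset from the ioremapped base, e.g. for context i on the low
 * priority queue:
 *
 *	offset = REGDMA_RAM_REGDMA_CMD_RAM +
 *		SDE_HW_ROT_REGDMA_SEG_SIZE * 4 *
 *		(i + SDE_HW_ROT_REGDMA_TOTAL_CTX);
 */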