Benjamin Chan99eb63b2016-12-21 15:45:26 -05001/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
Alan Kwong9487de22016-01-16 22:06:36 -05002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
Alan Kwong4b416162017-08-11 21:03:10 -040014#define pr_fmt(fmt) "%s:%d: " fmt, __func__, __LINE__
Alan Kwong9487de22016-01-16 22:06:36 -050015
16#include <linux/platform_device.h>
17#include <linux/module.h>
18#include <linux/fs.h>
19#include <linux/file.h>
Alan Kwong9487de22016-01-16 22:06:36 -050020#include <linux/delay.h>
21#include <linux/debugfs.h>
22#include <linux/interrupt.h>
23#include <linux/dma-mapping.h>
24#include <linux/dma-buf.h>
25#include <linux/msm_ion.h>
Alan Kwong6ce448d2016-11-24 18:45:20 -080026#include <linux/clk.h>
27#include <linux/clk/qcom.h>
Alan Kwong9487de22016-01-16 22:06:36 -050028
29#include "sde_rotator_core.h"
30#include "sde_rotator_util.h"
31#include "sde_rotator_smmu.h"
32#include "sde_rotator_r3.h"
33#include "sde_rotator_r3_internal.h"
34#include "sde_rotator_r3_hwio.h"
35#include "sde_rotator_r3_debug.h"
36#include "sde_rotator_trace.h"
Benjamin Chan53e3bce2016-08-31 14:43:29 -040037#include "sde_rotator_debug.h"
Alan Kwong9487de22016-01-16 22:06:36 -050038
Benjamin Chan99eb63b2016-12-21 15:45:26 -050039#define RES_UHD (3840*2160)
40
41/* traffic shaping clock ticks = finish_time x 19.2MHz */
42#define TRAFFIC_SHAPE_CLKTICK_14MS 268800
43#define TRAFFIC_SHAPE_CLKTICK_12MS 230400
Alan Kwong498d59f2017-02-11 18:56:34 -080044#define TRAFFIC_SHAPE_VSYNC_CLK 19200000
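/*
 * Illustrative derivation: the clock-tick constants above follow
 * directly from the 19.2MHz reference clock, e.g.
 *	14ms x 19.2MHz = 0.014 * 19,200,000 = 268,800 ticks
 *	12ms x 19.2MHz = 0.012 * 19,200,000 = 230,400 ticks
 */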
Benjamin Chan99eb63b2016-12-21 15:45:26 -050045
Alan Kwong9487de22016-01-16 22:06:36 -050046/* XIN mapping */
47#define XIN_SSPP 0
48#define XIN_WRITEBACK 1
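/*
 * Note: these are the VBIF client (xin) port indices used by
 * sde_hw_rotator_vbif_setting(), which packs 2 bits per client in the
 * 4-level QoS remapper and 4 bits per client in the 8-level remapper.
 */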
49
 50/* wait for at most 2 vsyncs at the lowest refresh rate (24Hz) */
Alan Kwong9a11c452017-05-01 15:11:31 -070051#define KOFF_TIMEOUT (42 * 32)
Alan Kwong6bc64622017-02-04 17:36:03 -080052
53/* default stream buffer headroom in lines */
54#define DEFAULT_SBUF_HEADROOM 20
Clarence Ip37e013c2017-05-04 12:23:13 -070055#define DEFAULT_UBWC_MALSIZE 0
56#define DEFAULT_UBWC_SWIZZLE 0
Alan Kwong9487de22016-01-16 22:06:36 -050057
Alan Kwongb6c049c2017-03-31 12:50:27 -070058#define DEFAULT_MAXLINEWIDTH 4096
59
Clarence Ip77cadd12017-06-19 17:51:46 -040060/* stride alignment requirement for avoiding partial writes */
61#define PARTIAL_WRITE_ALIGNMENT 0x1F
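/*
 * Note: 0x1F masks the low five bits, so a plane stride must be a
 * 32-byte multiple before partial writes are disabled in
 * sde_hw_rotator_setup_wbengine().
 */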
62
Alan Kwong9487de22016-01-16 22:06:36 -050063/* Macro for constructing the REGDMA command */
64#define SDE_REGDMA_WRITE(p, off, data) \
65 do { \
Alan Kwong6bc64622017-02-04 17:36:03 -080066 SDEROT_DBG("SDEREG.W:[%s:0x%X] <= 0x%X\n", #off, (off),\
67 (u32)(data));\
Alan Kwong9487de22016-01-16 22:06:36 -050068 *p++ = REGDMA_OP_REGWRITE | \
69 ((off) & REGDMA_ADDR_OFFSET_MASK); \
70 *p++ = (data); \
71 } while (0)
72
73#define SDE_REGDMA_MODIFY(p, off, mask, data) \
74 do { \
Alan Kwong6bc64622017-02-04 17:36:03 -080075 SDEROT_DBG("SDEREG.M:[%s:0x%X] <= 0x%X\n", #off, (off),\
76 (u32)(data));\
Alan Kwong9487de22016-01-16 22:06:36 -050077 *p++ = REGDMA_OP_REGMODIFY | \
78 ((off) & REGDMA_ADDR_OFFSET_MASK); \
79 *p++ = (mask); \
80 *p++ = (data); \
81 } while (0)
82
83#define SDE_REGDMA_BLKWRITE_INC(p, off, len) \
84 do { \
Alan Kwong6bc64622017-02-04 17:36:03 -080085 SDEROT_DBG("SDEREG.B:[%s:0x%X:0x%X]\n", #off, (off),\
86 (u32)(len));\
Alan Kwong9487de22016-01-16 22:06:36 -050087 *p++ = REGDMA_OP_BLKWRITE_INC | \
88 ((off) & REGDMA_ADDR_OFFSET_MASK); \
89 *p++ = (len); \
90 } while (0)
91
92#define SDE_REGDMA_BLKWRITE_DATA(p, data) \
93 do { \
Alan Kwong6bc64622017-02-04 17:36:03 -080094 SDEROT_DBG("SDEREG.I:[:] <= 0x%X\n", (u32)(data));\
Alan Kwong9487de22016-01-16 22:06:36 -050095 *(p) = (data); \
96 (p)++; \
97 } while (0)
98
99/* Macro for directly accessing mapped registers */
100#define SDE_ROTREG_WRITE(base, off, data) \
Alan Kwong6bc64622017-02-04 17:36:03 -0800101 do { \
102 SDEROT_DBG("SDEREG.D:[%s:0x%X] <= 0x%X\n", #off, (off)\
103 , (u32)(data));\
104 writel_relaxed(data, (base + (off))); \
105 } while (0)
Alan Kwong9487de22016-01-16 22:06:36 -0500106
107#define SDE_ROTREG_READ(base, off) \
108 readl_relaxed(base + (off))
109
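/*
 * Usage sketch (illustrative only, not part of the driver): the REGDMA
 * macros above append opcodes into a u32 command buffer through a
 * moving write pointer, e.g.
 *
 *	u32 cmds[8];
 *	u32 *wrptr = cmds;
 *
 *	SDE_REGDMA_WRITE(wrptr, ROTTOP_DNSC, 0);
 *	SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, 1);
 *
 * In this file the buffer is the per-context REGDMA segment returned
 * by sde_hw_rotator_get_regdma_segment().
 */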
Alan Kwong4b416162017-08-11 21:03:10 -0400110static const u32 sde_hw_rotator_v3_inpixfmts[] = {
Alan Kwongda16e442016-08-14 20:47:18 -0400111 SDE_PIX_FMT_XRGB_8888,
112 SDE_PIX_FMT_ARGB_8888,
113 SDE_PIX_FMT_ABGR_8888,
114 SDE_PIX_FMT_RGBA_8888,
115 SDE_PIX_FMT_BGRA_8888,
116 SDE_PIX_FMT_RGBX_8888,
117 SDE_PIX_FMT_BGRX_8888,
118 SDE_PIX_FMT_XBGR_8888,
119 SDE_PIX_FMT_RGBA_5551,
120 SDE_PIX_FMT_ARGB_1555,
121 SDE_PIX_FMT_ABGR_1555,
122 SDE_PIX_FMT_BGRA_5551,
123 SDE_PIX_FMT_BGRX_5551,
124 SDE_PIX_FMT_RGBX_5551,
125 SDE_PIX_FMT_XBGR_1555,
126 SDE_PIX_FMT_XRGB_1555,
127 SDE_PIX_FMT_ARGB_4444,
128 SDE_PIX_FMT_RGBA_4444,
129 SDE_PIX_FMT_BGRA_4444,
130 SDE_PIX_FMT_ABGR_4444,
131 SDE_PIX_FMT_RGBX_4444,
132 SDE_PIX_FMT_XRGB_4444,
133 SDE_PIX_FMT_BGRX_4444,
134 SDE_PIX_FMT_XBGR_4444,
135 SDE_PIX_FMT_RGB_888,
136 SDE_PIX_FMT_BGR_888,
137 SDE_PIX_FMT_RGB_565,
138 SDE_PIX_FMT_BGR_565,
139 SDE_PIX_FMT_Y_CB_CR_H2V2,
140 SDE_PIX_FMT_Y_CR_CB_H2V2,
141 SDE_PIX_FMT_Y_CR_CB_GH2V2,
142 SDE_PIX_FMT_Y_CBCR_H2V2,
143 SDE_PIX_FMT_Y_CRCB_H2V2,
144 SDE_PIX_FMT_Y_CBCR_H1V2,
145 SDE_PIX_FMT_Y_CRCB_H1V2,
146 SDE_PIX_FMT_Y_CBCR_H2V1,
147 SDE_PIX_FMT_Y_CRCB_H2V1,
148 SDE_PIX_FMT_YCBYCR_H2V1,
149 SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
150 SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
151 SDE_PIX_FMT_RGBA_8888_UBWC,
152 SDE_PIX_FMT_RGBX_8888_UBWC,
153 SDE_PIX_FMT_RGB_565_UBWC,
154 SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
155 SDE_PIX_FMT_RGBA_1010102,
156 SDE_PIX_FMT_RGBX_1010102,
157 SDE_PIX_FMT_ARGB_2101010,
158 SDE_PIX_FMT_XRGB_2101010,
159 SDE_PIX_FMT_BGRA_1010102,
160 SDE_PIX_FMT_BGRX_1010102,
161 SDE_PIX_FMT_ABGR_2101010,
162 SDE_PIX_FMT_XBGR_2101010,
163 SDE_PIX_FMT_RGBA_1010102_UBWC,
164 SDE_PIX_FMT_RGBX_1010102_UBWC,
165 SDE_PIX_FMT_Y_CBCR_H2V2_P010,
166 SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
167 SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
168};
169
Alan Kwong4b416162017-08-11 21:03:10 -0400170static const u32 sde_hw_rotator_v3_outpixfmts[] = {
Alan Kwongda16e442016-08-14 20:47:18 -0400171 SDE_PIX_FMT_XRGB_8888,
172 SDE_PIX_FMT_ARGB_8888,
173 SDE_PIX_FMT_ABGR_8888,
174 SDE_PIX_FMT_RGBA_8888,
175 SDE_PIX_FMT_BGRA_8888,
176 SDE_PIX_FMT_RGBX_8888,
177 SDE_PIX_FMT_BGRX_8888,
178 SDE_PIX_FMT_XBGR_8888,
179 SDE_PIX_FMT_RGBA_5551,
180 SDE_PIX_FMT_ARGB_1555,
181 SDE_PIX_FMT_ABGR_1555,
182 SDE_PIX_FMT_BGRA_5551,
183 SDE_PIX_FMT_BGRX_5551,
184 SDE_PIX_FMT_RGBX_5551,
185 SDE_PIX_FMT_XBGR_1555,
186 SDE_PIX_FMT_XRGB_1555,
187 SDE_PIX_FMT_ARGB_4444,
188 SDE_PIX_FMT_RGBA_4444,
189 SDE_PIX_FMT_BGRA_4444,
190 SDE_PIX_FMT_ABGR_4444,
191 SDE_PIX_FMT_RGBX_4444,
192 SDE_PIX_FMT_XRGB_4444,
193 SDE_PIX_FMT_BGRX_4444,
194 SDE_PIX_FMT_XBGR_4444,
195 SDE_PIX_FMT_RGB_888,
196 SDE_PIX_FMT_BGR_888,
197 SDE_PIX_FMT_RGB_565,
198 SDE_PIX_FMT_BGR_565,
199 /* SDE_PIX_FMT_Y_CB_CR_H2V2 */
200 /* SDE_PIX_FMT_Y_CR_CB_H2V2 */
201 /* SDE_PIX_FMT_Y_CR_CB_GH2V2 */
202 SDE_PIX_FMT_Y_CBCR_H2V2,
203 SDE_PIX_FMT_Y_CRCB_H2V2,
204 SDE_PIX_FMT_Y_CBCR_H1V2,
205 SDE_PIX_FMT_Y_CRCB_H1V2,
206 SDE_PIX_FMT_Y_CBCR_H2V1,
207 SDE_PIX_FMT_Y_CRCB_H2V1,
208 /* SDE_PIX_FMT_YCBYCR_H2V1 */
209 SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
210 SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
211 SDE_PIX_FMT_RGBA_8888_UBWC,
212 SDE_PIX_FMT_RGBX_8888_UBWC,
213 SDE_PIX_FMT_RGB_565_UBWC,
214 SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
215 SDE_PIX_FMT_RGBA_1010102,
216 SDE_PIX_FMT_RGBX_1010102,
217 /* SDE_PIX_FMT_ARGB_2101010 */
218 /* SDE_PIX_FMT_XRGB_2101010 */
219 SDE_PIX_FMT_BGRA_1010102,
220 SDE_PIX_FMT_BGRX_1010102,
221 /* SDE_PIX_FMT_ABGR_2101010 */
222 /* SDE_PIX_FMT_XBGR_2101010 */
223 SDE_PIX_FMT_RGBA_1010102_UBWC,
224 SDE_PIX_FMT_RGBX_1010102_UBWC,
225 SDE_PIX_FMT_Y_CBCR_H2V2_P010,
226 SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
227 SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
228};
229
Alan Kwong4b416162017-08-11 21:03:10 -0400230static const u32 sde_hw_rotator_v4_inpixfmts[] = {
Alan Kwong6bc64622017-02-04 17:36:03 -0800231 SDE_PIX_FMT_XRGB_8888,
232 SDE_PIX_FMT_ARGB_8888,
233 SDE_PIX_FMT_ABGR_8888,
234 SDE_PIX_FMT_RGBA_8888,
235 SDE_PIX_FMT_BGRA_8888,
236 SDE_PIX_FMT_RGBX_8888,
237 SDE_PIX_FMT_BGRX_8888,
238 SDE_PIX_FMT_XBGR_8888,
239 SDE_PIX_FMT_RGBA_5551,
240 SDE_PIX_FMT_ARGB_1555,
241 SDE_PIX_FMT_ABGR_1555,
242 SDE_PIX_FMT_BGRA_5551,
243 SDE_PIX_FMT_BGRX_5551,
244 SDE_PIX_FMT_RGBX_5551,
245 SDE_PIX_FMT_XBGR_1555,
246 SDE_PIX_FMT_XRGB_1555,
247 SDE_PIX_FMT_ARGB_4444,
248 SDE_PIX_FMT_RGBA_4444,
249 SDE_PIX_FMT_BGRA_4444,
250 SDE_PIX_FMT_ABGR_4444,
251 SDE_PIX_FMT_RGBX_4444,
252 SDE_PIX_FMT_XRGB_4444,
253 SDE_PIX_FMT_BGRX_4444,
254 SDE_PIX_FMT_XBGR_4444,
255 SDE_PIX_FMT_RGB_888,
256 SDE_PIX_FMT_BGR_888,
257 SDE_PIX_FMT_RGB_565,
258 SDE_PIX_FMT_BGR_565,
259 SDE_PIX_FMT_Y_CB_CR_H2V2,
260 SDE_PIX_FMT_Y_CR_CB_H2V2,
261 SDE_PIX_FMT_Y_CR_CB_GH2V2,
262 SDE_PIX_FMT_Y_CBCR_H2V2,
263 SDE_PIX_FMT_Y_CRCB_H2V2,
264 SDE_PIX_FMT_Y_CBCR_H1V2,
265 SDE_PIX_FMT_Y_CRCB_H1V2,
266 SDE_PIX_FMT_Y_CBCR_H2V1,
267 SDE_PIX_FMT_Y_CRCB_H2V1,
268 SDE_PIX_FMT_YCBYCR_H2V1,
269 SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
270 SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
271 SDE_PIX_FMT_RGBA_8888_UBWC,
272 SDE_PIX_FMT_RGBX_8888_UBWC,
273 SDE_PIX_FMT_RGB_565_UBWC,
274 SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
275 SDE_PIX_FMT_RGBA_1010102,
276 SDE_PIX_FMT_RGBX_1010102,
277 SDE_PIX_FMT_ARGB_2101010,
278 SDE_PIX_FMT_XRGB_2101010,
279 SDE_PIX_FMT_BGRA_1010102,
280 SDE_PIX_FMT_BGRX_1010102,
281 SDE_PIX_FMT_ABGR_2101010,
282 SDE_PIX_FMT_XBGR_2101010,
283 SDE_PIX_FMT_RGBA_1010102_UBWC,
284 SDE_PIX_FMT_RGBX_1010102_UBWC,
285 SDE_PIX_FMT_Y_CBCR_H2V2_P010,
286 SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
287 SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
Alan Kwong2ad00bc2017-02-06 23:32:17 -0800288 SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC,
289 SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE,
Alan Kwong6bc64622017-02-04 17:36:03 -0800290 SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
291 SDE_PIX_FMT_Y_CRCB_H2V2_TILE,
292 SDE_PIX_FMT_XRGB_8888_TILE,
293 SDE_PIX_FMT_ARGB_8888_TILE,
294 SDE_PIX_FMT_ABGR_8888_TILE,
295 SDE_PIX_FMT_XBGR_8888_TILE,
296 SDE_PIX_FMT_RGBA_8888_TILE,
297 SDE_PIX_FMT_BGRA_8888_TILE,
298 SDE_PIX_FMT_RGBX_8888_TILE,
299 SDE_PIX_FMT_BGRX_8888_TILE,
300 SDE_PIX_FMT_RGBA_1010102_TILE,
301 SDE_PIX_FMT_RGBX_1010102_TILE,
302 SDE_PIX_FMT_ARGB_2101010_TILE,
303 SDE_PIX_FMT_XRGB_2101010_TILE,
304 SDE_PIX_FMT_BGRA_1010102_TILE,
305 SDE_PIX_FMT_BGRX_1010102_TILE,
306 SDE_PIX_FMT_ABGR_2101010_TILE,
307 SDE_PIX_FMT_XBGR_2101010_TILE,
308};
309
Alan Kwong4b416162017-08-11 21:03:10 -0400310static const u32 sde_hw_rotator_v4_outpixfmts[] = {
Alan Kwong6bc64622017-02-04 17:36:03 -0800311 SDE_PIX_FMT_XRGB_8888,
312 SDE_PIX_FMT_ARGB_8888,
313 SDE_PIX_FMT_ABGR_8888,
314 SDE_PIX_FMT_RGBA_8888,
315 SDE_PIX_FMT_BGRA_8888,
316 SDE_PIX_FMT_RGBX_8888,
317 SDE_PIX_FMT_BGRX_8888,
318 SDE_PIX_FMT_XBGR_8888,
319 SDE_PIX_FMT_RGBA_5551,
320 SDE_PIX_FMT_ARGB_1555,
321 SDE_PIX_FMT_ABGR_1555,
322 SDE_PIX_FMT_BGRA_5551,
323 SDE_PIX_FMT_BGRX_5551,
324 SDE_PIX_FMT_RGBX_5551,
325 SDE_PIX_FMT_XBGR_1555,
326 SDE_PIX_FMT_XRGB_1555,
327 SDE_PIX_FMT_ARGB_4444,
328 SDE_PIX_FMT_RGBA_4444,
329 SDE_PIX_FMT_BGRA_4444,
330 SDE_PIX_FMT_ABGR_4444,
331 SDE_PIX_FMT_RGBX_4444,
332 SDE_PIX_FMT_XRGB_4444,
333 SDE_PIX_FMT_BGRX_4444,
334 SDE_PIX_FMT_XBGR_4444,
335 SDE_PIX_FMT_RGB_888,
336 SDE_PIX_FMT_BGR_888,
337 SDE_PIX_FMT_RGB_565,
338 SDE_PIX_FMT_BGR_565,
339 /* SDE_PIX_FMT_Y_CB_CR_H2V2 */
340 /* SDE_PIX_FMT_Y_CR_CB_H2V2 */
341 /* SDE_PIX_FMT_Y_CR_CB_GH2V2 */
342 SDE_PIX_FMT_Y_CBCR_H2V2,
343 SDE_PIX_FMT_Y_CRCB_H2V2,
344 SDE_PIX_FMT_Y_CBCR_H1V2,
345 SDE_PIX_FMT_Y_CRCB_H1V2,
346 SDE_PIX_FMT_Y_CBCR_H2V1,
347 SDE_PIX_FMT_Y_CRCB_H2V1,
348 /* SDE_PIX_FMT_YCBYCR_H2V1 */
349 SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
350 SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
351 SDE_PIX_FMT_RGBA_8888_UBWC,
352 SDE_PIX_FMT_RGBX_8888_UBWC,
353 SDE_PIX_FMT_RGB_565_UBWC,
354 SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
355 SDE_PIX_FMT_RGBA_1010102,
356 SDE_PIX_FMT_RGBX_1010102,
Alan Kwong7f59c872017-05-31 11:36:11 -0400357 SDE_PIX_FMT_ARGB_2101010,
358 SDE_PIX_FMT_XRGB_2101010,
Alan Kwong6bc64622017-02-04 17:36:03 -0800359 SDE_PIX_FMT_BGRA_1010102,
360 SDE_PIX_FMT_BGRX_1010102,
Alan Kwong7f59c872017-05-31 11:36:11 -0400361 SDE_PIX_FMT_ABGR_2101010,
362 SDE_PIX_FMT_XBGR_2101010,
Alan Kwong6bc64622017-02-04 17:36:03 -0800363 SDE_PIX_FMT_RGBA_1010102_UBWC,
364 SDE_PIX_FMT_RGBX_1010102_UBWC,
365 SDE_PIX_FMT_Y_CBCR_H2V2_P010,
366 SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
367 SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
Alan Kwong2ad00bc2017-02-06 23:32:17 -0800368 SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC,
369 SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE,
Alan Kwong6bc64622017-02-04 17:36:03 -0800370 SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
371 SDE_PIX_FMT_Y_CRCB_H2V2_TILE,
372 SDE_PIX_FMT_XRGB_8888_TILE,
373 SDE_PIX_FMT_ARGB_8888_TILE,
374 SDE_PIX_FMT_ABGR_8888_TILE,
375 SDE_PIX_FMT_XBGR_8888_TILE,
376 SDE_PIX_FMT_RGBA_8888_TILE,
377 SDE_PIX_FMT_BGRA_8888_TILE,
378 SDE_PIX_FMT_RGBX_8888_TILE,
379 SDE_PIX_FMT_BGRX_8888_TILE,
380 SDE_PIX_FMT_RGBA_1010102_TILE,
381 SDE_PIX_FMT_RGBX_1010102_TILE,
382 SDE_PIX_FMT_ARGB_2101010_TILE,
383 SDE_PIX_FMT_XRGB_2101010_TILE,
384 SDE_PIX_FMT_BGRA_1010102_TILE,
385 SDE_PIX_FMT_BGRX_1010102_TILE,
386 SDE_PIX_FMT_ABGR_2101010_TILE,
387 SDE_PIX_FMT_XBGR_2101010_TILE,
388};
389
Alan Kwong4b416162017-08-11 21:03:10 -0400390static const u32 sde_hw_rotator_v4_inpixfmts_sbuf[] = {
391 SDE_PIX_FMT_Y_CBCR_H2V2_P010,
392 SDE_PIX_FMT_Y_CBCR_H2V2,
393 SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
394 SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC,
395 SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
396 SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
397 SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE,
398 SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
399};
400
401static const u32 sde_hw_rotator_v4_outpixfmts_sbuf[] = {
402 SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
403 SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE,
404 SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
405};
406
Benjamin Chan53e3bce2016-08-31 14:43:29 -0400407static struct sde_rot_vbif_debug_bus nrt_vbif_dbg_bus_r3[] = {
Benjamin Chan2d6411a2017-03-28 18:01:53 -0400408 {0x214, 0x21c, 16, 1, 0x200}, /* arb clients main */
Benjamin Chan53e3bce2016-08-31 14:43:29 -0400409 {0x214, 0x21c, 0, 12, 0x13}, /* xin blocks - axi side */
410 {0x21c, 0x214, 0, 12, 0xc}, /* xin blocks - clock side */
411};
412
Benjamin Chan2d6411a2017-03-28 18:01:53 -0400413static struct sde_rot_debug_bus rot_dbgbus_r3[] = {
414 /*
415 * rottop - 0xA8850
416 */
417 /* REGDMA */
418 { 0XA8850, 0, 0 },
419 { 0XA8850, 0, 1 },
420 { 0XA8850, 0, 2 },
421 { 0XA8850, 0, 3 },
422 { 0XA8850, 0, 4 },
423
424 /* ROT_WB */
425 { 0XA8850, 1, 0 },
426 { 0XA8850, 1, 1 },
427 { 0XA8850, 1, 2 },
428 { 0XA8850, 1, 3 },
429 { 0XA8850, 1, 4 },
430 { 0XA8850, 1, 5 },
431 { 0XA8850, 1, 6 },
432 { 0XA8850, 1, 7 },
433
434 /* UBWC_DEC */
435 { 0XA8850, 2, 0 },
436
437 /* UBWC_ENC */
438 { 0XA8850, 3, 0 },
439
440 /* ROT_FETCH_0 */
441 { 0XA8850, 4, 0 },
442 { 0XA8850, 4, 1 },
443 { 0XA8850, 4, 2 },
444 { 0XA8850, 4, 3 },
445 { 0XA8850, 4, 4 },
446 { 0XA8850, 4, 5 },
447 { 0XA8850, 4, 6 },
448 { 0XA8850, 4, 7 },
449
450 /* ROT_FETCH_1 */
451 { 0XA8850, 5, 0 },
452 { 0XA8850, 5, 1 },
453 { 0XA8850, 5, 2 },
454 { 0XA8850, 5, 3 },
455 { 0XA8850, 5, 4 },
456 { 0XA8850, 5, 5 },
457 { 0XA8850, 5, 6 },
458 { 0XA8850, 5, 7 },
459
460 /* ROT_FETCH_2 */
461 { 0XA8850, 6, 0 },
462 { 0XA8850, 6, 1 },
463 { 0XA8850, 6, 2 },
464 { 0XA8850, 6, 3 },
465 { 0XA8850, 6, 4 },
466 { 0XA8850, 6, 5 },
467 { 0XA8850, 6, 6 },
468 { 0XA8850, 6, 7 },
469
470 /* ROT_FETCH_3 */
471 { 0XA8850, 7, 0 },
472 { 0XA8850, 7, 1 },
473 { 0XA8850, 7, 2 },
474 { 0XA8850, 7, 3 },
475 { 0XA8850, 7, 4 },
476 { 0XA8850, 7, 5 },
477 { 0XA8850, 7, 6 },
478 { 0XA8850, 7, 7 },
479
480 /* ROT_FETCH_4 */
481 { 0XA8850, 8, 0 },
482 { 0XA8850, 8, 1 },
483 { 0XA8850, 8, 2 },
484 { 0XA8850, 8, 3 },
485 { 0XA8850, 8, 4 },
486 { 0XA8850, 8, 5 },
487 { 0XA8850, 8, 6 },
488 { 0XA8850, 8, 7 },
489
490 /* ROT_UNPACK_0*/
491 { 0XA8850, 9, 0 },
492 { 0XA8850, 9, 1 },
493 { 0XA8850, 9, 2 },
494 { 0XA8850, 9, 3 },
495};
496
Benjamin Chan53e3bce2016-08-31 14:43:29 -0400497static struct sde_rot_regdump sde_rot_r3_regdump[] = {
498 { "SDEROT_ROTTOP", SDE_ROT_ROTTOP_OFFSET, 0x100, SDE_ROT_REGDUMP_READ },
499 { "SDEROT_SSPP", SDE_ROT_SSPP_OFFSET, 0x200, SDE_ROT_REGDUMP_READ },
500 { "SDEROT_WB", SDE_ROT_WB_OFFSET, 0x300, SDE_ROT_REGDUMP_READ },
501 { "SDEROT_REGDMA_CSR", SDE_ROT_REGDMA_OFFSET, 0x100,
502 SDE_ROT_REGDUMP_READ },
503 /*
 504	 * Need to perform a SW reset of REGDMA in order to access the
 505	 * REGDMA RAM, especially if REGDMA is waiting for Rotator IDLE.
 506	 * REGDMA RAM should be dumped last.
507 */
508 { "SDEROT_REGDMA_RESET", ROTTOP_SW_RESET_OVERRIDE, 1,
509 SDE_ROT_REGDUMP_WRITE },
510 { "SDEROT_REGDMA_RAM", SDE_ROT_REGDMA_RAM_OFFSET, 0x2000,
511 SDE_ROT_REGDUMP_READ },
Benjamin Chan59a06052017-01-12 18:06:03 -0500512 { "SDEROT_VBIF_NRT", SDE_ROT_VBIF_NRT_OFFSET, 0x590,
513 SDE_ROT_REGDUMP_VBIF },
Benjamin Chan53e3bce2016-08-31 14:43:29 -0400514};
515
Veera Sundaram Sankaran3f0141e2017-05-10 18:19:29 -0700516struct sde_rot_cdp_params {
517 bool enable;
518 struct sde_mdp_format_params *fmt;
519 u32 offset;
520};
521
Alan Kwong818b7fc2016-07-24 22:07:41 -0400522/* Invalid software timestamp value for initialization */
523#define SDE_REGDMA_SWTS_INVALID (~0)
524
525/**
526 * sde_hw_rotator_elapsed_swts - Find difference of 2 software timestamps
527 * @ts_curr: current software timestamp
528 * @ts_prev: previous software timestamp
529 * @return: the amount ts_curr is ahead of ts_prev
530 */
531static int sde_hw_rotator_elapsed_swts(u32 ts_curr, u32 ts_prev)
532{
533 u32 diff = (ts_curr - ts_prev) & SDE_REGDMA_SWTS_MASK;
534
535 return sign_extend32(diff, (SDE_REGDMA_SWTS_SHIFT - 1));
536}
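/*
 * Worked example (illustrative; a 10-bit SWTS field is assumed here
 * purely for the numbers, the real width comes from
 * SDE_REGDMA_SWTS_MASK/SDE_REGDMA_SWTS_SHIFT): ts_curr = 0x001 and
 * ts_prev = 0x3ff give a masked diff of 0x002, sign-extended to +2,
 * i.e. two ticks ahead across the wrap; swapping the arguments gives
 * 0x3fe, sign-extended to -2.
 */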
537
538/**
539 * sde_hw_rotator_pending_swts - Check if the given context is still pending
540 * @rot: Pointer to hw rotator
541 * @ctx: Pointer to rotator context
542 * @pswts: Pointer to returned reference software timestamp, optional
543 * @return: true if context has pending requests
544 */
545static int sde_hw_rotator_pending_swts(struct sde_hw_rotator *rot,
546 struct sde_hw_rotator_context *ctx, u32 *pswts)
547{
548 u32 swts;
549 int ts_diff;
550 bool pending;
551
552 if (ctx->last_regdma_timestamp == SDE_REGDMA_SWTS_INVALID)
553 swts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);
554 else
555 swts = ctx->last_regdma_timestamp;
556
557 if (ctx->q_id == ROT_QUEUE_LOW_PRIORITY)
558 swts >>= SDE_REGDMA_SWTS_SHIFT;
559
560 swts &= SDE_REGDMA_SWTS_MASK;
561
562 ts_diff = sde_hw_rotator_elapsed_swts(ctx->timestamp, swts);
563
564 if (pswts)
565 *pswts = swts;
566
 567	pending = ts_diff > 0;
568
569 SDEROT_DBG("ts:0x%x, queue_id:%d, swts:0x%x, pending:%d\n",
570 ctx->timestamp, ctx->q_id, swts, pending);
Benjamin Chan0f9e61d2016-09-16 16:01:09 -0400571 SDEROT_EVTLOG(ctx->timestamp, swts, ctx->q_id, ts_diff);
Alan Kwong818b7fc2016-07-24 22:07:41 -0400572 return pending;
573}
574
575/**
Alan Kwong6bc64622017-02-04 17:36:03 -0800576 * sde_hw_rotator_update_swts - update software timestamp with given value
577 * @rot: Pointer to hw rotator
 578 * @ctx: Pointer to rotator context
579 * @swts: new software timestamp
580 * @return: new combined swts
581 */
582static u32 sde_hw_rotator_update_swts(struct sde_hw_rotator *rot,
583 struct sde_hw_rotator_context *ctx, u32 swts)
584{
585 u32 mask = SDE_REGDMA_SWTS_MASK;
586
587 swts &= SDE_REGDMA_SWTS_MASK;
588 if (ctx->q_id == ROT_QUEUE_LOW_PRIORITY) {
589 swts <<= SDE_REGDMA_SWTS_SHIFT;
590 mask <<= SDE_REGDMA_SWTS_SHIFT;
591 }
592
593 swts |= (SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG) & ~mask);
594 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG, swts);
595
596 return swts;
597}
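/*
 * Note: both priority queues share REGDMA_TIMESTAMP_REG. The low
 * priority queue keeps its SWTS in the upper field (shifted by
 * SDE_REGDMA_SWTS_SHIFT) while the high priority queue uses the lower
 * field, so only the caller's half is replaced above and the other
 * half is preserved by the read-modify-write.
 */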
598
599/**
Alan Kwong818b7fc2016-07-24 22:07:41 -0400600 * sde_hw_rotator_enable_irq - Enable hw rotator interrupt with ref. count
601 * Also, clear rotator/regdma irq status.
602 * @rot: Pointer to hw rotator
603 */
604static void sde_hw_rotator_enable_irq(struct sde_hw_rotator *rot)
605{
606 SDEROT_DBG("irq_num:%d enabled:%d\n", rot->irq_num,
607 atomic_read(&rot->irq_enabled));
608
609 if (!atomic_read(&rot->irq_enabled)) {
610 if (rot->mode == ROT_REGDMA_OFF)
611 SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
612 ROT_DONE_MASK);
613 else
614 SDE_ROTREG_WRITE(rot->mdss_base,
615 REGDMA_CSR_REGDMA_INT_CLEAR, REGDMA_INT_MASK);
616
617 enable_irq(rot->irq_num);
618 }
619 atomic_inc(&rot->irq_enabled);
620}
621
622/**
623 * sde_hw_rotator_disable_irq - Disable hw rotator interrupt with ref. count
624 * Also, clear rotator/regdma irq enable masks.
625 * @rot: Pointer to hw rotator
626 */
627static void sde_hw_rotator_disable_irq(struct sde_hw_rotator *rot)
628{
629 SDEROT_DBG("irq_num:%d enabled:%d\n", rot->irq_num,
630 atomic_read(&rot->irq_enabled));
631
632 if (!atomic_read(&rot->irq_enabled)) {
633 SDEROT_ERR("irq %d is already disabled\n", rot->irq_num);
634 return;
635 }
636
637 if (!atomic_dec_return(&rot->irq_enabled)) {
638 if (rot->mode == ROT_REGDMA_OFF)
639 SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_EN, 0);
640 else
641 SDE_ROTREG_WRITE(rot->mdss_base,
642 REGDMA_CSR_REGDMA_INT_EN, 0);
643 /* disable irq after last pending irq is handled, if any */
644 synchronize_irq(rot->irq_num);
645 disable_irq_nosync(rot->irq_num);
646 }
647}
648
649/**
650 * sde_hw_rotator_dump_status - Dump hw rotator status on error
651 * @rot: Pointer to hw rotator
652 */
653static void sde_hw_rotator_dump_status(struct sde_hw_rotator *rot)
654{
Benjamin Chan1b94f952017-01-23 17:42:30 -0500655 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
656
Alan Kwong818b7fc2016-07-24 22:07:41 -0400657 SDEROT_ERR(
658 "op_mode = %x, int_en = %x, int_status = %x\n",
659 SDE_ROTREG_READ(rot->mdss_base,
660 REGDMA_CSR_REGDMA_OP_MODE),
661 SDE_ROTREG_READ(rot->mdss_base,
662 REGDMA_CSR_REGDMA_INT_EN),
663 SDE_ROTREG_READ(rot->mdss_base,
664 REGDMA_CSR_REGDMA_INT_STATUS));
665
666 SDEROT_ERR(
667 "ts = %x, q0_status = %x, q1_status = %x, block_status = %x\n",
668 SDE_ROTREG_READ(rot->mdss_base,
669 REGDMA_TIMESTAMP_REG),
670 SDE_ROTREG_READ(rot->mdss_base,
671 REGDMA_CSR_REGDMA_QUEUE_0_STATUS),
672 SDE_ROTREG_READ(rot->mdss_base,
673 REGDMA_CSR_REGDMA_QUEUE_1_STATUS),
674 SDE_ROTREG_READ(rot->mdss_base,
675 REGDMA_CSR_REGDMA_BLOCK_STATUS));
676
677 SDEROT_ERR(
678 "invalid_cmd_offset = %x, fsm_state = %x\n",
679 SDE_ROTREG_READ(rot->mdss_base,
680 REGDMA_CSR_REGDMA_INVALID_CMD_RAM_OFFSET),
681 SDE_ROTREG_READ(rot->mdss_base,
682 REGDMA_CSR_REGDMA_FSM_STATE));
Benjamin Chan59a06052017-01-12 18:06:03 -0500683
684 SDEROT_ERR(
685 "UBWC decode status = %x, UBWC encode status = %x\n",
686 SDE_ROTREG_READ(rot->mdss_base, ROT_SSPP_UBWC_ERROR_STATUS),
687 SDE_ROTREG_READ(rot->mdss_base, ROT_WB_UBWC_ERROR_STATUS));
Benjamin Chan1b94f952017-01-23 17:42:30 -0500688
689 SDEROT_ERR("VBIF XIN HALT status = %x VBIF AXI HALT status = %x\n",
690 SDE_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL1),
691 SDE_VBIF_READ(mdata, MMSS_VBIF_AXI_HALT_CTRL1));
Alan Kwong6bc64622017-02-04 17:36:03 -0800692
693 SDEROT_ERR(
694 "sbuf_status_plane0 = %x, sbuf_status_plane1 = %x\n",
695 SDE_ROTREG_READ(rot->mdss_base,
696 ROT_WB_SBUF_STATUS_PLANE0),
697 SDE_ROTREG_READ(rot->mdss_base,
698 ROT_WB_SBUF_STATUS_PLANE1));
Alan Kwong818b7fc2016-07-24 22:07:41 -0400699}
700
Alan Kwong9487de22016-01-16 22:06:36 -0500701/**
702 * sde_hw_rotator_get_ctx(): Retrieve rotator context from rotator HW based
703 * on provided session_id. Each rotator has a different session_id.
Clarence Ip9e6c3302017-06-02 11:02:57 -0400704 * @rot: Pointer to rotator hw
705 * @session_id: Identifier for rotator session
706 * @sequence_id: Identifier for rotation request within the session
707 * @q_id: Rotator queue identifier
Alan Kwong9487de22016-01-16 22:06:36 -0500708 */
709static struct sde_hw_rotator_context *sde_hw_rotator_get_ctx(
Clarence Ip9e6c3302017-06-02 11:02:57 -0400710 struct sde_hw_rotator *rot, u32 session_id, u32 sequence_id,
Alan Kwong9487de22016-01-16 22:06:36 -0500711 enum sde_rot_queue_prio q_id)
712{
713 int i;
714 struct sde_hw_rotator_context *ctx = NULL;
715
716 for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++) {
717 ctx = rot->rotCtx[q_id][i];
718
Clarence Ip9e6c3302017-06-02 11:02:57 -0400719 if (ctx && (ctx->session_id == session_id) &&
720 (ctx->sequence_id == sequence_id)) {
Alan Kwong9487de22016-01-16 22:06:36 -0500721 SDEROT_DBG(
Clarence Ip9e6c3302017-06-02 11:02:57 -0400722 "rotCtx slot[%d][%d] ==> ctx:%p | session-id:%d | sequence-id:%d\n",
723 q_id, i, ctx, ctx->session_id,
724 ctx->sequence_id);
Alan Kwong9487de22016-01-16 22:06:36 -0500725 return ctx;
726 }
727 }
728
729 return NULL;
730}
731
732/*
733 * sde_hw_rotator_map_vaddr - map the debug buffer to kernel space
734 * @dbgbuf: Pointer to debug buffer
735 * @buf: Pointer to layer buffer structure
736 * @data: Pointer to h/w mapped buffer structure
737 */
738static void sde_hw_rotator_map_vaddr(struct sde_dbg_buf *dbgbuf,
739 struct sde_layer_buffer *buf, struct sde_mdp_data *data)
740{
741 dbgbuf->dmabuf = data->p[0].srcp_dma_buf;
742 dbgbuf->buflen = data->p[0].srcp_dma_buf->size;
743
744 dbgbuf->vaddr = NULL;
745 dbgbuf->width = buf->width;
746 dbgbuf->height = buf->height;
747
748 if (dbgbuf->dmabuf && (dbgbuf->buflen > 0)) {
Alan Kwong6ce448d2016-11-24 18:45:20 -0800749 dma_buf_begin_cpu_access(dbgbuf->dmabuf, DMA_FROM_DEVICE);
Alan Kwong9487de22016-01-16 22:06:36 -0500750 dbgbuf->vaddr = dma_buf_kmap(dbgbuf->dmabuf, 0);
751 SDEROT_DBG("vaddr mapping: 0x%p/%ld w:%d/h:%d\n",
752 dbgbuf->vaddr, dbgbuf->buflen,
753 dbgbuf->width, dbgbuf->height);
754 }
755}
756
757/*
758 * sde_hw_rotator_unmap_vaddr - unmap the debug buffer from kernel space
759 * @dbgbuf: Pointer to debug buffer
760 */
761static void sde_hw_rotator_unmap_vaddr(struct sde_dbg_buf *dbgbuf)
762{
763 if (dbgbuf->vaddr) {
764 dma_buf_kunmap(dbgbuf->dmabuf, 0, dbgbuf->vaddr);
Alan Kwong6ce448d2016-11-24 18:45:20 -0800765 dma_buf_end_cpu_access(dbgbuf->dmabuf, DMA_FROM_DEVICE);
Alan Kwong9487de22016-01-16 22:06:36 -0500766 }
767
768 dbgbuf->vaddr = NULL;
769 dbgbuf->dmabuf = NULL;
770 dbgbuf->buflen = 0;
771 dbgbuf->width = 0;
772 dbgbuf->height = 0;
773}
774
775/*
Veera Sundaram Sankarane15dd222017-04-20 08:13:08 -0700776 * sde_hw_rotator_vbif_setting - helper function to set vbif QoS remapper
 777 * levels, enable write gather, and disable clock gating for
 778 * debug purposes.
779 *
780 * @rot: Pointer to rotator hw
781 */
782static void sde_hw_rotator_vbif_setting(struct sde_hw_rotator *rot)
783{
784 u32 i, mask, vbif_qos, reg_val = 0;
785 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
786
787 /* VBIF_ROT QoS remapper setting */
788 switch (mdata->npriority_lvl) {
789
790 case SDE_MDP_VBIF_4_LEVEL_REMAPPER:
791 for (i = 0; i < mdata->npriority_lvl; i++) {
792 reg_val = SDE_VBIF_READ(mdata,
793 MMSS_VBIF_NRT_VBIF_QOS_REMAP_00 + i*4);
794 mask = 0x3 << (XIN_SSPP * 2);
795 vbif_qos = mdata->vbif_nrt_qos[i];
796 reg_val |= vbif_qos << (XIN_SSPP * 2);
797 /* ensure write is issued after the read operation */
798 mb();
799 SDE_VBIF_WRITE(mdata,
800 MMSS_VBIF_NRT_VBIF_QOS_REMAP_00 + i*4,
801 reg_val);
802 }
803 break;
804
805 case SDE_MDP_VBIF_8_LEVEL_REMAPPER:
806 mask = mdata->npriority_lvl - 1;
807 for (i = 0; i < mdata->npriority_lvl; i++) {
808 /* RD and WR client */
809 reg_val |= (mdata->vbif_nrt_qos[i] & mask)
810 << (XIN_SSPP * 4);
811 reg_val |= (mdata->vbif_nrt_qos[i] & mask)
812 << (XIN_WRITEBACK * 4);
813
814 SDE_VBIF_WRITE(mdata,
815 MMSS_VBIF_NRT_VBIF_QOS_RP_REMAP_000 + i*8,
816 reg_val);
817 SDE_VBIF_WRITE(mdata,
818 MMSS_VBIF_NRT_VBIF_QOS_LVL_REMAP_000 + i*8,
819 reg_val);
820 }
821 break;
822
823 default:
824 SDEROT_DBG("invalid vbif remapper levels\n");
825 }
826
827 /* Enable write gather for writeback to remove write gaps, which
828 * may hang AXI/BIMC/SDE.
829 */
830 SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_WRITE_GATHTER_EN,
831 BIT(XIN_WRITEBACK));
832
833 /*
 834	 * For debug purposes, disable clock gating, i.e. clocks always on
835 */
836 if (mdata->clk_always_on) {
837 SDE_VBIF_WRITE(mdata, MMSS_VBIF_CLKON, 0x3);
838 SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0, 0x3);
839 SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL1,
840 0xFFFF);
841 SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_CLK_CTRL, 1);
842 }
843}
844
845/*
Alan Kwong9487de22016-01-16 22:06:36 -0500846 * sde_hw_rotator_setup_timestamp_packet - setup timestamp writeback command
847 * @ctx: Pointer to rotator context
848 * @mask: Bit mask location of the timestamp
849 * @swts: Software timestamp
850 */
851static void sde_hw_rotator_setup_timestamp_packet(
852 struct sde_hw_rotator_context *ctx, u32 mask, u32 swts)
853{
854 u32 *wrptr;
855
856 wrptr = sde_hw_rotator_get_regdma_segment(ctx);
857
858 /*
 859	 * Create a dummy packet that writes out to one location for
 860	 * timestamp generation.
861 */
862 SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_SIZE, 6);
863 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x00010001);
864 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
865 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
866 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x00010001);
867 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
868 SDE_REGDMA_BLKWRITE_DATA(wrptr, ctx->ts_addr);
869 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_YSTRIDE0, 4);
870 SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_FORMAT, 4);
871 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x004037FF);
872 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x03020100);
873 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x80000000);
874 SDE_REGDMA_BLKWRITE_DATA(wrptr, ctx->timestamp);
Benjamin Chan15c93d82016-08-29 10:04:22 -0400875 /*
876 * Must clear secure buffer setting for SW timestamp because
 877	 * SW timestamp buffer allocation is always in a non-secure region.
878 */
879 if (ctx->is_secure) {
880 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_ADDR_SW_STATUS, 0);
881 SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ADDR_SW_STATUS, 0);
882 }
Alan Kwong9487de22016-01-16 22:06:36 -0500883 SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_WB_DST_FORMAT, 4);
884 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x000037FF);
885 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
886 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x03020100);
887 SDE_REGDMA_BLKWRITE_DATA(wrptr, ctx->ts_addr);
888 SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_YSTRIDE0, 4);
889 SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_SIZE, 0x00010001);
890 SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_IMG_SIZE, 0x00010001);
891 SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_XY, 0);
Clarence Ip77cadd12017-06-19 17:51:46 -0400892 SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_WRITE_CONFIG,
893 (ctx->rot->highest_bank & 0x3) << 8);
Alan Kwong9487de22016-01-16 22:06:36 -0500894 SDE_REGDMA_WRITE(wrptr, ROTTOP_DNSC, 0);
895 SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, 1);
896 SDE_REGDMA_MODIFY(wrptr, REGDMA_TIMESTAMP_REG, mask, swts);
897 SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, 1);
898
899 sde_hw_rotator_put_regdma_segment(ctx, wrptr);
900}
901
902/*
Veera Sundaram Sankaran3f0141e2017-05-10 18:19:29 -0700903 * sde_hw_rotator_cdp_configs - configures the CDP registers
904 * @ctx: Pointer to rotator context
905 * @params: Pointer to parameters needed for CDP configs
906 */
907static void sde_hw_rotator_cdp_configs(struct sde_hw_rotator_context *ctx,
908 struct sde_rot_cdp_params *params)
909{
910 int reg_val;
911 u32 *wrptr = sde_hw_rotator_get_regdma_segment(ctx);
912
913 if (!params->enable) {
914 SDE_REGDMA_WRITE(wrptr, params->offset, 0x0);
915 goto end;
916 }
917
918 reg_val = BIT(0); /* enable cdp */
919
920 if (sde_mdp_is_ubwc_format(params->fmt))
921 reg_val |= BIT(1); /* enable UBWC meta cdp */
922
923 if (sde_mdp_is_ubwc_format(params->fmt)
924 || sde_mdp_is_tilea4x_format(params->fmt)
925 || sde_mdp_is_tilea5x_format(params->fmt))
926 reg_val |= BIT(2); /* enable tile amortize */
927
928 reg_val |= BIT(3); /* enable preload addr ahead cnt 64 */
929
930 SDE_REGDMA_WRITE(wrptr, params->offset, reg_val);
931
932end:
933 sde_hw_rotator_put_regdma_segment(ctx, wrptr);
934}
935
936/*
Veera Sundaram Sankaranf13fb322017-05-11 15:42:28 -0700937 * sde_hw_rotator_setup_qos_lut_wr - Set QoS LUT/Danger LUT/Safe LUT configs
938 * for the WRITEBACK rotator for inline and offline rotation.
939 *
940 * @ctx: Pointer to rotator context
941 */
942static void sde_hw_rotator_setup_qos_lut_wr(struct sde_hw_rotator_context *ctx)
943{
944 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
945 u32 *wrptr = sde_hw_rotator_get_regdma_segment(ctx);
946
947 /* Offline rotation setting */
948 if (!ctx->sbuf_mode) {
949 /* QOS LUT WR setting */
950 if (test_bit(SDE_QOS_LUT, mdata->sde_qos_map)) {
951 SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_0,
952 mdata->lut_cfg[SDE_ROT_WR].creq_lut_0);
953 SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_1,
954 mdata->lut_cfg[SDE_ROT_WR].creq_lut_1);
955 }
956
957 /* Danger LUT WR setting */
958 if (test_bit(SDE_QOS_DANGER_LUT, mdata->sde_qos_map))
959 SDE_REGDMA_WRITE(wrptr, ROT_WB_DANGER_LUT,
960 mdata->lut_cfg[SDE_ROT_WR].danger_lut);
961
962 /* Safe LUT WR setting */
963 if (test_bit(SDE_QOS_SAFE_LUT, mdata->sde_qos_map))
964 SDE_REGDMA_WRITE(wrptr, ROT_WB_SAFE_LUT,
965 mdata->lut_cfg[SDE_ROT_WR].safe_lut);
966
967 /* Inline rotation setting */
968 } else {
969 /* QOS LUT WR setting */
970 if (test_bit(SDE_INLINE_QOS_LUT, mdata->sde_inline_qos_map)) {
971 SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_0,
972 mdata->inline_lut_cfg[SDE_ROT_WR].creq_lut_0);
973 SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_1,
974 mdata->inline_lut_cfg[SDE_ROT_WR].creq_lut_1);
975 }
976
977 /* Danger LUT WR setting */
978 if (test_bit(SDE_INLINE_QOS_DANGER_LUT,
979 mdata->sde_inline_qos_map))
980 SDE_REGDMA_WRITE(wrptr, ROT_WB_DANGER_LUT,
981 mdata->inline_lut_cfg[SDE_ROT_WR].danger_lut);
982
983 /* Safe LUT WR setting */
984 if (test_bit(SDE_INLINE_QOS_SAFE_LUT,
985 mdata->sde_inline_qos_map))
986 SDE_REGDMA_WRITE(wrptr, ROT_WB_SAFE_LUT,
987 mdata->inline_lut_cfg[SDE_ROT_WR].safe_lut);
988 }
989
990 /* Update command queue write ptr */
991 sde_hw_rotator_put_regdma_segment(ctx, wrptr);
992}
993
994/*
995 * sde_hw_rotator_setup_qos_lut_rd - Set QoS LUT/Danger LUT/Safe LUT configs
996 * for the SSPP rotator for inline and offline rotation.
997 *
998 * @ctx: Pointer to rotator context
999 */
1000static void sde_hw_rotator_setup_qos_lut_rd(struct sde_hw_rotator_context *ctx)
1001{
1002 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
1003 u32 *wrptr = sde_hw_rotator_get_regdma_segment(ctx);
1004
1005 /* Offline rotation setting */
1006 if (!ctx->sbuf_mode) {
1007 /* QOS LUT RD setting */
1008 if (test_bit(SDE_QOS_LUT, mdata->sde_qos_map)) {
1009 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_0,
1010 mdata->lut_cfg[SDE_ROT_RD].creq_lut_0);
1011 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_1,
1012 mdata->lut_cfg[SDE_ROT_RD].creq_lut_1);
1013 }
1014
1015 /* Danger LUT RD setting */
1016 if (test_bit(SDE_QOS_DANGER_LUT, mdata->sde_qos_map))
1017 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_DANGER_LUT,
1018 mdata->lut_cfg[SDE_ROT_RD].danger_lut);
1019
1020 /* Safe LUT RD setting */
1021 if (test_bit(SDE_QOS_SAFE_LUT, mdata->sde_qos_map))
1022 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SAFE_LUT,
1023 mdata->lut_cfg[SDE_ROT_RD].safe_lut);
1024
1025 /* inline rotation setting */
1026 } else {
1027 /* QOS LUT RD setting */
1028 if (test_bit(SDE_INLINE_QOS_LUT, mdata->sde_inline_qos_map)) {
1029 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_0,
1030 mdata->inline_lut_cfg[SDE_ROT_RD].creq_lut_0);
1031 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_1,
1032 mdata->inline_lut_cfg[SDE_ROT_RD].creq_lut_1);
1033 }
1034
1035 /* Danger LUT RD setting */
1036 if (test_bit(SDE_INLINE_QOS_DANGER_LUT,
1037 mdata->sde_inline_qos_map))
1038 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_DANGER_LUT,
1039 mdata->inline_lut_cfg[SDE_ROT_RD].danger_lut);
1040
1041 /* Safe LUT RD setting */
1042 if (test_bit(SDE_INLINE_QOS_SAFE_LUT,
1043 mdata->sde_inline_qos_map))
1044 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SAFE_LUT,
1045 mdata->inline_lut_cfg[SDE_ROT_RD].safe_lut);
1046 }
1047
1048 /* Update command queue write ptr */
1049 sde_hw_rotator_put_regdma_segment(ctx, wrptr);
1050}
1051
1052/*
Alan Kwong9487de22016-01-16 22:06:36 -05001053 * sde_hw_rotator_setup_fetchengine - setup fetch engine
1054 * @ctx: Pointer to rotator context
1055 * @queue_id: Priority queue identifier
1056 * @cfg: Fetch configuration
1057 * @danger_lut: real-time QoS LUT for danger setting (not used)
1058 * @safe_lut: real-time QoS LUT for safe setting (not used)
Benjamin Chanfb6faa32016-08-16 17:21:01 -04001059 * @dnsc_factor_w: downscale factor for width
1060 * @dnsc_factor_h: downscale factor for height
Alan Kwong9487de22016-01-16 22:06:36 -05001061 * @flags: Control flag
1062 */
1063static void sde_hw_rotator_setup_fetchengine(struct sde_hw_rotator_context *ctx,
1064 enum sde_rot_queue_prio queue_id,
1065 struct sde_hw_rot_sspp_cfg *cfg, u32 danger_lut, u32 safe_lut,
Benjamin Chanfb6faa32016-08-16 17:21:01 -04001066 u32 dnsc_factor_w, u32 dnsc_factor_h, u32 flags)
Alan Kwong9487de22016-01-16 22:06:36 -05001067{
1068 struct sde_hw_rotator *rot = ctx->rot;
1069 struct sde_mdp_format_params *fmt;
1070 struct sde_mdp_data *data;
Veera Sundaram Sankaran3f0141e2017-05-10 18:19:29 -07001071 struct sde_rot_cdp_params cdp_params = {0};
Benjamin Chanfb6faa32016-08-16 17:21:01 -04001072 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
Alan Kwong9487de22016-01-16 22:06:36 -05001073 u32 *wrptr;
1074 u32 opmode = 0;
1075 u32 chroma_samp = 0;
1076 u32 src_format = 0;
1077 u32 unpack = 0;
1078 u32 width = cfg->img_width;
1079 u32 height = cfg->img_height;
1080 u32 fetch_blocksize = 0;
1081 int i;
1082
1083 if (ctx->rot->mode == ROT_REGDMA_ON) {
1084 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_INT_EN,
1085 REGDMA_INT_MASK);
1086 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_OP_MODE,
1087 REGDMA_EN);
1088 }
1089
1090 wrptr = sde_hw_rotator_get_regdma_segment(ctx);
1091
Alan Kwong5b4d71b2017-02-10 20:52:59 -08001092 /*
1093 * initialize start control trigger selection first
1094 */
1095 if (test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map)) {
1096 if (ctx->sbuf_mode)
1097 SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL,
1098 ctx->start_ctrl);
1099 else
1100 SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, 0);
1101 }
1102
Alan Kwong9487de22016-01-16 22:06:36 -05001103 /* source image setup */
1104 if ((flags & SDE_ROT_FLAG_DEINTERLACE)
1105 && !(flags & SDE_ROT_FLAG_SOURCE_ROTATED_90)) {
1106 for (i = 0; i < cfg->src_plane.num_planes; i++)
1107 cfg->src_plane.ystride[i] *= 2;
1108 width *= 2;
1109 height /= 2;
1110 }
1111
1112 /*
1113 * REGDMA BLK write from SRC_SIZE to OP_MODE, total 15 registers
1114 */
1115 SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_SIZE, 15);
1116
1117 /* SRC_SIZE, SRC_IMG_SIZE, SRC_XY, OUT_SIZE, OUT_XY */
1118 SDE_REGDMA_BLKWRITE_DATA(wrptr,
1119 cfg->src_rect->w | (cfg->src_rect->h << 16));
1120 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0); /* SRC_IMG_SIZE unused */
1121 SDE_REGDMA_BLKWRITE_DATA(wrptr,
1122 cfg->src_rect->x | (cfg->src_rect->y << 16));
1123 SDE_REGDMA_BLKWRITE_DATA(wrptr,
1124 cfg->src_rect->w | (cfg->src_rect->h << 16));
1125 SDE_REGDMA_BLKWRITE_DATA(wrptr,
1126 cfg->src_rect->x | (cfg->src_rect->y << 16));
1127
1128 /* SRC_ADDR [0-3], SRC_YSTRIDE [0-1] */
1129 data = cfg->data;
1130 for (i = 0; i < SDE_ROT_MAX_PLANES; i++)
1131 SDE_REGDMA_BLKWRITE_DATA(wrptr, data->p[i].addr);
1132 SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->src_plane.ystride[0] |
1133 (cfg->src_plane.ystride[1] << 16));
1134 SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->src_plane.ystride[2] |
1135 (cfg->src_plane.ystride[3] << 16));
1136
1137 /* UNUSED, write 0 */
1138 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
1139
1140 /* setup source format */
1141 fmt = cfg->fmt;
1142
1143 chroma_samp = fmt->chroma_sample;
1144 if (flags & SDE_ROT_FLAG_SOURCE_ROTATED_90) {
1145 if (chroma_samp == SDE_MDP_CHROMA_H2V1)
1146 chroma_samp = SDE_MDP_CHROMA_H1V2;
1147 else if (chroma_samp == SDE_MDP_CHROMA_H1V2)
1148 chroma_samp = SDE_MDP_CHROMA_H2V1;
1149 }
1150
1151 src_format = (chroma_samp << 23) |
1152 (fmt->fetch_planes << 19) |
1153 (fmt->bits[C3_ALPHA] << 6) |
1154 (fmt->bits[C2_R_Cr] << 4) |
1155 (fmt->bits[C1_B_Cb] << 2) |
1156 (fmt->bits[C0_G_Y] << 0);
1157
1158 if (fmt->alpha_enable &&
1159 (fmt->fetch_planes == SDE_MDP_PLANE_INTERLEAVED))
1160 src_format |= BIT(8); /* SRCC3_EN */
1161
1162 src_format |= ((fmt->unpack_count - 1) << 12) |
1163 (fmt->unpack_tight << 17) |
1164 (fmt->unpack_align_msb << 18) |
1165 ((fmt->bpp - 1) << 9) |
1166 ((fmt->frame_format & 3) << 30);
1167
1168 if (flags & SDE_ROT_FLAG_ROT_90)
1169 src_format |= BIT(11); /* ROT90 */
1170
1171 if (sde_mdp_is_ubwc_format(fmt))
1172 opmode |= BIT(0); /* BWC_DEC_EN */
1173
1174 /* if this is YUV pixel format, enable CSC */
1175 if (sde_mdp_is_yuv_format(fmt))
1176 src_format |= BIT(15); /* SRC_COLOR_SPACE */
1177
1178 if (fmt->pixel_mode == SDE_MDP_PIXEL_10BIT)
1179 src_format |= BIT(14); /* UNPACK_DX_FORMAT */
1180
Alan Kwong3bef26f2017-02-26 15:38:09 -08001181 if (rot->solid_fill)
1182 src_format |= BIT(22); /* SOLID_FILL */
1183
Alan Kwong9487de22016-01-16 22:06:36 -05001184 /* SRC_FORMAT */
1185 SDE_REGDMA_BLKWRITE_DATA(wrptr, src_format);
1186
1187 /* setup source unpack pattern */
1188 unpack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
1189 (fmt->element[1] << 8) | (fmt->element[0] << 0);
1190
1191 /* SRC_UNPACK_PATTERN */
1192 SDE_REGDMA_BLKWRITE_DATA(wrptr, unpack);
1193
1194 /* setup source op mode */
1195 if (flags & SDE_ROT_FLAG_FLIP_LR)
1196 opmode |= BIT(13); /* FLIP_MODE L/R horizontal flip */
1197 if (flags & SDE_ROT_FLAG_FLIP_UD)
1198 opmode |= BIT(14); /* FLIP_MODE U/D vertical flip */
1199 opmode |= BIT(31); /* MDSS_MDP_OP_PE_OVERRIDE */
1200
1201 /* SRC_OP_MODE */
1202 SDE_REGDMA_BLKWRITE_DATA(wrptr, opmode);
1203
1204 /* setup source fetch config, TP10 uses different block size */
Benjamin Chanfb6faa32016-08-16 17:21:01 -04001205 if (test_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map) &&
1206 (dnsc_factor_w == 1) && (dnsc_factor_h == 1)) {
1207 if (sde_mdp_is_tp10_format(fmt))
1208 fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_144_EXT;
1209 else
1210 fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_192_EXT;
1211 } else {
1212 if (sde_mdp_is_tp10_format(fmt))
1213 fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_96;
1214 else
1215 fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_128;
1216 }
1217
Alan Kwong3bef26f2017-02-26 15:38:09 -08001218 if (rot->solid_fill)
1219 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_CONSTANT_COLOR,
1220 rot->constant_color);
1221
Alan Kwong9487de22016-01-16 22:06:36 -05001222 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_FETCH_CONFIG,
1223 fetch_blocksize |
1224 SDE_ROT_SSPP_FETCH_CONFIG_RESET_VALUE |
1225 ((rot->highest_bank & 0x3) << 18));
1226
Alan Kwongfb8eeb22017-02-06 15:00:03 -08001227 if (test_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map))
1228 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_UBWC_STATIC_CTRL, BIT(31) |
1229 ((ctx->rot->ubwc_malsize & 0x3) << 8) |
1230 ((ctx->rot->highest_bank & 0x3) << 4) |
1231 ((ctx->rot->ubwc_swizzle & 0x1) << 0));
1232
Alan Kwong9487de22016-01-16 22:06:36 -05001233 /* setup source buffer plane security status */
Abhijit Kulkarni298c8232016-09-26 22:32:10 -07001234 if (flags & (SDE_ROT_FLAG_SECURE_OVERLAY_SESSION |
1235 SDE_ROT_FLAG_SECURE_CAMERA_SESSION)) {
Alan Kwong9487de22016-01-16 22:06:36 -05001236 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_ADDR_SW_STATUS, 0xF);
1237 ctx->is_secure = true;
Benjamin Chan15c93d82016-08-29 10:04:22 -04001238 } else {
1239 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_ADDR_SW_STATUS, 0);
1240 ctx->is_secure = false;
Alan Kwong9487de22016-01-16 22:06:36 -05001241 }
1242
Veera Sundaram Sankaranf13fb322017-05-11 15:42:28 -07001243 /* Update command queue write ptr */
Veera Sundaram Sankaran3f0141e2017-05-10 18:19:29 -07001244 sde_hw_rotator_put_regdma_segment(ctx, wrptr);
1245
1246 /* CDP register RD setting */
1247 cdp_params.enable = test_bit(SDE_QOS_CDP, mdata->sde_qos_map) ?
1248 mdata->enable_cdp[SDE_ROT_RD] : false;
1249 cdp_params.fmt = fmt;
1250 cdp_params.offset = ROT_SSPP_CDP_CNTL;
1251 sde_hw_rotator_cdp_configs(ctx, &cdp_params);
1252
Veera Sundaram Sankaranf13fb322017-05-11 15:42:28 -07001253 /* QOS LUT/ Danger LUT/ Safe Lut WR setting */
1254 sde_hw_rotator_setup_qos_lut_rd(ctx);
1255
Veera Sundaram Sankaran3f0141e2017-05-10 18:19:29 -07001256 wrptr = sde_hw_rotator_get_regdma_segment(ctx);
1257
Benjamin Chan99eb63b2016-12-21 15:45:26 -05001258 /*
 1259	 * Determine if traffic shaping is required. Only enable traffic
 1260	 * shaping for 4k (UHD) content at 30fps or below. The actual
 1261	 * traffic shaping bandwidth calculation is done in output setup.
1262 */
Veera Sundaram Sankaranf13fb322017-05-11 15:42:28 -07001263 if (((!ctx->sbuf_mode)
1264 && (cfg->src_rect->w * cfg->src_rect->h) >= RES_UHD)
1265 && (cfg->fps <= 30)) {
Benjamin Chan99eb63b2016-12-21 15:45:26 -05001266 SDEROT_DBG("Enable Traffic Shaper\n");
1267 ctx->is_traffic_shaping = true;
1268 } else {
1269 SDEROT_DBG("Disable Traffic Shaper\n");
1270 ctx->is_traffic_shaping = false;
1271 }
1272
Alan Kwong9487de22016-01-16 22:06:36 -05001273 /* Update command queue write ptr */
1274 sde_hw_rotator_put_regdma_segment(ctx, wrptr);
1275}
1276
1277/*
1278 * sde_hw_rotator_setup_wbengine - setup writeback engine
1279 * @ctx: Pointer to rotator context
1280 * @queue_id: Priority queue identifier
1281 * @cfg: Writeback configuration
1282 * @flags: Control flag
1283 */
1284static void sde_hw_rotator_setup_wbengine(struct sde_hw_rotator_context *ctx,
1285 enum sde_rot_queue_prio queue_id,
1286 struct sde_hw_rot_wb_cfg *cfg,
1287 u32 flags)
1288{
Alan Kwong6bc64622017-02-04 17:36:03 -08001289 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
Alan Kwong9487de22016-01-16 22:06:36 -05001290 struct sde_mdp_format_params *fmt;
Veera Sundaram Sankaran3f0141e2017-05-10 18:19:29 -07001291 struct sde_rot_cdp_params cdp_params = {0};
Alan Kwong9487de22016-01-16 22:06:36 -05001292 u32 *wrptr;
1293 u32 pack = 0;
1294 u32 dst_format = 0;
Clarence Ip77cadd12017-06-19 17:51:46 -04001295 u32 no_partial_writes = 0;
Alan Kwong9487de22016-01-16 22:06:36 -05001296 int i;
1297
1298 wrptr = sde_hw_rotator_get_regdma_segment(ctx);
1299
1300 fmt = cfg->fmt;
1301
1302 /* setup WB DST format */
1303 dst_format |= (fmt->chroma_sample << 23) |
1304 (fmt->fetch_planes << 19) |
1305 (fmt->bits[C3_ALPHA] << 6) |
1306 (fmt->bits[C2_R_Cr] << 4) |
1307 (fmt->bits[C1_B_Cb] << 2) |
1308 (fmt->bits[C0_G_Y] << 0);
1309
1310 /* alpha control */
1311 if (fmt->bits[C3_ALPHA] || fmt->alpha_enable) {
1312 dst_format |= BIT(8);
1313 if (!fmt->alpha_enable) {
1314 dst_format |= BIT(14);
1315 SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ALPHA_X_VALUE, 0);
1316 }
1317 }
1318
1319 dst_format |= ((fmt->unpack_count - 1) << 12) |
1320 (fmt->unpack_tight << 17) |
1321 (fmt->unpack_align_msb << 18) |
1322 ((fmt->bpp - 1) << 9) |
1323 ((fmt->frame_format & 3) << 30);
1324
1325 if (sde_mdp_is_yuv_format(fmt))
1326 dst_format |= BIT(15);
1327
1328 if (fmt->pixel_mode == SDE_MDP_PIXEL_10BIT)
1329 dst_format |= BIT(21); /* PACK_DX_FORMAT */
1330
1331 /*
1332 * REGDMA BLK write, from DST_FORMAT to DST_YSTRIDE 1, total 9 regs
1333 */
1334 SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_WB_DST_FORMAT, 9);
1335
1336 /* DST_FORMAT */
1337 SDE_REGDMA_BLKWRITE_DATA(wrptr, dst_format);
1338
1339 /* DST_OP_MODE */
1340 if (sde_mdp_is_ubwc_format(fmt))
1341 SDE_REGDMA_BLKWRITE_DATA(wrptr, BIT(0));
1342 else
1343 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
1344
1345 /* DST_PACK_PATTERN */
1346 pack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
1347 (fmt->element[1] << 8) | (fmt->element[0] << 0);
1348 SDE_REGDMA_BLKWRITE_DATA(wrptr, pack);
1349
1350 /* DST_ADDR [0-3], DST_YSTRIDE [0-1] */
1351 for (i = 0; i < SDE_ROT_MAX_PLANES; i++)
1352 SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->data->p[i].addr);
1353 SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->dst_plane.ystride[0] |
1354 (cfg->dst_plane.ystride[1] << 16));
1355 SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->dst_plane.ystride[2] |
1356 (cfg->dst_plane.ystride[3] << 16));
1357
1358 /* setup WB out image size and ROI */
1359 SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_IMG_SIZE,
1360 cfg->img_width | (cfg->img_height << 16));
1361 SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_SIZE,
1362 cfg->dst_rect->w | (cfg->dst_rect->h << 16));
1363 SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_XY,
1364 cfg->dst_rect->x | (cfg->dst_rect->y << 16));
1365
Abhijit Kulkarni298c8232016-09-26 22:32:10 -07001366 if (flags & (SDE_ROT_FLAG_SECURE_OVERLAY_SESSION |
1367 SDE_ROT_FLAG_SECURE_CAMERA_SESSION))
Benjamin Chan15c93d82016-08-29 10:04:22 -04001368 SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ADDR_SW_STATUS, 0x1);
1369 else
1370 SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ADDR_SW_STATUS, 0);
1371
Alan Kwong9487de22016-01-16 22:06:36 -05001372 /*
1373 * setup Downscale factor
1374 */
1375 SDE_REGDMA_WRITE(wrptr, ROTTOP_DNSC,
1376 cfg->v_downscale_factor |
1377 (cfg->h_downscale_factor << 16));
1378
Clarence Ip22fed4c2017-05-16 15:30:51 -04001379 /* partial write check */
Clarence Ip77cadd12017-06-19 17:51:46 -04001380 if (test_bit(SDE_CAPS_PARTIALWR, mdata->sde_caps_map)) {
1381 no_partial_writes = BIT(10);
1382
1383 /*
1384 * For simplicity, don't disable partial writes if
1385 * the ROI does not span the entire width of the
1386 * output image, and require the total stride to
1387 * also be properly aligned.
1388 *
1389 * This avoids having to determine the memory access
1390 * alignment of the actual horizontal ROI on a per
1391 * color format basis.
1392 */
1393 if (sde_mdp_is_ubwc_format(fmt)) {
1394 no_partial_writes = 0x0;
1395 } else if (cfg->dst_rect->x ||
1396 cfg->dst_rect->w != cfg->img_width) {
1397 no_partial_writes = 0x0;
1398 } else {
1399 for (i = 0; i < SDE_ROT_MAX_PLANES; i++)
1400 if (cfg->dst_plane.ystride[i] &
1401 PARTIAL_WRITE_ALIGNMENT)
1402 no_partial_writes = 0x0;
1403 }
1404 }
Clarence Ip22fed4c2017-05-16 15:30:51 -04001405
Alan Kwong6bc64622017-02-04 17:36:03 -08001406 /* write config setup for bank configuration */
Clarence Ip77cadd12017-06-19 17:51:46 -04001407 SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_WRITE_CONFIG, no_partial_writes |
Alan Kwong9487de22016-01-16 22:06:36 -05001408 (ctx->rot->highest_bank & 0x3) << 8);
1409
Alan Kwongfb8eeb22017-02-06 15:00:03 -08001410 if (test_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map))
1411 SDE_REGDMA_WRITE(wrptr, ROT_WB_UBWC_STATIC_CTRL,
1412 ((ctx->rot->ubwc_malsize & 0x3) << 8) |
1413 ((ctx->rot->highest_bank & 0x3) << 4) |
1414 ((ctx->rot->ubwc_swizzle & 0x1) << 0));
1415
Alan Kwong6bc64622017-02-04 17:36:03 -08001416 if (test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map))
1417 SDE_REGDMA_WRITE(wrptr, ROT_WB_SYS_CACHE_MODE,
1418 ctx->sys_cache_mode);
1419
1420 SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, ctx->op_mode |
1421 (flags & SDE_ROT_FLAG_ROT_90 ? BIT(1) : 0) | BIT(0));
Alan Kwong9487de22016-01-16 22:06:36 -05001422
Veera Sundaram Sankaran3f0141e2017-05-10 18:19:29 -07001423 sde_hw_rotator_put_regdma_segment(ctx, wrptr);
1424
1425 /* CDP register WR setting */
1426 cdp_params.enable = test_bit(SDE_QOS_CDP, mdata->sde_qos_map) ?
1427 mdata->enable_cdp[SDE_ROT_WR] : false;
1428 cdp_params.fmt = fmt;
1429 cdp_params.offset = ROT_WB_CDP_CNTL;
1430 sde_hw_rotator_cdp_configs(ctx, &cdp_params);
1431
Veera Sundaram Sankaranf13fb322017-05-11 15:42:28 -07001432 /* QOS LUT/ Danger LUT/ Safe LUT WR setting */
1433 sde_hw_rotator_setup_qos_lut_wr(ctx);
1434
Veera Sundaram Sankaran3f0141e2017-05-10 18:19:29 -07001435 wrptr = sde_hw_rotator_get_regdma_segment(ctx);
1436
Alan Kwong498d59f2017-02-11 18:56:34 -08001437 /* setup traffic shaper for 4k 30fps content or if prefill_bw is set */
Veera Sundaram Sankaranf13fb322017-05-11 15:42:28 -07001438 if (!ctx->sbuf_mode &&
1439 (ctx->is_traffic_shaping || cfg->prefill_bw)) {
Benjamin Chan99eb63b2016-12-21 15:45:26 -05001440 u32 bw;
1441
1442 /*
 1443		 * Target to finish in 12ms, so we need to set the number of bytes
 1444		 * per clock tick for traffic shaping.
 1445		 * Each clock tick runs at 19.2MHz, so we need to know the total
 1446		 * number of clock ticks in 12ms, i.e. 12ms/(1/19.2MHz) ==> 230400
 1447		 * Finally, calculate the byte count per clock tick based on
1448 * resolution, bpp and compression ratio.
1449 */
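		/*
		 * Worked example (illustrative): a 3840x2160 4:2:0 output
		 * gives bw = 3840 * 2160 * 3 / 2 = 12,441,600 bytes per
		 * frame, which divided by 230,400 ticks programs roughly
		 * 54 bytes per 19.2MHz clock tick.
		 */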
1450 bw = cfg->dst_rect->w * cfg->dst_rect->h;
1451
1452 if (fmt->chroma_sample == SDE_MDP_CHROMA_420)
1453 bw = (bw * 3) / 2;
1454 else
1455 bw *= fmt->bpp;
1456
1457 bw /= TRAFFIC_SHAPE_CLKTICK_12MS;
Alan Kwong498d59f2017-02-11 18:56:34 -08001458
1459 /* use prefill bandwidth instead if specified */
1460 if (cfg->prefill_bw)
1461 bw = DIV_ROUND_UP(cfg->prefill_bw,
1462 TRAFFIC_SHAPE_VSYNC_CLK);
1463
Benjamin Chan99eb63b2016-12-21 15:45:26 -05001464 if (bw > 0xFF)
1465 bw = 0xFF;
Benjamin Chanf2f3e632017-07-12 10:21:39 -04001466 else if (bw == 0)
1467 bw = 1;
1468
Benjamin Chan99eb63b2016-12-21 15:45:26 -05001469 SDE_REGDMA_WRITE(wrptr, ROT_WB_TRAFFIC_SHAPER_WR_CLIENT,
Alan Kwong498d59f2017-02-11 18:56:34 -08001470 BIT(31) | (cfg->prefill_bw ? BIT(27) : 0) | bw);
Benjamin Chan99eb63b2016-12-21 15:45:26 -05001471 SDEROT_DBG("Enable ROT_WB Traffic Shaper:%d\n", bw);
1472 } else {
1473 SDE_REGDMA_WRITE(wrptr, ROT_WB_TRAFFIC_SHAPER_WR_CLIENT, 0);
1474 SDEROT_DBG("Disable ROT_WB Traffic Shaper\n");
1475 }
1476
Alan Kwong9487de22016-01-16 22:06:36 -05001477 /* Update command queue write ptr */
1478 sde_hw_rotator_put_regdma_segment(ctx, wrptr);
1479}
1480
1481/*
1482 * sde_hw_rotator_start_no_regdma - start non-regdma operation
1483 * @ctx: Pointer to rotator context
1484 * @queue_id: Priority queue identifier
1485 */
1486static u32 sde_hw_rotator_start_no_regdma(struct sde_hw_rotator_context *ctx,
1487 enum sde_rot_queue_prio queue_id)
1488{
1489 struct sde_hw_rotator *rot = ctx->rot;
1490 u32 *wrptr;
1491 u32 *rdptr;
1492 u8 *addr;
1493 u32 mask;
1494 u32 blksize;
1495
1496 rdptr = sde_hw_rotator_get_regdma_segment_base(ctx);
1497 wrptr = sde_hw_rotator_get_regdma_segment(ctx);
1498
1499 if (rot->irq_num >= 0) {
1500 SDE_REGDMA_WRITE(wrptr, ROTTOP_INTR_EN, 1);
1501 SDE_REGDMA_WRITE(wrptr, ROTTOP_INTR_CLEAR, 1);
1502 reinit_completion(&ctx->rot_comp);
Alan Kwong818b7fc2016-07-24 22:07:41 -04001503 sde_hw_rotator_enable_irq(rot);
Alan Kwong9487de22016-01-16 22:06:36 -05001504 }
1505
Alan Kwong6bc64622017-02-04 17:36:03 -08001506 SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, ctx->start_ctrl);
Alan Kwong9487de22016-01-16 22:06:36 -05001507
1508 /* Update command queue write ptr */
1509 sde_hw_rotator_put_regdma_segment(ctx, wrptr);
1510
1511 SDEROT_DBG("BEGIN %d\n", ctx->timestamp);
 1512	/* Write the whole command stream to the rotator blocks */
 1513	/* The rotator starts right away once the command stream is written */
1514 while (rdptr < wrptr) {
1515 u32 op = REGDMA_OP_MASK & *rdptr;
1516
1517 switch (op) {
1518 case REGDMA_OP_NOP:
1519 SDEROT_DBG("NOP\n");
1520 rdptr++;
1521 break;
1522 case REGDMA_OP_REGWRITE:
1523 SDEROT_DBG("REGW %6.6x %8.8x\n",
1524 rdptr[0] & REGDMA_ADDR_OFFSET_MASK,
1525 rdptr[1]);
1526 addr = rot->mdss_base +
1527 (*rdptr++ & REGDMA_ADDR_OFFSET_MASK);
1528 writel_relaxed(*rdptr++, addr);
1529 break;
1530 case REGDMA_OP_REGMODIFY:
1531 SDEROT_DBG("REGM %6.6x %8.8x %8.8x\n",
1532 rdptr[0] & REGDMA_ADDR_OFFSET_MASK,
1533 rdptr[1], rdptr[2]);
1534 addr = rot->mdss_base +
1535 (*rdptr++ & REGDMA_ADDR_OFFSET_MASK);
1536 mask = *rdptr++;
1537 writel_relaxed((readl_relaxed(addr) & mask) | *rdptr++,
1538 addr);
1539 break;
1540 case REGDMA_OP_BLKWRITE_SINGLE:
1541 SDEROT_DBG("BLKWS %6.6x %6.6x\n",
1542 rdptr[0] & REGDMA_ADDR_OFFSET_MASK,
1543 rdptr[1]);
1544 addr = rot->mdss_base +
1545 (*rdptr++ & REGDMA_ADDR_OFFSET_MASK);
1546 blksize = *rdptr++;
1547 while (blksize--) {
1548 SDEROT_DBG("DATA %8.8x\n", rdptr[0]);
1549 writel_relaxed(*rdptr++, addr);
1550 }
1551 break;
1552 case REGDMA_OP_BLKWRITE_INC:
1553 SDEROT_DBG("BLKWI %6.6x %6.6x\n",
1554 rdptr[0] & REGDMA_ADDR_OFFSET_MASK,
1555 rdptr[1]);
1556 addr = rot->mdss_base +
1557 (*rdptr++ & REGDMA_ADDR_OFFSET_MASK);
1558 blksize = *rdptr++;
1559 while (blksize--) {
1560 SDEROT_DBG("DATA %8.8x\n", rdptr[0]);
1561 writel_relaxed(*rdptr++, addr);
1562 addr += 4;
1563 }
1564 break;
1565 default:
 1566			/* Other OP modes are not supported;
 1567			 * skip data for unrecognized OP modes for now
1568 */
1569 SDEROT_DBG("UNDEFINED\n");
1570 rdptr++;
1571 break;
1572 }
1573 }
1574 SDEROT_DBG("END %d\n", ctx->timestamp);
1575
1576 return ctx->timestamp;
1577}
1578
1579/*
1580 * sde_hw_rotator_start_regdma - start regdma operation
1581 * @ctx: Pointer to rotator context
1582 * @queue_id: Priority queue identifier
1583 */
1584static u32 sde_hw_rotator_start_regdma(struct sde_hw_rotator_context *ctx,
1585 enum sde_rot_queue_prio queue_id)
1586{
1587 struct sde_hw_rotator *rot = ctx->rot;
1588 u32 *wrptr;
1589 u32 regdmaSlot;
1590 u32 offset;
1591 long length;
1592 long ts_length;
1593 u32 enableInt;
1594 u32 swts = 0;
1595 u32 mask = 0;
Alan Kwong6bc64622017-02-04 17:36:03 -08001596 u32 trig_sel;
Alan Kwong9487de22016-01-16 22:06:36 -05001597
1598 wrptr = sde_hw_rotator_get_regdma_segment(ctx);
1599
Alan Kwong9487de22016-01-16 22:06:36 -05001600 /*
1601 * Last ROT command must be ROT_START before REGDMA start
1602 */
Alan Kwong6bc64622017-02-04 17:36:03 -08001603 SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, ctx->start_ctrl);
1604
Alan Kwong9487de22016-01-16 22:06:36 -05001605 sde_hw_rotator_put_regdma_segment(ctx, wrptr);
1606
1607 /*
1608 * Start REGDMA with command offset and size
1609 */
1610 regdmaSlot = sde_hw_rotator_get_regdma_ctxidx(ctx);
1611 length = ((long)wrptr - (long)ctx->regdma_base) / 4;
1612 offset = (u32)(ctx->regdma_base - (u32 *)(rot->mdss_base +
1613 REGDMA_RAM_REGDMA_CMD_RAM));
1614 enableInt = ((ctx->timestamp & 1) + 1) << 30;
Alan Kwong6bc64622017-02-04 17:36:03 -08001615 trig_sel = ctx->sbuf_mode ? REGDMA_CMD_TRIG_SEL_MDP_FLUSH :
1616 REGDMA_CMD_TRIG_SEL_SW_START;
Alan Kwong9487de22016-01-16 22:06:36 -05001617
1618 SDEROT_DBG(
1619 "regdma(%d)[%d] <== INT:0x%X|length:%ld|offset:0x%X, ts:%X\n",
1620 queue_id, regdmaSlot, enableInt, length, offset,
1621 ctx->timestamp);
1622
1623 /* ensure the command packet is issued before the submit command */
1624 wmb();
1625
1626 /* REGDMA submission for current context */
1627 if (queue_id == ROT_QUEUE_HIGH_PRIORITY) {
1628 SDE_ROTREG_WRITE(rot->mdss_base,
1629 REGDMA_CSR_REGDMA_QUEUE_0_SUBMIT,
Alan Kwong6bc64622017-02-04 17:36:03 -08001630 (ctx->sbuf_mode ? enableInt : 0) | trig_sel |
1631 ((length & 0x3ff) << 14) | offset);
Alan Kwong9487de22016-01-16 22:06:36 -05001632 swts = ctx->timestamp;
1633 mask = ~SDE_REGDMA_SWTS_MASK;
1634 } else {
1635 SDE_ROTREG_WRITE(rot->mdss_base,
1636 REGDMA_CSR_REGDMA_QUEUE_1_SUBMIT,
Alan Kwong6bc64622017-02-04 17:36:03 -08001637 (ctx->sbuf_mode ? enableInt : 0) | trig_sel |
1638 ((length & 0x3ff) << 14) | offset);
Alan Kwong9487de22016-01-16 22:06:36 -05001639 swts = ctx->timestamp << SDE_REGDMA_SWTS_SHIFT;
1640 mask = ~(SDE_REGDMA_SWTS_MASK << SDE_REGDMA_SWTS_SHIFT);
1641 }
1642
Alan Kwong6bc64622017-02-04 17:36:03 -08001643 /* timestamp update can only be used in offline multi-context mode */
1644 if (!ctx->sbuf_mode) {
1645 /* Write timestamp after previous rotator job finished */
1646 sde_hw_rotator_setup_timestamp_packet(ctx, mask, swts);
1647 offset += length;
1648 ts_length = sde_hw_rotator_get_regdma_segment(ctx) - wrptr;
1649 WARN_ON((length + ts_length) > SDE_HW_ROT_REGDMA_SEG_SIZE);
Alan Kwong9487de22016-01-16 22:06:36 -05001650
Alan Kwong6bc64622017-02-04 17:36:03 -08001651 /* ensure the command packet is issued before the submit command */
1652 wmb();
Alan Kwong9487de22016-01-16 22:06:36 -05001653
Alan Kwong6bc64622017-02-04 17:36:03 -08001654 if (queue_id == ROT_QUEUE_HIGH_PRIORITY) {
1655 SDE_ROTREG_WRITE(rot->mdss_base,
1656 REGDMA_CSR_REGDMA_QUEUE_0_SUBMIT,
1657 enableInt | (ts_length << 14) | offset);
1658 } else {
1659 SDE_ROTREG_WRITE(rot->mdss_base,
1660 REGDMA_CSR_REGDMA_QUEUE_1_SUBMIT,
1661 enableInt | (ts_length << 14) | offset);
1662 }
Alan Kwong9487de22016-01-16 22:06:36 -05001663 }
1664
Alan Kwong9487de22016-01-16 22:06:36 -05001665 /* Update command queue write ptr */
1666 sde_hw_rotator_put_regdma_segment(ctx, wrptr);
1667
1668 return ctx->timestamp;
1669}
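
/*
 * Illustrative sketch of the queue submit word assembled above (field
 * widths beyond the masks shown are assumptions):
 *
 *	enableInt = ((timestamp & 1) + 1) << 30;
 *	submit = (sbuf_mode ? enableInt : 0) | trig_sel |
 *		((length & 0x3ff) << 14) | offset;
 *
 * i.e. the command RAM offset in the low bits, the command length in
 * words starting at bit 14, the trigger select, and a per-context
 * interrupt enable chosen by the timestamp parity.
 */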
1670
1671/*
1672 * sde_hw_rotator_wait_done_no_regdma - wait for non-regdma completion
1673 * @ctx: Pointer to rotator context
1674 * @queue_id: Priority queue identifier
1675 * @flag: Option flag
1676 */
1677static u32 sde_hw_rotator_wait_done_no_regdma(
1678 struct sde_hw_rotator_context *ctx,
1679 enum sde_rot_queue_prio queue_id, u32 flag)
1680{
1681 struct sde_hw_rotator *rot = ctx->rot;
1682 int rc = 0;
1683 u32 sts = 0;
1684 u32 status;
1685 unsigned long flags;
1686
1687 if (rot->irq_num >= 0) {
1688 SDEROT_DBG("Wait for Rotator completion\n");
1689 rc = wait_for_completion_timeout(&ctx->rot_comp,
Alan Kwong6bc64622017-02-04 17:36:03 -08001690 msecs_to_jiffies(rot->koff_timeout));
Alan Kwong9487de22016-01-16 22:06:36 -05001691
1692 spin_lock_irqsave(&rot->rotisr_lock, flags);
1693 status = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
1694 if (rc == 0) {
1695 /*
1696 * Timeout: there might be an error,
1697 * or the rotator may still be busy
1698 */
1699 if (status & ROT_BUSY_BIT)
1700 SDEROT_ERR(
1701 "Timeout waiting for rotator done\n");
1702 else if (status & ROT_ERROR_BIT)
1703 SDEROT_ERR(
1704 "Rotator report error status\n");
1705 else
1706 SDEROT_WARN(
1707 "Timeout waiting, but rotator job is done!!\n");
1708
Alan Kwong818b7fc2016-07-24 22:07:41 -04001709 sde_hw_rotator_disable_irq(rot);
Alan Kwong9487de22016-01-16 22:06:36 -05001710 }
1711 spin_unlock_irqrestore(&rot->rotisr_lock, flags);
1712 } else {
1713 int cnt = 200;
1714
1715 do {
1716 udelay(500);
1717 status = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
1718 cnt--;
1719 } while ((cnt > 0) && (status & ROT_BUSY_BIT)
1720 && ((status & ROT_ERROR_BIT) == 0));
1721
1722 if (status & ROT_ERROR_BIT)
1723 SDEROT_ERR("Rotator error\n");
1724 else if (status & ROT_BUSY_BIT)
1725 SDEROT_ERR("Rotator busy\n");
1726
1727 SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
1728 ROT_DONE_CLEAR);
1729 }
1730
1731 sts = (status & ROT_ERROR_BIT) ? -ENODEV : 0;
1732
1733 return sts;
1734}
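
/*
 * Note: when no interrupt is available, the polling fallback above checks
 * ROTTOP_STATUS up to 200 times with a 500us delay, i.e. roughly
 * 200 * 500us = 100ms before the busy/error status is evaluated.
 */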
1735
1736/*
1737 * sde_hw_rotator_wait_done_regdma - wait for regdma completion
1738 * @ctx: Pointer to rotator context
1739 * @queue_id: Priority queue identifier
1740 * @flag: Option flag
1741 */
1742static u32 sde_hw_rotator_wait_done_regdma(
1743 struct sde_hw_rotator_context *ctx,
1744 enum sde_rot_queue_prio queue_id, u32 flag)
1745{
1746 struct sde_hw_rotator *rot = ctx->rot;
1747 int rc = 0;
1748 u32 status;
1749 u32 last_isr;
1750 u32 last_ts;
1751 u32 int_id;
Alan Kwong818b7fc2016-07-24 22:07:41 -04001752 u32 swts;
Alan Kwong9487de22016-01-16 22:06:36 -05001753 u32 sts = 0;
Alan Kwong9487de22016-01-16 22:06:36 -05001754 unsigned long flags;
1755
1756 if (rot->irq_num >= 0) {
1757 SDEROT_DBG("Wait for REGDMA completion, ctx:%p, ts:%X\n",
1758 ctx, ctx->timestamp);
Alan Kwong818b7fc2016-07-24 22:07:41 -04001759 rc = wait_event_timeout(ctx->regdma_waitq,
1760 !sde_hw_rotator_pending_swts(rot, ctx, &swts),
Alan Kwong6bc64622017-02-04 17:36:03 -08001761 msecs_to_jiffies(rot->koff_timeout));
Alan Kwong9487de22016-01-16 22:06:36 -05001762
Benjamin Chane7ca72e2016-12-22 18:42:34 -05001763 ATRACE_INT("sde_rot_done", 0);
Alan Kwong9487de22016-01-16 22:06:36 -05001764 spin_lock_irqsave(&rot->rotisr_lock, flags);
1765
1766 last_isr = ctx->last_regdma_isr_status;
1767 last_ts = ctx->last_regdma_timestamp;
1768 status = last_isr & REGDMA_INT_MASK;
1769 int_id = last_ts & 1;
1770 SDEROT_DBG("INT status:0x%X, INT id:%d, timestamp:0x%X\n",
1771 status, int_id, last_ts);
1772
1773 if (rc == 0 || (status & REGDMA_INT_ERR_MASK)) {
Alan Kwong818b7fc2016-07-24 22:07:41 -04001774 bool pending;
1775
1776 pending = sde_hw_rotator_pending_swts(rot, ctx, &swts);
Alan Kwong9487de22016-01-16 22:06:36 -05001777 SDEROT_ERR(
Alan Kwong818b7fc2016-07-24 22:07:41 -04001778 "Timeout wait for regdma interrupt status, ts:0x%X/0x%X pending:%d\n",
1779 ctx->timestamp, swts, pending);
Alan Kwong9487de22016-01-16 22:06:36 -05001780
1781 if (status & REGDMA_WATCHDOG_INT)
1782 SDEROT_ERR("REGDMA watchdog interrupt\n");
1783 else if (status & REGDMA_INVALID_DESCRIPTOR)
1784 SDEROT_ERR("REGDMA invalid descriptor\n");
1785 else if (status & REGDMA_INCOMPLETE_CMD)
1786 SDEROT_ERR("REGDMA incomplete command\n");
1787 else if (status & REGDMA_INVALID_CMD)
1788 SDEROT_ERR("REGDMA invalid command\n");
1789
Alan Kwong818b7fc2016-07-24 22:07:41 -04001790 sde_hw_rotator_dump_status(rot);
Alan Kwong9487de22016-01-16 22:06:36 -05001791 status = ROT_ERROR_BIT;
Alan Kwong818b7fc2016-07-24 22:07:41 -04001792 } else {
1793 if (rc == 1)
1794 SDEROT_WARN(
1795 "REGDMA done but no irq, ts:0x%X/0x%X\n",
1796 ctx->timestamp, swts);
Alan Kwong9487de22016-01-16 22:06:36 -05001797 status = 0;
1798 }
1799
Alan Kwong9487de22016-01-16 22:06:36 -05001800 spin_unlock_irqrestore(&rot->rotisr_lock, flags);
1801 } else {
1802 int cnt = 200;
Alan Kwongb0679602016-11-27 17:04:13 -08001803 bool pending;
Alan Kwong9487de22016-01-16 22:06:36 -05001804
1805 do {
1806 udelay(500);
Alan Kwongb0679602016-11-27 17:04:13 -08001807 last_isr = SDE_ROTREG_READ(rot->mdss_base,
1808 REGDMA_CSR_REGDMA_INT_STATUS);
1809 pending = sde_hw_rotator_pending_swts(rot, ctx, &swts);
Alan Kwong9487de22016-01-16 22:06:36 -05001810 cnt--;
Alan Kwongb0679602016-11-27 17:04:13 -08001811 } while ((cnt > 0) && pending &&
1812 ((last_isr & REGDMA_INT_ERR_MASK) == 0));
Alan Kwong9487de22016-01-16 22:06:36 -05001813
Alan Kwongb0679602016-11-27 17:04:13 -08001814 if (last_isr & REGDMA_INT_ERR_MASK) {
1815 SDEROT_ERR("Rotator error, ts:0x%X/0x%X status:%x\n",
1816 ctx->timestamp, swts, last_isr);
1817 sde_hw_rotator_dump_status(rot);
1818 status = ROT_ERROR_BIT;
1819 } else if (pending) {
1820 SDEROT_ERR("Rotator timeout, ts:0x%X/0x%X status:%x\n",
1821 ctx->timestamp, swts, last_isr);
1822 sde_hw_rotator_dump_status(rot);
1823 status = ROT_ERROR_BIT;
1824 } else {
1825 status = 0;
1826 }
Alan Kwong9487de22016-01-16 22:06:36 -05001827
1828 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_INT_CLEAR,
Alan Kwongb0679602016-11-27 17:04:13 -08001829 last_isr);
Alan Kwong9487de22016-01-16 22:06:36 -05001830 }
1831
1832 sts = (status & ROT_ERROR_BIT) ? -ENODEV : 0;
1833
Benjamin Chan4ec1f1d2016-09-15 22:49:49 -04001834 if (status & ROT_ERROR_BIT)
Benjamin Chan2d6411a2017-03-28 18:01:53 -04001835 SDEROT_EVTLOG_TOUT_HANDLER("rot", "rot_dbg_bus",
1836 "vbif_dbg_bus", "panic");
Benjamin Chan4ec1f1d2016-09-15 22:49:49 -04001837
Alan Kwong9487de22016-01-16 22:06:36 -05001838 return sts;
1839}
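
/*
 * Note: wait_event_timeout() returns 0 only when the timeout elapsed with
 * the software timestamp still pending; a return value of 1 means the
 * condition became true right around the timeout boundary, which is why
 * that case above is reported as "done but no irq" rather than an error.
 */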
1840
1841/*
1842 * setup_rotator_ops - setup callback functions for the low-level HAL
1843 * @ops: Pointer to low-level ops callback
1844 * @mode: Operation mode (non-regdma or regdma)
1845 */
1846static void setup_rotator_ops(struct sde_hw_rotator_ops *ops,
1847 enum sde_rotator_regdma_mode mode)
1848{
1849 ops->setup_rotator_fetchengine = sde_hw_rotator_setup_fetchengine;
1850 ops->setup_rotator_wbengine = sde_hw_rotator_setup_wbengine;
1851 if (mode == ROT_REGDMA_ON) {
1852 ops->start_rotator = sde_hw_rotator_start_regdma;
1853 ops->wait_rotator_done = sde_hw_rotator_wait_done_regdma;
1854 } else {
1855 ops->start_rotator = sde_hw_rotator_start_no_regdma;
1856 ops->wait_rotator_done = sde_hw_rotator_wait_done_no_regdma;
1857 }
1858}
1859
1860/*
1861 * sde_hw_rotator_swts_create - create software timestamp buffer
1862 * @rot: Pointer to rotator hw
1863 *
1864 * This buffer is used by regdma to keep track of the last completed command.
1865 */
1866static int sde_hw_rotator_swts_create(struct sde_hw_rotator *rot)
1867{
1868 int rc = 0;
1869 struct ion_handle *handle;
1870 struct sde_mdp_img_data *data;
Abhijit Kulkarni298c8232016-09-26 22:32:10 -07001871 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
Alan Kwong9487de22016-01-16 22:06:36 -05001872 u32 bufsize = sizeof(int) * SDE_HW_ROT_REGDMA_TOTAL_CTX * 2;
1873
Abhijit Kulkarni298c8232016-09-26 22:32:10 -07001874 rot->iclient = mdata->iclient;
Alan Kwong9487de22016-01-16 22:06:36 -05001875
1876 handle = ion_alloc(rot->iclient, bufsize, SZ_4K,
1877 ION_HEAP(ION_SYSTEM_HEAP_ID), 0);
1878 if (IS_ERR_OR_NULL(handle)) {
1879 SDEROT_ERR("ion memory allocation failed\n");
1880 return -ENOMEM;
1881 }
1882
1883 data = &rot->swts_buf;
1884 data->len = bufsize;
1885 data->srcp_dma_buf = ion_share_dma_buf(rot->iclient, handle);
1886 if (IS_ERR(data->srcp_dma_buf)) {
1887 SDEROT_ERR("ion_dma_buf setup failed\n");
1888 rc = -ENOMEM;
1889 goto imap_err;
1890 }
1891
1892 sde_smmu_ctrl(1);
1893
1894 data->srcp_attachment = sde_smmu_dma_buf_attach(data->srcp_dma_buf,
1895 &rot->pdev->dev, SDE_IOMMU_DOMAIN_ROT_UNSECURE);
1896 if (IS_ERR_OR_NULL(data->srcp_attachment)) {
1897 SDEROT_ERR("sde_smmu_dma_buf_attach error\n");
1898 rc = -ENOMEM;
1899 goto err_put;
1900 }
1901
1902 data->srcp_table = dma_buf_map_attachment(data->srcp_attachment,
1903 DMA_BIDIRECTIONAL);
1904 if (IS_ERR_OR_NULL(data->srcp_table)) {
1905 SDEROT_ERR("dma_buf_map_attachment error\n");
1906 rc = -ENOMEM;
1907 goto err_detach;
1908 }
1909
1910 rc = sde_smmu_map_dma_buf(data->srcp_dma_buf, data->srcp_table,
1911 SDE_IOMMU_DOMAIN_ROT_UNSECURE, &data->addr,
1912 &data->len, DMA_BIDIRECTIONAL);
Alan Kwong6ce448d2016-11-24 18:45:20 -08001913 if (rc < 0) {
Alan Kwong9487de22016-01-16 22:06:36 -05001914 SDEROT_ERR("smmu_map_dma_buf failed: (%d)\n", rc);
1915 goto err_unmap;
1916 }
1917
Alan Kwong6ce448d2016-11-24 18:45:20 -08001918 dma_buf_begin_cpu_access(data->srcp_dma_buf, DMA_FROM_DEVICE);
Alan Kwong9487de22016-01-16 22:06:36 -05001919 rot->swts_buffer = dma_buf_kmap(data->srcp_dma_buf, 0);
1920 if (IS_ERR_OR_NULL(rot->swts_buffer)) {
1921 SDEROT_ERR("ion kernel memory mapping failed\n");
1922 rc = IS_ERR(rot->swts_buffer);
1923 goto kmap_err;
1924 }
1925
1926 data->mapped = true;
1927 SDEROT_DBG("swts buffer mapped: %pad/%lx va:%p\n", &data->addr,
1928 data->len, rot->swts_buffer);
1929
1930 ion_free(rot->iclient, handle);
1931
1932 sde_smmu_ctrl(0);
1933
1934 return rc;
1935kmap_err:
1936 sde_smmu_unmap_dma_buf(data->srcp_table, SDE_IOMMU_DOMAIN_ROT_UNSECURE,
1937 DMA_FROM_DEVICE, data->srcp_dma_buf);
1938err_unmap:
1939 dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table,
1940 DMA_FROM_DEVICE);
1941err_detach:
1942 dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment);
1943err_put:
1944 dma_buf_put(data->srcp_dma_buf);
1945 data->srcp_dma_buf = NULL;
1946imap_err:
1947 ion_free(rot->iclient, handle);
1948
1949 return rc;
1950}
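
/*
 * Illustrative note on the buffer layout: the allocation above provides
 * one 32-bit timestamp slot per regdma context per priority queue
 * (sizeof(int) * SDE_HW_ROT_REGDMA_TOTAL_CTX * 2). The per-context slot
 * is later located in sde_hw_rotator_alloc_rotctx() with u32 pointer
 * arithmetic, equivalent in bytes to
 *
 *	ts_addr = swts_buf.addr +
 *		4 * (q_id * SDE_HW_ROT_REGDMA_TOTAL_CTX + regdma_ctxidx);
 */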
1951
1952/*
1953 * sde_hw_rotator_swtc_destroy - destroy software timestamp buffer
1954 * @rot: Pointer to rotator hw
1955 */
1956static void sde_hw_rotator_swtc_destroy(struct sde_hw_rotator *rot)
1957{
1958 struct sde_mdp_img_data *data;
1959
1960 data = &rot->swts_buf;
1961
Alan Kwong6ce448d2016-11-24 18:45:20 -08001962 dma_buf_end_cpu_access(data->srcp_dma_buf, DMA_FROM_DEVICE);
Alan Kwong9487de22016-01-16 22:06:36 -05001963 dma_buf_kunmap(data->srcp_dma_buf, 0, rot->swts_buffer);
1964
1965 sde_smmu_unmap_dma_buf(data->srcp_table, SDE_IOMMU_DOMAIN_ROT_UNSECURE,
1966 DMA_FROM_DEVICE, data->srcp_dma_buf);
1967 dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table,
1968 DMA_FROM_DEVICE);
1969 dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment);
1970 dma_buf_put(data->srcp_dma_buf);
1971 data->srcp_dma_buf = NULL;
1972}
1973
1974/*
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04001975 * sde_hw_rotator_pre_pmevent - SDE rotator core will call this before a
1976 * PM event occurs
1977 * @mgr: Pointer to rotator manager
1978 * @pmon: Boolean indicate an on/off power event
1979 */
1980void sde_hw_rotator_pre_pmevent(struct sde_rot_mgr *mgr, bool pmon)
1981{
1982 struct sde_hw_rotator *rot;
1983 u32 l_ts, h_ts, swts, hwts;
1984 u32 rotsts, regdmasts;
1985
1986 /*
1987 * Check the last HW timestamp against the SW timestamp before a
1988 * power-off event. If there is a mismatch, it is quite possible that
1989 * the rotator HW is either hung or has not finished the last submitted
1990 * job. In that case, it is best to trigger a timeout event log to
1991 * capture useful event log data for analysis.
1992 */
1993 if (!pmon && mgr && mgr->hw_data) {
1994 rot = mgr->hw_data;
1995 h_ts = atomic_read(&rot->timestamp[ROT_QUEUE_HIGH_PRIORITY]);
1996 l_ts = atomic_read(&rot->timestamp[ROT_QUEUE_LOW_PRIORITY]);
1997
1998 /* construct the combined timestamp */
1999 swts = (h_ts & SDE_REGDMA_SWTS_MASK) |
2000 ((l_ts & SDE_REGDMA_SWTS_MASK) <<
2001 SDE_REGDMA_SWTS_SHIFT);
2002
2003 /* Need to turn on clock to access rotator register */
2004 sde_rotator_clk_ctrl(mgr, true);
2005 hwts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);
2006 regdmasts = SDE_ROTREG_READ(rot->mdss_base,
2007 REGDMA_CSR_REGDMA_BLOCK_STATUS);
2008 rotsts = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
2009
2010 SDEROT_DBG(
2011 "swts:0x%x, hwts:0x%x, regdma-sts:0x%x, rottop-sts:0x%x\n",
2012 swts, hwts, regdmasts, rotsts);
2013 SDEROT_EVTLOG(swts, hwts, regdmasts, rotsts);
2014
2015 if ((swts != hwts) && ((regdmasts & REGDMA_BUSY) ||
2016 (rotsts & ROT_STATUS_MASK))) {
2017 SDEROT_ERR(
2018 "Mismatch SWTS with HWTS: swts:0x%x, hwts:0x%x, regdma-sts:0x%x, rottop-sts:0x%x\n",
2019 swts, hwts, regdmasts, rotsts);
Benjamin Chan2d6411a2017-03-28 18:01:53 -04002020 SDEROT_EVTLOG_TOUT_HANDLER("rot", "rot_dbg_bus",
2021 "vbif_dbg_bus", "panic");
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04002022 }
2023
2024 /* Turn off rotator clock after checking rotator registers */
2025 sde_rotator_clk_ctrl(mgr, false);
2026 }
2027}
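
/*
 * Worked example of the combined timestamp (values assumed): with a
 * high-priority timestamp of 0x12 and a low-priority timestamp of 0x34,
 *
 *	swts = (0x12 & SDE_REGDMA_SWTS_MASK) |
 *		((0x34 & SDE_REGDMA_SWTS_MASK) << SDE_REGDMA_SWTS_SHIFT);
 *
 * so the high-priority queue occupies the low field and the low-priority
 * queue the shifted field, matching the per-queue updates issued in
 * sde_hw_rotator_start_regdma().
 */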
2028
2029/*
2030 * sde_hw_rotator_post_pmevent - SDE rotator core will call this after a
2031 * PM event occurs
2032 * @mgr: Pointer to rotator manager
2033 * @pmon: Boolean indicate an on/off power event
2034 */
2035void sde_hw_rotator_post_pmevent(struct sde_rot_mgr *mgr, bool pmon)
2036{
2037 struct sde_hw_rotator *rot;
2038 u32 l_ts, h_ts, swts;
2039
2040 /*
2041 * After a power-on event, the rotator HW is reset to its default settings.
2042 * It is necessary to synchronize the SW timestamp with the HW.
2043 */
2044 if (pmon && mgr && mgr->hw_data) {
2045 rot = mgr->hw_data;
2046 h_ts = atomic_read(&rot->timestamp[ROT_QUEUE_HIGH_PRIORITY]);
2047 l_ts = atomic_read(&rot->timestamp[ROT_QUEUE_LOW_PRIORITY]);
2048
2049 /* construct the combined timestamp */
2050 swts = (h_ts & SDE_REGDMA_SWTS_MASK) |
2051 ((l_ts & SDE_REGDMA_SWTS_MASK) <<
2052 SDE_REGDMA_SWTS_SHIFT);
2053
2054 SDEROT_DBG("swts:0x%x, h_ts:0x%x, l_ts;0x%x\n",
2055 swts, h_ts, l_ts);
2056 SDEROT_EVTLOG(swts, h_ts, l_ts);
2057 rot->reset_hw_ts = true;
2058 rot->last_hw_ts = swts;
2059 }
2060}
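
/*
 * Note: setting reset_hw_ts here only records the synchronization point;
 * the saved last_hw_ts is written back to REGDMA_TIMESTAMP_REG and the
 * flag cleared the next time sde_hw_rotator_config() runs.
 */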
2061
2062/*
Alan Kwong9487de22016-01-16 22:06:36 -05002063 * sde_hw_rotator_destroy - Destroy hw rotator and free allocated resources
2064 * @mgr: Pointer to rotator manager
2065 */
2066static void sde_hw_rotator_destroy(struct sde_rot_mgr *mgr)
2067{
2068 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
2069 struct sde_hw_rotator *rot;
2070
2071 if (!mgr || !mgr->pdev || !mgr->hw_data) {
2072 SDEROT_ERR("null parameters\n");
2073 return;
2074 }
2075
2076 rot = mgr->hw_data;
2077 if (rot->irq_num >= 0)
2078 devm_free_irq(&mgr->pdev->dev, rot->irq_num, mdata);
2079
2080 if (rot->mode == ROT_REGDMA_ON)
2081 sde_hw_rotator_swtc_destroy(rot);
2082
2083 devm_kfree(&mgr->pdev->dev, mgr->hw_data);
2084 mgr->hw_data = NULL;
2085}
2086
2087/*
2088 * sde_hw_rotator_alloc_ext - allocate rotator resource from rotator hw
2089 * @mgr: Pointer to rotator manager
2090 * @pipe_id: pipe identifier (not used)
2091 * @wb_id: writeback identifier/priority queue identifier
2092 *
2093 * This function allocates a new hw rotator resource for the given priority.
2094 */
2095static struct sde_rot_hw_resource *sde_hw_rotator_alloc_ext(
2096 struct sde_rot_mgr *mgr, u32 pipe_id, u32 wb_id)
2097{
2098 struct sde_hw_rotator_resource_info *resinfo;
2099
2100 if (!mgr || !mgr->hw_data) {
2101 SDEROT_ERR("null parameters\n");
2102 return NULL;
2103 }
2104
2105 /*
2106 * Allocate rotator resource info. Each allocation is per
2107 * HW priority queue
2108 */
2109 resinfo = devm_kzalloc(&mgr->pdev->dev, sizeof(*resinfo), GFP_KERNEL);
2110 if (!resinfo) {
2111 SDEROT_ERR("Failed allocation HW rotator resource info\n");
2112 return NULL;
2113 }
2114
2115 resinfo->rot = mgr->hw_data;
2116 resinfo->hw.wb_id = wb_id;
2117 atomic_set(&resinfo->hw.num_active, 0);
2118 init_waitqueue_head(&resinfo->hw.wait_queue);
2119
2120 /* For non-regdma, only support one active session */
2121 if (resinfo->rot->mode == ROT_REGDMA_OFF)
2122 resinfo->hw.max_active = 1;
2123 else {
2124 resinfo->hw.max_active = SDE_HW_ROT_REGDMA_TOTAL_CTX - 1;
2125
2126 if (resinfo->rot->iclient == NULL)
2127 sde_hw_rotator_swts_create(resinfo->rot);
2128 }
2129
Alan Kwongf987ea32016-07-06 12:11:44 -04002130 if (resinfo->rot->irq_num >= 0)
Alan Kwong818b7fc2016-07-24 22:07:41 -04002131 sde_hw_rotator_enable_irq(resinfo->rot);
Alan Kwongf987ea32016-07-06 12:11:44 -04002132
Alan Kwong9487de22016-01-16 22:06:36 -05002133 SDEROT_DBG("New rotator resource:%p, priority:%d\n",
2134 resinfo, wb_id);
2135
2136 return &resinfo->hw;
2137}
2138
2139/*
2140 * sde_hw_rotator_free_ext - free the given rotator resource
2141 * @mgr: Pointer to rotator manager
2142 * @hw: Pointer to rotator resource
2143 */
2144static void sde_hw_rotator_free_ext(struct sde_rot_mgr *mgr,
2145 struct sde_rot_hw_resource *hw)
2146{
2147 struct sde_hw_rotator_resource_info *resinfo;
2148
2149 if (!mgr || !mgr->hw_data)
2150 return;
2151
2152 resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
2153
2154 SDEROT_DBG(
2155 "Free rotator resource:%p, priority:%d, active:%d, pending:%d\n",
2156 resinfo, hw->wb_id, atomic_read(&hw->num_active),
2157 hw->pending_count);
2158
Alan Kwongf987ea32016-07-06 12:11:44 -04002159 if (resinfo->rot->irq_num >= 0)
Alan Kwong818b7fc2016-07-24 22:07:41 -04002160 sde_hw_rotator_disable_irq(resinfo->rot);
Alan Kwongf987ea32016-07-06 12:11:44 -04002161
Alan Kwong9487de22016-01-16 22:06:36 -05002162 devm_kfree(&mgr->pdev->dev, resinfo);
2163}
2164
2165/*
2166 * sde_hw_rotator_alloc_rotctx - allocate rotator context
2167 * @rot: Pointer to rotator hw
2168 * @hw: Pointer to rotator resource
2169 * @session_id: Session identifier of this context
Clarence Ip9e6c3302017-06-02 11:02:57 -04002170 * @sequence_id: Sequence identifier of this request
Alan Kwong6bc64622017-02-04 17:36:03 -08002171 * @sbuf_mode: true if stream buffer is requested
Alan Kwong9487de22016-01-16 22:06:36 -05002172 *
2173 * This function allocates a new rotator context for the given session id.
2174 */
2175static struct sde_hw_rotator_context *sde_hw_rotator_alloc_rotctx(
2176 struct sde_hw_rotator *rot,
2177 struct sde_rot_hw_resource *hw,
Alan Kwong6bc64622017-02-04 17:36:03 -08002178 u32 session_id,
Clarence Ip9e6c3302017-06-02 11:02:57 -04002179 u32 sequence_id,
Alan Kwong6bc64622017-02-04 17:36:03 -08002180 bool sbuf_mode)
Alan Kwong9487de22016-01-16 22:06:36 -05002181{
2182 struct sde_hw_rotator_context *ctx;
2183
2184 /* Allocate rotator context */
2185 ctx = devm_kzalloc(&rot->pdev->dev, sizeof(*ctx), GFP_KERNEL);
2186 if (!ctx) {
2187 SDEROT_ERR("Failed allocation HW rotator context\n");
2188 return NULL;
2189 }
2190
2191 ctx->rot = rot;
2192 ctx->q_id = hw->wb_id;
2193 ctx->session_id = session_id;
Clarence Ip9e6c3302017-06-02 11:02:57 -04002194 ctx->sequence_id = sequence_id;
Alan Kwong9487de22016-01-16 22:06:36 -05002195 ctx->hwres = hw;
2196 ctx->timestamp = atomic_add_return(1, &rot->timestamp[ctx->q_id]);
2197 ctx->timestamp &= SDE_REGDMA_SWTS_MASK;
2198 ctx->is_secure = false;
Alan Kwong6bc64622017-02-04 17:36:03 -08002199 ctx->sbuf_mode = sbuf_mode;
2200 INIT_LIST_HEAD(&ctx->list);
Alan Kwong9487de22016-01-16 22:06:36 -05002201
2202 ctx->regdma_base = rot->cmd_wr_ptr[ctx->q_id]
2203 [sde_hw_rotator_get_regdma_ctxidx(ctx)];
2204 ctx->regdma_wrptr = ctx->regdma_base;
2205 ctx->ts_addr = (dma_addr_t)((u32 *)rot->swts_buf.addr +
2206 ctx->q_id * SDE_HW_ROT_REGDMA_TOTAL_CTX +
2207 sde_hw_rotator_get_regdma_ctxidx(ctx));
2208
Alan Kwong818b7fc2016-07-24 22:07:41 -04002209 ctx->last_regdma_timestamp = SDE_REGDMA_SWTS_INVALID;
2210
Alan Kwong9487de22016-01-16 22:06:36 -05002211 init_completion(&ctx->rot_comp);
Alan Kwong818b7fc2016-07-24 22:07:41 -04002212 init_waitqueue_head(&ctx->regdma_waitq);
Alan Kwong9487de22016-01-16 22:06:36 -05002213
2214 /* Store rotator context for lookup purpose */
2215 sde_hw_rotator_put_ctx(ctx);
2216
2217 SDEROT_DBG(
Alan Kwong6bc64622017-02-04 17:36:03 -08002218 "New rot CTX:%p, ctxidx:%d, session-id:%d, prio:%d, timestamp:%X, active:%d sbuf:%d\n",
Alan Kwong9487de22016-01-16 22:06:36 -05002219 ctx, sde_hw_rotator_get_regdma_ctxidx(ctx), ctx->session_id,
2220 ctx->q_id, ctx->timestamp,
Alan Kwong6bc64622017-02-04 17:36:03 -08002221 atomic_read(&ctx->hwres->num_active),
2222 ctx->sbuf_mode);
Alan Kwong9487de22016-01-16 22:06:36 -05002223
2224 return ctx;
2225}
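
/*
 * Illustrative note: each context receives a monotonically increasing
 * per-queue timestamp truncated to SDE_REGDMA_SWTS_MASK, and the context
 * is stored via sde_hw_rotator_put_ctx() so that the interrupt handler
 * can map a completed timestamp straight back to it, e.g.
 *
 *	ctx = rot->rotCtx[q_id][ts & SDE_HW_ROT_REGDMA_SEG_MASK];
 *
 * (presumably the same timestamp-derived index used for the regdma
 * command segment selection above).
 */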
2226
2227/*
2228 * sde_hw_rotator_free_rotctx - free the given rotator context
2229 * @rot: Pointer to rotator hw
2230 * @ctx: Pointer to rotator context
2231 */
2232static void sde_hw_rotator_free_rotctx(struct sde_hw_rotator *rot,
2233 struct sde_hw_rotator_context *ctx)
2234{
2235 if (!rot || !ctx)
2236 return;
2237
2238 SDEROT_DBG(
Alan Kwong6bc64622017-02-04 17:36:03 -08002239 "Free rot CTX:%p, ctxidx:%d, session-id:%d, prio:%d, timestamp:%X, active:%d sbuf:%d\n",
Alan Kwong9487de22016-01-16 22:06:36 -05002240 ctx, sde_hw_rotator_get_regdma_ctxidx(ctx), ctx->session_id,
2241 ctx->q_id, ctx->timestamp,
Alan Kwong6bc64622017-02-04 17:36:03 -08002242 atomic_read(&ctx->hwres->num_active),
2243 ctx->sbuf_mode);
Alan Kwong9487de22016-01-16 22:06:36 -05002244
Benjamin Chanc3e185f2016-11-08 21:48:21 -05002245 /* Clear rotator context from lookup purpose */
2246 sde_hw_rotator_clr_ctx(ctx);
Alan Kwong9487de22016-01-16 22:06:36 -05002247
2248 devm_kfree(&rot->pdev->dev, ctx);
2249}
2250
2251/*
2252 * sde_hw_rotator_config - configure hw for the given rotation entry
2253 * @hw: Pointer to rotator resource
2254 * @entry: Pointer to rotation entry
2255 *
2256 * This function sets up the fetch/writeback/rotator blocks, as well as VBIF,
2257 * based on the given rotation entry.
2258 */
2259static int sde_hw_rotator_config(struct sde_rot_hw_resource *hw,
2260 struct sde_rot_entry *entry)
2261{
2262 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
2263 struct sde_hw_rotator *rot;
2264 struct sde_hw_rotator_resource_info *resinfo;
2265 struct sde_hw_rotator_context *ctx;
2266 struct sde_hw_rot_sspp_cfg sspp_cfg;
2267 struct sde_hw_rot_wb_cfg wb_cfg;
2268 u32 danger_lut = 0; /* applicable for realtime client only */
2269 u32 safe_lut = 0; /* applicable for realtime client only */
2270 u32 flags = 0;
Benjamin Chana9dd3052017-02-14 17:39:32 -05002271 u32 rststs = 0;
Alan Kwong9487de22016-01-16 22:06:36 -05002272 struct sde_rotation_item *item;
Alan Kwong6bc64622017-02-04 17:36:03 -08002273 int ret;
Alan Kwong9487de22016-01-16 22:06:36 -05002274
2275 if (!hw || !entry) {
2276 SDEROT_ERR("null hw resource/entry\n");
2277 return -EINVAL;
2278 }
2279
2280 resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
2281 rot = resinfo->rot;
2282 item = &entry->item;
2283
Alan Kwong6bc64622017-02-04 17:36:03 -08002284 ctx = sde_hw_rotator_alloc_rotctx(rot, hw, item->session_id,
Clarence Ip9e6c3302017-06-02 11:02:57 -04002285 item->sequence_id, item->output.sbuf);
Alan Kwong9487de22016-01-16 22:06:36 -05002286 if (!ctx) {
2287 SDEROT_ERR("Failed allocating rotator context!!\n");
2288 return -EINVAL;
2289 }
2290
Alan Kwong6bc64622017-02-04 17:36:03 -08002291 /* save entry for debugging purposes */
2292 ctx->last_entry = entry;
2293
2294 if (test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map)) {
2295 if (entry->dst_buf.sbuf) {
2296 u32 op_mode;
2297
2298 if (entry->item.trigger ==
2299 SDE_ROTATOR_TRIGGER_COMMAND)
2300 ctx->start_ctrl = (rot->cmd_trigger << 4);
2301 else if (entry->item.trigger ==
2302 SDE_ROTATOR_TRIGGER_VIDEO)
2303 ctx->start_ctrl = (rot->vid_trigger << 4);
2304 else
2305 ctx->start_ctrl = 0;
2306
2307 ctx->sys_cache_mode = BIT(15) |
2308 ((item->output.scid & 0x1f) << 8) |
2309 (item->output.writeback ? 0x5 : 0);
2310
2311 ctx->op_mode = BIT(4) |
2312 ((ctx->rot->sbuf_headroom & 0xff) << 8);
2313
2314 /* detect transition to inline mode */
2315 op_mode = (SDE_ROTREG_READ(rot->mdss_base,
2316 ROTTOP_OP_MODE) >> 4) & 0x3;
2317 if (!op_mode) {
2318 u32 status;
2319
2320 status = SDE_ROTREG_READ(rot->mdss_base,
2321 ROTTOP_STATUS);
2322 if (status & BIT(0)) {
2323 SDEROT_ERR("rotator busy 0x%x\n",
2324 status);
2325 sde_hw_rotator_dump_status(rot);
2326 SDEROT_EVTLOG_TOUT_HANDLER("rot",
2327 "vbif_dbg_bus",
2328 "panic");
2329 }
2330 }
2331
2332 } else {
2333 ctx->start_ctrl = BIT(0);
2334 ctx->sys_cache_mode = 0;
2335 ctx->op_mode = 0;
2336 }
2337 } else {
2338 ctx->start_ctrl = BIT(0);
2339 }
2340
2341 SDEROT_EVTLOG(ctx->start_ctrl, ctx->sys_cache_mode, ctx->op_mode);
2342
Benjamin Chana9dd3052017-02-14 17:39:32 -05002343 /*
2344 * If the rotator HW was reset but the PM event notification was
2345 * missed, we need to initialize the SW timestamp automatically.
2346 */
2347 rststs = SDE_ROTREG_READ(rot->mdss_base, REGDMA_RESET_STATUS_REG);
2348 if (!rot->reset_hw_ts && rststs) {
2349 u32 l_ts, h_ts, swts;
2350
2351 swts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);
2352 h_ts = atomic_read(&rot->timestamp[ROT_QUEUE_HIGH_PRIORITY]);
2353 l_ts = atomic_read(&rot->timestamp[ROT_QUEUE_LOW_PRIORITY]);
2354 SDEROT_EVTLOG(0xbad0, rststs, swts, h_ts, l_ts);
2355
2356 if (ctx->q_id == ROT_QUEUE_HIGH_PRIORITY)
2357 h_ts = (h_ts - 1) & SDE_REGDMA_SWTS_MASK;
2358 else
2359 l_ts = (l_ts - 1) & SDE_REGDMA_SWTS_MASK;
2360
2361 /* construct the combined timestamp */
2362 swts = (h_ts & SDE_REGDMA_SWTS_MASK) |
2363 ((l_ts & SDE_REGDMA_SWTS_MASK) <<
2364 SDE_REGDMA_SWTS_SHIFT);
2365
2366 SDEROT_DBG("swts:0x%x, h_ts:0x%x, l_ts;0x%x\n",
2367 swts, h_ts, l_ts);
2368 SDEROT_EVTLOG(0x900d, swts, h_ts, l_ts);
2369 rot->last_hw_ts = swts;
2370
2371 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG,
2372 rot->last_hw_ts);
2373 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_RESET_STATUS_REG, 0);
2374 /* ensure write is issued to the rotator HW */
2375 wmb();
2376 }
2377
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04002378 if (rot->reset_hw_ts) {
2379 SDEROT_EVTLOG(rot->last_hw_ts);
2380 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG,
2381 rot->last_hw_ts);
Benjamin Chana9dd3052017-02-14 17:39:32 -05002382 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_RESET_STATUS_REG, 0);
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04002383 /* ensure write is issued to the rotator HW */
2384 wmb();
2385 rot->reset_hw_ts = false;
2386 }
2387
Alan Kwong9487de22016-01-16 22:06:36 -05002388 flags = (item->flags & SDE_ROTATION_FLIP_LR) ?
2389 SDE_ROT_FLAG_FLIP_LR : 0;
2390 flags |= (item->flags & SDE_ROTATION_FLIP_UD) ?
2391 SDE_ROT_FLAG_FLIP_UD : 0;
2392 flags |= (item->flags & SDE_ROTATION_90) ?
2393 SDE_ROT_FLAG_ROT_90 : 0;
2394 flags |= (item->flags & SDE_ROTATION_DEINTERLACE) ?
2395 SDE_ROT_FLAG_DEINTERLACE : 0;
2396 flags |= (item->flags & SDE_ROTATION_SECURE) ?
2397 SDE_ROT_FLAG_SECURE_OVERLAY_SESSION : 0;
Abhijit Kulkarni298c8232016-09-26 22:32:10 -07002398 flags |= (item->flags & SDE_ROTATION_SECURE_CAMERA) ?
2399 SDE_ROT_FLAG_SECURE_CAMERA_SESSION : 0;
2400
Alan Kwong9487de22016-01-16 22:06:36 -05002401
2402 sspp_cfg.img_width = item->input.width;
2403 sspp_cfg.img_height = item->input.height;
Benjamin Chan99eb63b2016-12-21 15:45:26 -05002404 sspp_cfg.fps = entry->perf->config.frame_rate;
2405 sspp_cfg.bw = entry->perf->bw;
Alan Kwong9487de22016-01-16 22:06:36 -05002406 sspp_cfg.fmt = sde_get_format_params(item->input.format);
2407 if (!sspp_cfg.fmt) {
2408 SDEROT_ERR("null format\n");
Alan Kwong6bc64622017-02-04 17:36:03 -08002409 ret = -EINVAL;
2410 goto error;
Alan Kwong9487de22016-01-16 22:06:36 -05002411 }
2412 sspp_cfg.src_rect = &item->src_rect;
2413 sspp_cfg.data = &entry->src_buf;
2414 sde_mdp_get_plane_sizes(sspp_cfg.fmt, item->input.width,
2415 item->input.height, &sspp_cfg.src_plane,
2416 0, /* No bwc_mode */
2417 (flags & SDE_ROT_FLAG_SOURCE_ROTATED_90) ?
2418 true : false);
2419
2420 rot->ops.setup_rotator_fetchengine(ctx, ctx->q_id,
Benjamin Chanfb6faa32016-08-16 17:21:01 -04002421 &sspp_cfg, danger_lut, safe_lut,
2422 entry->dnsc_factor_w, entry->dnsc_factor_h, flags);
Alan Kwong9487de22016-01-16 22:06:36 -05002423
2424 wb_cfg.img_width = item->output.width;
2425 wb_cfg.img_height = item->output.height;
Benjamin Chan99eb63b2016-12-21 15:45:26 -05002426 wb_cfg.fps = entry->perf->config.frame_rate;
2427 wb_cfg.bw = entry->perf->bw;
Alan Kwong9487de22016-01-16 22:06:36 -05002428 wb_cfg.fmt = sde_get_format_params(item->output.format);
2429 wb_cfg.dst_rect = &item->dst_rect;
2430 wb_cfg.data = &entry->dst_buf;
2431 sde_mdp_get_plane_sizes(wb_cfg.fmt, item->output.width,
2432 item->output.height, &wb_cfg.dst_plane,
2433 0, /* No bwc_mode */
2434 (flags & SDE_ROT_FLAG_ROT_90) ? true : false);
2435
2436 wb_cfg.v_downscale_factor = entry->dnsc_factor_h;
2437 wb_cfg.h_downscale_factor = entry->dnsc_factor_w;
Alan Kwong498d59f2017-02-11 18:56:34 -08002438 wb_cfg.prefill_bw = item->prefill_bw;
Alan Kwong9487de22016-01-16 22:06:36 -05002439
2440 rot->ops.setup_rotator_wbengine(ctx, ctx->q_id, &wb_cfg, flags);
2441
2442 /* setup VA mapping for debugfs */
2443 if (rot->dbgmem) {
2444 sde_hw_rotator_map_vaddr(&ctx->src_dbgbuf,
2445 &item->input,
2446 &entry->src_buf);
2447
2448 sde_hw_rotator_map_vaddr(&ctx->dst_dbgbuf,
2449 &item->output,
2450 &entry->dst_buf);
2451 }
2452
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04002453 SDEROT_EVTLOG(ctx->timestamp, flags,
2454 item->input.width, item->input.height,
Benjamin Chan53e3bce2016-08-31 14:43:29 -04002455 item->output.width, item->output.height,
Benjamin Chan59a06052017-01-12 18:06:03 -05002456 entry->src_buf.p[0].addr, entry->dst_buf.p[0].addr,
Benjamin Chan1b94f952017-01-23 17:42:30 -05002457 item->input.format, item->output.format,
2458 entry->perf->config.frame_rate);
Benjamin Chan53e3bce2016-08-31 14:43:29 -04002459
Veera Sundaram Sankaran3e539fe2017-05-10 17:03:32 -07002460 if (!ctx->sbuf_mode && mdata->default_ot_rd_limit) {
Alan Kwong9487de22016-01-16 22:06:36 -05002461 struct sde_mdp_set_ot_params ot_params;
2462
2463 memset(&ot_params, 0, sizeof(struct sde_mdp_set_ot_params));
2464 ot_params.xin_id = XIN_SSPP;
2465 ot_params.num = 0; /* not used */
Alan Kwongeffb5ee2016-03-12 19:47:45 -05002466 ot_params.width = entry->perf->config.input.width;
2467 ot_params.height = entry->perf->config.input.height;
2468 ot_params.fps = entry->perf->config.frame_rate;
Alan Kwong9487de22016-01-16 22:06:36 -05002469 ot_params.reg_off_vbif_lim_conf = MMSS_VBIF_RD_LIM_CONF;
2470 ot_params.reg_off_mdp_clk_ctrl =
2471 MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0;
2472 ot_params.bit_off_mdp_clk_ctrl =
2473 MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN0;
Benjamin Chan99eb63b2016-12-21 15:45:26 -05002474 ot_params.fmt = ctx->is_traffic_shaping ?
2475 SDE_PIX_FMT_ABGR_8888 :
2476 entry->perf->config.input.format;
Benjamin Chan1b94f952017-01-23 17:42:30 -05002477 ot_params.rotsts_base = rot->mdss_base + ROTTOP_STATUS;
2478 ot_params.rotsts_busy_mask = ROT_BUSY_BIT;
Alan Kwong9487de22016-01-16 22:06:36 -05002479 sde_mdp_set_ot_limit(&ot_params);
2480 }
2481
Veera Sundaram Sankaran3e539fe2017-05-10 17:03:32 -07002482 if (!ctx->sbuf_mode && mdata->default_ot_wr_limit) {
Alan Kwong9487de22016-01-16 22:06:36 -05002483 struct sde_mdp_set_ot_params ot_params;
2484
2485 memset(&ot_params, 0, sizeof(struct sde_mdp_set_ot_params));
2486 ot_params.xin_id = XIN_WRITEBACK;
2487 ot_params.num = 0; /* not used */
Alan Kwongeffb5ee2016-03-12 19:47:45 -05002488 ot_params.width = entry->perf->config.input.width;
2489 ot_params.height = entry->perf->config.input.height;
2490 ot_params.fps = entry->perf->config.frame_rate;
Alan Kwong9487de22016-01-16 22:06:36 -05002491 ot_params.reg_off_vbif_lim_conf = MMSS_VBIF_WR_LIM_CONF;
2492 ot_params.reg_off_mdp_clk_ctrl =
2493 MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0;
2494 ot_params.bit_off_mdp_clk_ctrl =
2495 MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN1;
Benjamin Chan99eb63b2016-12-21 15:45:26 -05002496 ot_params.fmt = ctx->is_traffic_shaping ?
2497 SDE_PIX_FMT_ABGR_8888 :
2498 entry->perf->config.input.format;
Benjamin Chan1b94f952017-01-23 17:42:30 -05002499 ot_params.rotsts_base = rot->mdss_base + ROTTOP_STATUS;
2500 ot_params.rotsts_busy_mask = ROT_BUSY_BIT;
Alan Kwong9487de22016-01-16 22:06:36 -05002501 sde_mdp_set_ot_limit(&ot_params);
2502 }
2503
2504 if (test_bit(SDE_QOS_PER_PIPE_LUT, mdata->sde_qos_map)) {
2505 u32 qos_lut = 0; /* low priority for nrt read client */
2506
2507 trace_rot_perf_set_qos_luts(XIN_SSPP, sspp_cfg.fmt->format,
2508 qos_lut, sde_mdp_is_linear_format(sspp_cfg.fmt));
2509
2510 SDE_ROTREG_WRITE(rot->mdss_base, ROT_SSPP_CREQ_LUT, qos_lut);
2511 }
2512
Veera Sundaram Sankarane15dd222017-04-20 08:13:08 -07002513 /* VBIF QoS and other settings */
Veera Sundaram Sankaran3e539fe2017-05-10 17:03:32 -07002514 if (!ctx->sbuf_mode)
2515 sde_hw_rotator_vbif_setting(rot);
Benjamin Chan2d6411a2017-03-28 18:01:53 -04002516
Alan Kwong9487de22016-01-16 22:06:36 -05002517 return 0;
Alan Kwong6bc64622017-02-04 17:36:03 -08002518
2519error:
2520 sde_hw_rotator_free_rotctx(rot, ctx);
2521 return ret;
Alan Kwong9487de22016-01-16 22:06:36 -05002522}
2523
2524/*
2525 * sde_hw_rotator_kickoff - kickoff processing on the given entry
2526 * @hw: Pointer to rotator resource
2527 * @entry: Pointer to rotation entry
2528 */
2529static int sde_hw_rotator_kickoff(struct sde_rot_hw_resource *hw,
2530 struct sde_rot_entry *entry)
2531{
2532 struct sde_hw_rotator *rot;
2533 struct sde_hw_rotator_resource_info *resinfo;
2534 struct sde_hw_rotator_context *ctx;
Alan Kwong9487de22016-01-16 22:06:36 -05002535
2536 if (!hw || !entry) {
2537 SDEROT_ERR("null hw resource/entry\n");
2538 return -EINVAL;
2539 }
2540
2541 resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
2542 rot = resinfo->rot;
2543
2544 /* Lookup rotator context from session-id */
Clarence Ip9e6c3302017-06-02 11:02:57 -04002545 ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id,
2546 entry->item.sequence_id, hw->wb_id);
Alan Kwong9487de22016-01-16 22:06:36 -05002547 if (!ctx) {
2548 SDEROT_ERR("Cannot locate rotator ctx from sesison id:%d\n",
2549 entry->item.session_id);
Benjamin Chan62b94ed2016-08-18 23:55:21 -04002550 return -EINVAL;
Alan Kwong9487de22016-01-16 22:06:36 -05002551 }
Alan Kwong9487de22016-01-16 22:06:36 -05002552
Alan Kwong9487de22016-01-16 22:06:36 -05002553 rot->ops.start_rotator(ctx, ctx->q_id);
2554
2555 return 0;
2556}
2557
2558/*
2559 * sde_hw_rotator_wait4done - wait for completion notification
2560 * @hw: Pointer to rotator resource
2561 * @entry: Pointer to rotation entry
2562 *
2563 * This function blocks until the given entry is complete, error
2564 * is detected, or timeout.
2565 */
2566static int sde_hw_rotator_wait4done(struct sde_rot_hw_resource *hw,
2567 struct sde_rot_entry *entry)
2568{
2569 struct sde_hw_rotator *rot;
2570 struct sde_hw_rotator_resource_info *resinfo;
2571 struct sde_hw_rotator_context *ctx;
2572 int ret;
2573
2574 if (!hw || !entry) {
2575 SDEROT_ERR("null hw resource/entry\n");
2576 return -EINVAL;
2577 }
2578
2579 resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
2580 rot = resinfo->rot;
2581
2582 /* Lookup rotator context from session-id */
Clarence Ip9e6c3302017-06-02 11:02:57 -04002583 ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id,
2584 entry->item.sequence_id, hw->wb_id);
Alan Kwong9487de22016-01-16 22:06:36 -05002585 if (!ctx) {
2586 SDEROT_ERR("Cannot locate rotator ctx from sesison id:%d\n",
2587 entry->item.session_id);
Benjamin Chan62b94ed2016-08-18 23:55:21 -04002588 return -EINVAL;
Alan Kwong9487de22016-01-16 22:06:36 -05002589 }
Alan Kwong9487de22016-01-16 22:06:36 -05002590
2591 ret = rot->ops.wait_rotator_done(ctx, ctx->q_id, 0);
2592
Alan Kwong9487de22016-01-16 22:06:36 -05002593 if (rot->dbgmem) {
2594 sde_hw_rotator_unmap_vaddr(&ctx->src_dbgbuf);
2595 sde_hw_rotator_unmap_vaddr(&ctx->dst_dbgbuf);
2596 }
2597
2598 /* Current rotator context job is finished, time to free it up */
2599 sde_hw_rotator_free_rotctx(rot, ctx);
2600
2601 return ret;
2602}
2603
2604/*
2605 * sde_rotator_hw_rev_init - setup feature and/or capability bitmask
2606 * @rot: Pointer to hw rotator
2607 *
2608 * This function initializes the feature and/or capability bitmasks based
2609 * on the h/w version read from the device.
2610 */
2611static int sde_rotator_hw_rev_init(struct sde_hw_rotator *rot)
2612{
2613 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
2614 u32 hw_version;
2615
2616 if (!mdata) {
2617 SDEROT_ERR("null rotator data\n");
2618 return -EINVAL;
2619 }
2620
2621 hw_version = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_HW_VERSION);
2622 SDEROT_DBG("hw version %8.8x\n", hw_version);
2623
2624 clear_bit(SDE_QOS_PER_PIPE_IB, mdata->sde_qos_map);
2625 set_bit(SDE_QOS_OVERHEAD_FACTOR, mdata->sde_qos_map);
Alan Kwong9487de22016-01-16 22:06:36 -05002626 set_bit(SDE_QOS_OTLIM, mdata->sde_qos_map);
2627 set_bit(SDE_QOS_PER_PIPE_LUT, mdata->sde_qos_map);
2628 clear_bit(SDE_QOS_SIMPLIFIED_PREFILL, mdata->sde_qos_map);
2629
2630 set_bit(SDE_CAPS_R3_WB, mdata->sde_caps_map);
2631
Alan Kwong6bc64622017-02-04 17:36:03 -08002632 /* features exposed via rotator top h/w version */
Benjamin Chanfb6faa32016-08-16 17:21:01 -04002633 if (hw_version != SDE_ROT_TYPE_V1_0) {
2634 SDEROT_DBG("Supporting 1.5 downscale for SDE Rotator\n");
2635 set_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map);
2636 }
2637
Abhijit Kulkarni298c8232016-09-26 22:32:10 -07002638 set_bit(SDE_CAPS_SEC_ATTACH_DETACH_SMMU, mdata->sde_caps_map);
2639
Benjamin Chan53e3bce2016-08-31 14:43:29 -04002640 mdata->nrt_vbif_dbg_bus = nrt_vbif_dbg_bus_r3;
2641 mdata->nrt_vbif_dbg_bus_size =
2642 ARRAY_SIZE(nrt_vbif_dbg_bus_r3);
2643
Benjamin Chan2d6411a2017-03-28 18:01:53 -04002644 mdata->rot_dbg_bus = rot_dbgbus_r3;
2645 mdata->rot_dbg_bus_size = ARRAY_SIZE(rot_dbgbus_r3);
2646
Benjamin Chan53e3bce2016-08-31 14:43:29 -04002647 mdata->regdump = sde_rot_r3_regdump;
2648 mdata->regdump_size = ARRAY_SIZE(sde_rot_r3_regdump);
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04002649 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG, 0);
Alan Kwong6bc64622017-02-04 17:36:03 -08002650
2651 /* features exposed via mdss h/w version */
2652 if (IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version, SDE_MDP_HW_REV_400)) {
2653 SDEROT_DBG("Supporting sys cache inline rotation\n");
2654 set_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map);
Alan Kwongfb8eeb22017-02-06 15:00:03 -08002655 set_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map);
Clarence Ip22fed4c2017-05-16 15:30:51 -04002656 set_bit(SDE_CAPS_PARTIALWR, mdata->sde_caps_map);
Alan Kwong4b416162017-08-11 21:03:10 -04002657 rot->inpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
2658 sde_hw_rotator_v4_inpixfmts;
2659 rot->num_inpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
2660 ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts);
2661 rot->outpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
2662 sde_hw_rotator_v4_outpixfmts;
2663 rot->num_outpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
2664 ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts);
2665 rot->inpixfmts[SDE_ROTATOR_MODE_SBUF] =
2666 sde_hw_rotator_v4_inpixfmts_sbuf;
2667 rot->num_inpixfmt[SDE_ROTATOR_MODE_SBUF] =
2668 ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts_sbuf);
2669 rot->outpixfmts[SDE_ROTATOR_MODE_SBUF] =
2670 sde_hw_rotator_v4_outpixfmts_sbuf;
2671 rot->num_outpixfmt[SDE_ROTATOR_MODE_SBUF] =
2672 ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts_sbuf);
Alan Kwong6bc64622017-02-04 17:36:03 -08002673 rot->downscale_caps =
2674 "LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2";
2675 } else {
Alan Kwong4b416162017-08-11 21:03:10 -04002676 rot->inpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
2677 sde_hw_rotator_v3_inpixfmts;
2678 rot->num_inpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
2679 ARRAY_SIZE(sde_hw_rotator_v3_inpixfmts);
2680 rot->outpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
2681 sde_hw_rotator_v3_outpixfmts;
2682 rot->num_outpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
2683 ARRAY_SIZE(sde_hw_rotator_v3_outpixfmts);
Alan Kwong6bc64622017-02-04 17:36:03 -08002684 rot->downscale_caps = (hw_version == SDE_ROT_TYPE_V1_0) ?
2685 "LINEAR/2/4/8/16/32/64 TILE/2/4 TP10/2" :
2686 "LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2";
2687 }
2688
Alan Kwong9487de22016-01-16 22:06:36 -05002689 return 0;
2690}
2691
2692/*
2693 * sde_hw_rotator_rotirq_handler - non-regdma interrupt handler
2694 * @irq: Interrupt number
2695 * @ptr: Pointer to private handle provided during registration
2696 *
2697 * This function services the rotator interrupt and wakes up the waiting
2698 * client with pending rotation requests already submitted to h/w.
2699 */
2700static irqreturn_t sde_hw_rotator_rotirq_handler(int irq, void *ptr)
2701{
2702 struct sde_hw_rotator *rot = ptr;
2703 struct sde_hw_rotator_context *ctx;
2704 irqreturn_t ret = IRQ_NONE;
2705 u32 isr;
2706
2707 isr = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_INTR_STATUS);
2708
2709 SDEROT_DBG("intr_status = %8.8x\n", isr);
2710
2711 if (isr & ROT_DONE_MASK) {
2712 if (rot->irq_num >= 0)
Alan Kwong818b7fc2016-07-24 22:07:41 -04002713 sde_hw_rotator_disable_irq(rot);
Alan Kwong9487de22016-01-16 22:06:36 -05002714 SDEROT_DBG("Notify rotator complete\n");
2715
2716 /* Non-regdma rotator has only 1 session, no lookup needed */
2717 ctx = rot->rotCtx[0][0];
2718 WARN_ON(ctx == NULL);
2719 complete_all(&ctx->rot_comp);
2720
2721 spin_lock(&rot->rotisr_lock);
2722 SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
2723 ROT_DONE_CLEAR);
2724 spin_unlock(&rot->rotisr_lock);
2725 ret = IRQ_HANDLED;
2726 }
2727
2728 return ret;
2729}
2730
2731/*
2732 * sde_hw_rotator_regdmairq_handler - regdma interrupt handler
2733 * @irq: Interrupt number
2734 * @ptr: Pointer to private handle provided during registration
2735 *
2736 * This function services the rotator interrupt, decoding the source of
2737 * events (high/low priority queue), and wakes up all waiting clients
2738 * with pending rotation requests already submitted to h/w.
2739 */
2740static irqreturn_t sde_hw_rotator_regdmairq_handler(int irq, void *ptr)
2741{
2742 struct sde_hw_rotator *rot = ptr;
Clarence Ip96854c2db12017-06-12 14:32:26 -04002743 struct sde_hw_rotator_context *ctx, *tmp;
Alan Kwong9487de22016-01-16 22:06:36 -05002744 irqreturn_t ret = IRQ_NONE;
Clarence Ip96854c2db12017-06-12 14:32:26 -04002745 u32 isr, isr_tmp;
Alan Kwong9487de22016-01-16 22:06:36 -05002746 u32 ts;
2747 u32 q_id;
2748
2749 isr = SDE_ROTREG_READ(rot->mdss_base, REGDMA_CSR_REGDMA_INT_STATUS);
Alan Kwong818b7fc2016-07-24 22:07:41 -04002750 /* acknowledge interrupt before reading latest timestamp */
2751 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_INT_CLEAR, isr);
Alan Kwong9487de22016-01-16 22:06:36 -05002752 ts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);
2753
2754 SDEROT_DBG("intr_status = %8.8x, sw_TS:%X\n", isr, ts);
2755
2756 /* Any REGDMA status, including error and watchdog timer, should
2757 * trigger and wake up the waiting thread
2758 */
2759 if (isr & (REGDMA_INT_HIGH_MASK | REGDMA_INT_LOW_MASK)) {
2760 spin_lock(&rot->rotisr_lock);
2761
2762 /*
2763 * Obtain rotator context based on timestamp from regdma
2764 * and low/high interrupt status
2765 */
2766 if (isr & REGDMA_INT_HIGH_MASK) {
2767 q_id = ROT_QUEUE_HIGH_PRIORITY;
2768 ts = ts & SDE_REGDMA_SWTS_MASK;
2769 } else if (isr & REGDMA_INT_LOW_MASK) {
2770 q_id = ROT_QUEUE_LOW_PRIORITY;
2771 ts = (ts >> SDE_REGDMA_SWTS_SHIFT) &
2772 SDE_REGDMA_SWTS_MASK;
Benjamin Chan62b94ed2016-08-18 23:55:21 -04002773 } else {
2774 SDEROT_ERR("unknown ISR status: isr=0x%X\n", isr);
2775 goto done_isr_handle;
Alan Kwong9487de22016-01-16 22:06:36 -05002776 }
Alan Kwong6bc64622017-02-04 17:36:03 -08002777
2778 /*
2779 * Timestamp packet is not available in sbuf mode.
2780 * Simulate timestamp update in the handler instead.
2781 */
Clarence Ip96854c2db12017-06-12 14:32:26 -04002782 if (list_empty(&rot->sbuf_ctx[q_id]))
2783 goto skip_sbuf;
2784
2785 ctx = NULL;
2786 isr_tmp = isr;
2787 list_for_each_entry(tmp, &rot->sbuf_ctx[q_id], list) {
2788 u32 mask;
2789
2790 mask = tmp->timestamp & 0x1 ? REGDMA_INT_1_MASK :
2791 REGDMA_INT_0_MASK;
2792 if (isr_tmp & mask) {
2793 isr_tmp &= ~mask;
2794 ctx = tmp;
Alan Kwong6bc64622017-02-04 17:36:03 -08002795 ts = ctx->timestamp;
2796 sde_hw_rotator_update_swts(rot, ctx, ts);
2797 SDEROT_DBG("update swts:0x%X\n", ts);
Alan Kwong6bc64622017-02-04 17:36:03 -08002798 }
Clarence Ip96854c2db12017-06-12 14:32:26 -04002799 SDEROT_EVTLOG(isr, tmp->timestamp);
Alan Kwong6bc64622017-02-04 17:36:03 -08002800 }
Clarence Ip96854c2db12017-06-12 14:32:26 -04002801 if (ctx == NULL)
2802 SDEROT_ERR("invalid swts ctx\n");
2803skip_sbuf:
Alan Kwong9487de22016-01-16 22:06:36 -05002804 ctx = rot->rotCtx[q_id][ts & SDE_HW_ROT_REGDMA_SEG_MASK];
Alan Kwong9487de22016-01-16 22:06:36 -05002805
2806 /*
2807 * Wake up all waiting contexts from the current and previous
2808 * SW timestamps.
2809 */
Alan Kwong818b7fc2016-07-24 22:07:41 -04002810 while (ctx &&
2811 sde_hw_rotator_elapsed_swts(ctx->timestamp, ts) >= 0) {
Alan Kwong9487de22016-01-16 22:06:36 -05002812 ctx->last_regdma_isr_status = isr;
2813 ctx->last_regdma_timestamp = ts;
2814 SDEROT_DBG(
Alan Kwongf987ea32016-07-06 12:11:44 -04002815 "regdma complete: ctx:%p, ts:%X\n", ctx, ts);
Alan Kwong818b7fc2016-07-24 22:07:41 -04002816 wake_up_all(&ctx->regdma_waitq);
Alan Kwong9487de22016-01-16 22:06:36 -05002817
2818 ts = (ts - 1) & SDE_REGDMA_SWTS_MASK;
2819 ctx = rot->rotCtx[q_id]
2820 [ts & SDE_HW_ROT_REGDMA_SEG_MASK];
Alan Kwong818b7fc2016-07-24 22:07:41 -04002821 };
Alan Kwong9487de22016-01-16 22:06:36 -05002822
Benjamin Chan62b94ed2016-08-18 23:55:21 -04002823done_isr_handle:
Alan Kwong9487de22016-01-16 22:06:36 -05002824 spin_unlock(&rot->rotisr_lock);
2825 ret = IRQ_HANDLED;
2826 } else if (isr & REGDMA_INT_ERR_MASK) {
2827 /*
2828 * For REGDMA Err, we save the isr info and wake up
2829 * all waiting contexts
2830 */
2831 int i, j;
2832
2833 SDEROT_ERR(
2834 "regdma err isr:%X, wake up all waiting contexts\n",
2835 isr);
2836
2837 spin_lock(&rot->rotisr_lock);
2838
2839 for (i = 0; i < ROT_QUEUE_MAX; i++) {
2840 for (j = 0; j < SDE_HW_ROT_REGDMA_TOTAL_CTX; j++) {
2841 ctx = rot->rotCtx[i][j];
2842 if (ctx && ctx->last_regdma_isr_status == 0) {
2843 ctx->last_regdma_isr_status = isr;
2844 ctx->last_regdma_timestamp = ts;
Alan Kwong818b7fc2016-07-24 22:07:41 -04002845 wake_up_all(&ctx->regdma_waitq);
Alan Kwong9487de22016-01-16 22:06:36 -05002846 SDEROT_DBG("Wakeup rotctx[%d][%d]:%p\n",
2847 i, j, ctx);
2848 }
2849 }
2850 }
2851
Alan Kwong9487de22016-01-16 22:06:36 -05002852 spin_unlock(&rot->rotisr_lock);
2853 ret = IRQ_HANDLED;
2854 }
2855
2856 return ret;
2857}
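
/*
 * Note: the wake-up loop above walks backwards from the completed
 * timestamp (ts, ts - 1, ... modulo SDE_REGDMA_SWTS_MASK), waking every
 * stored context whose timestamp has already elapsed according to
 * sde_hw_rotator_elapsed_swts(), so a single interrupt can retire several
 * contexts when the hardware has advanced past more than one submission.
 */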
2858
2859/*
2860 * sde_hw_rotator_validate_entry - validate rotation entry
2861 * @mgr: Pointer to rotator manager
2862 * @entry: Pointer to rotation entry
2863 *
2864 * This function validates the given rotation entry and provides possible
2865 * fixup (future improvement) if available. This function returns 0 if
2866 * the entry is valid, and returns error code otherwise.
2867 */
2868static int sde_hw_rotator_validate_entry(struct sde_rot_mgr *mgr,
2869 struct sde_rot_entry *entry)
2870{
Benjamin Chanfb6faa32016-08-16 17:21:01 -04002871 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
Alan Kwongb6c049c2017-03-31 12:50:27 -07002872 struct sde_hw_rotator *hw_data;
Alan Kwong9487de22016-01-16 22:06:36 -05002873 int ret = 0;
2874 u16 src_w, src_h, dst_w, dst_h;
2875 struct sde_rotation_item *item = &entry->item;
2876 struct sde_mdp_format_params *fmt;
2877
Alan Kwongb6c049c2017-03-31 12:50:27 -07002878 if (!mgr || !entry || !mgr->hw_data) {
2879 SDEROT_ERR("invalid parameters\n");
2880 return -EINVAL;
2881 }
2882
2883 hw_data = mgr->hw_data;
2884
2885 if (hw_data->maxlinewidth < item->src_rect.w) {
2886 SDEROT_ERR("invalid src width %u\n", item->src_rect.w);
2887 return -EINVAL;
2888 }
2889
Alan Kwong9487de22016-01-16 22:06:36 -05002890 src_w = item->src_rect.w;
2891 src_h = item->src_rect.h;
2892
2893 if (item->flags & SDE_ROTATION_90) {
2894 dst_w = item->dst_rect.h;
2895 dst_h = item->dst_rect.w;
2896 } else {
2897 dst_w = item->dst_rect.w;
2898 dst_h = item->dst_rect.h;
2899 }
2900
2901 entry->dnsc_factor_w = 0;
2902 entry->dnsc_factor_h = 0;
2903
Alan Kwong6bc64622017-02-04 17:36:03 -08002904 if (item->output.sbuf &&
2905 !test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map)) {
2906 SDEROT_ERR("stream buffer not supported\n");
2907 return -EINVAL;
2908 }
2909
Alan Kwong9487de22016-01-16 22:06:36 -05002910 if ((src_w != dst_w) || (src_h != dst_h)) {
Clarence Ip4db1ea82017-05-01 12:18:55 -07002911 if (!dst_w || !dst_h) {
2912 SDEROT_DBG("zero output width/height not support\n");
2913 ret = -EINVAL;
2914 goto dnsc_err;
2915 }
Alan Kwong9487de22016-01-16 22:06:36 -05002916 if ((src_w % dst_w) || (src_h % dst_h)) {
2917 SDEROT_DBG("non integral scale not support\n");
2918 ret = -EINVAL;
Benjamin Chanfb6faa32016-08-16 17:21:01 -04002919 goto dnsc_1p5_check;
Alan Kwong9487de22016-01-16 22:06:36 -05002920 }
2921 entry->dnsc_factor_w = src_w / dst_w;
2922 if ((entry->dnsc_factor_w & (entry->dnsc_factor_w - 1)) ||
2923 (entry->dnsc_factor_w > 64)) {
2924 SDEROT_DBG("non power-of-2 w_scale not support\n");
2925 ret = -EINVAL;
2926 goto dnsc_err;
2927 }
2928 entry->dnsc_factor_h = src_h / dst_h;
2929 if ((entry->dnsc_factor_h & (entry->dnsc_factor_h - 1)) ||
2930 (entry->dnsc_factor_h > 64)) {
2931 SDEROT_DBG("non power-of-2 h_scale not support\n");
2932 ret = -EINVAL;
2933 goto dnsc_err;
2934 }
2935 }
2936
Benjamin Chan0e96afd2017-01-17 16:49:12 -05002937 fmt = sde_get_format_params(item->output.format);
Benjamin Chan886ff672016-11-07 15:23:17 -05002938 /*
2939 * The rotator downscaler supports at most 4x for UBWC formats and
2940 * at most 2x for TP10/TP10_UBWC formats
2941 */
2942 if (sde_mdp_is_ubwc_format(fmt) && (entry->dnsc_factor_h > 4)) {
2943 SDEROT_DBG("max downscale for UBWC format is 4\n");
Alan Kwong9487de22016-01-16 22:06:36 -05002944 ret = -EINVAL;
2945 goto dnsc_err;
2946 }
Benjamin Chan886ff672016-11-07 15:23:17 -05002947 if (sde_mdp_is_tp10_format(fmt) && (entry->dnsc_factor_h > 2)) {
2948 SDEROT_DBG("downscale with TP10 cannot be more than 2\n");
Alan Kwong9487de22016-01-16 22:06:36 -05002949 ret = -EINVAL;
2950 }
Benjamin Chanfb6faa32016-08-16 17:21:01 -04002951 goto dnsc_err;
2952
2953dnsc_1p5_check:
2954 /* Check for 1.5 downscale that only applies to V2 HW */
2955 if (test_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map)) {
2956 entry->dnsc_factor_w = src_w / dst_w;
2957 if ((entry->dnsc_factor_w != 1) ||
2958 ((dst_w * 3) != (src_w * 2))) {
2959 SDEROT_DBG(
2960 "No supporting non 1.5 downscale width ratio, src_w:%d, dst_w:%d\n",
2961 src_w, dst_w);
2962 ret = -EINVAL;
2963 goto dnsc_err;
2964 }
2965
2966 entry->dnsc_factor_h = src_h / dst_h;
2967 if ((entry->dnsc_factor_h != 1) ||
2968 ((dst_h * 3) != (src_h * 2))) {
2969 SDEROT_DBG(
2970 "Not supporting non 1.5 downscale height ratio, src_h:%d, dst_h:%d\n",
2971 src_h, dst_h);
2972 ret = -EINVAL;
2973 goto dnsc_err;
2974 }
2975 ret = 0;
2976 }
Alan Kwong9487de22016-01-16 22:06:36 -05002977
2978dnsc_err:
2979 /* Downscaler does not support asymmetrical dnsc */
2980 if (entry->dnsc_factor_w != entry->dnsc_factor_h) {
2981 SDEROT_DBG("asymmetric downscale not support\n");
2982 ret = -EINVAL;
2983 }
2984
2985 if (ret) {
2986 entry->dnsc_factor_w = 0;
2987 entry->dnsc_factor_h = 0;
2988 }
2989 return ret;
2990}
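
/*
 * Illustrative examples (values assumed): a 3840x2160 source scaled to
 * 960x540 gives dnsc_factor_w = dnsc_factor_h = 4, which is a power of
 * two, no larger than 64 and symmetric, so it passes the checks above.
 * The 1.5x path instead requires dst * 3 == src * 2 in both dimensions,
 * e.g. 1920x1080 scaled down to 1280x720.
 */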
2991
2992/*
2993 * sde_hw_rotator_show_caps - output capability info to sysfs 'caps' file
2994 * @mgr: Pointer to rotator manager
2995 * @attr: Pointer to device attribute interface
2996 * @buf: Pointer to output buffer
2997 * @len: Length of output buffer
2998 */
2999static ssize_t sde_hw_rotator_show_caps(struct sde_rot_mgr *mgr,
3000 struct device_attribute *attr, char *buf, ssize_t len)
3001{
3002 struct sde_hw_rotator *hw_data;
Benjamin Chan886ff672016-11-07 15:23:17 -05003003 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
Alan Kwong9487de22016-01-16 22:06:36 -05003004 int cnt = 0;
3005
3006 if (!mgr || !buf)
3007 return 0;
3008
3009 hw_data = mgr->hw_data;
3010
3011#define SPRINT(fmt, ...) \
3012 (cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
3013
3014 /* insert capabilities here */
Benjamin Chan886ff672016-11-07 15:23:17 -05003015 if (test_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map))
3016 SPRINT("min_downscale=1.5\n");
3017 else
3018 SPRINT("min_downscale=2.0\n");
Alan Kwong9487de22016-01-16 22:06:36 -05003019
Benjamin Chan42db2c92016-11-22 22:50:01 -05003020 SPRINT("downscale_compression=1\n");
3021
Alan Kwong6bc64622017-02-04 17:36:03 -08003022 if (hw_data->downscale_caps)
3023 SPRINT("downscale_ratios=%s\n", hw_data->downscale_caps);
3024
Alan Kwong9487de22016-01-16 22:06:36 -05003025#undef SPRINT
3026 return cnt;
3027}
3028
3029/*
3030 * sde_hw_rotator_show_state - output state info to sysfs 'state' file
3031 * @mgr: Pointer to rotator manager
3032 * @attr: Pointer to device attribute interface
3033 * @buf: Pointer to output buffer
3034 * @len: Length of output buffer
3035 */
3036static ssize_t sde_hw_rotator_show_state(struct sde_rot_mgr *mgr,
3037 struct device_attribute *attr, char *buf, ssize_t len)
3038{
3039 struct sde_hw_rotator *rot;
3040 struct sde_hw_rotator_context *ctx;
3041 int cnt = 0;
3042 int num_active = 0;
3043 int i, j;
3044
3045 if (!mgr || !buf) {
3046 SDEROT_ERR("null parameters\n");
3047 return 0;
3048 }
3049
3050 rot = mgr->hw_data;
3051
3052#define SPRINT(fmt, ...) \
3053 (cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
3054
3055 if (rot) {
3056 SPRINT("rot_mode=%d\n", rot->mode);
3057 SPRINT("irq_num=%d\n", rot->irq_num);
3058
3059 if (rot->mode == ROT_REGDMA_OFF) {
3060 SPRINT("max_active=1\n");
3061 SPRINT("num_active=%d\n", rot->rotCtx[0][0] ? 1 : 0);
3062 } else {
3063 for (i = 0; i < ROT_QUEUE_MAX; i++) {
3064 for (j = 0; j < SDE_HW_ROT_REGDMA_TOTAL_CTX;
3065 j++) {
3066 ctx = rot->rotCtx[i][j];
3067
3068 if (ctx) {
3069 SPRINT(
3070 "rotCtx[%d][%d]:%p\n",
3071 i, j, ctx);
3072 ++num_active;
3073 }
3074 }
3075 }
3076
3077 SPRINT("max_active=%d\n", SDE_HW_ROT_REGDMA_TOTAL_CTX);
3078 SPRINT("num_active=%d\n", num_active);
3079 }
3080 }
3081
3082#undef SPRINT
3083 return cnt;
3084}
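
/*
 * Illustrative 'state' sysfs output from the routine above, for a
 * REGDMA-enabled rotator with a single active context (all values are
 * examples only):
 *
 *	rot_mode=<rot->mode>
 *	irq_num=<rot->irq_num>
 *	rotCtx[0][3]:<context pointer>
 *	max_active=<SDE_HW_ROT_REGDMA_TOTAL_CTX>
 *	num_active=1
 */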
3085
3086/*
Alan Kwongda16e442016-08-14 20:47:18 -04003087 * sde_hw_rotator_get_pixfmt - get the indexed pixel format
3088 * @mgr: Pointer to rotator manager
3089 * @index: index of pixel format
3090 * @input: true for input port; false for output port
Alan Kwong4b416162017-08-11 21:03:10 -04003091 * @mode: operating mode
Alan Kwongda16e442016-08-14 20:47:18 -04003092 */
3093static u32 sde_hw_rotator_get_pixfmt(struct sde_rot_mgr *mgr,
Alan Kwong4b416162017-08-11 21:03:10 -04003094 int index, bool input, u32 mode)
Alan Kwongda16e442016-08-14 20:47:18 -04003095{
Alan Kwong6bc64622017-02-04 17:36:03 -08003096 struct sde_hw_rotator *rot;
3097
3098 if (!mgr || !mgr->hw_data) {
3099 SDEROT_ERR("null parameters\n");
3100 return 0;
3101 }
3102
3103 rot = mgr->hw_data;
3104
Alan Kwong4b416162017-08-11 21:03:10 -04003105 if (mode >= SDE_ROTATOR_MODE_MAX) {
3106 SDEROT_ERR("invalid rotator mode %d\n", mode);
3107 return 0;
3108 }
3109
Alan Kwongda16e442016-08-14 20:47:18 -04003110 if (input) {
Alan Kwong4b416162017-08-11 21:03:10 -04003111 if ((index < rot->num_inpixfmt[mode]) && rot->inpixfmts[mode])
3112 return rot->inpixfmts[mode][index];
Alan Kwongda16e442016-08-14 20:47:18 -04003113 else
3114 return 0;
3115 } else {
Alan Kwong4b416162017-08-11 21:03:10 -04003116 if ((index < rot->num_outpixfmt[mode]) && rot->outpixfmts[mode])
3117 return rot->outpixfmts[mode][index];
Alan Kwongda16e442016-08-14 20:47:18 -04003118 else
3119 return 0;
3120 }
3121}
3122
3123/*
3124 * sde_hw_rotator_is_valid_pixfmt - verify if the given pixel format is valid
3125 * @mgr: Pointer to rotator manager
3126 * @pixfmt: pixel format to be verified
3127 * @input: true for input port; false for output port
Alan Kwong4b416162017-08-11 21:03:10 -04003128 * @mode: operating mode
Alan Kwongda16e442016-08-14 20:47:18 -04003129 */
3130static int sde_hw_rotator_is_valid_pixfmt(struct sde_rot_mgr *mgr, u32 pixfmt,
Alan Kwong4b416162017-08-11 21:03:10 -04003131 bool input, u32 mode)
Alan Kwongda16e442016-08-14 20:47:18 -04003132{
Alan Kwong6bc64622017-02-04 17:36:03 -08003133 struct sde_hw_rotator *rot;
Alan Kwong4b416162017-08-11 21:03:10 -04003134 const u32 *pixfmts;
Alan Kwong6bc64622017-02-04 17:36:03 -08003135 u32 num_pixfmt;
Alan Kwongda16e442016-08-14 20:47:18 -04003136 int i;
3137
Alan Kwong6bc64622017-02-04 17:36:03 -08003138 if (!mgr || !mgr->hw_data) {
3139 SDEROT_ERR("null parameters\n");
3140 return false;
Alan Kwongda16e442016-08-14 20:47:18 -04003141 }
3142
Alan Kwong6bc64622017-02-04 17:36:03 -08003143 rot = mgr->hw_data;
3144
Alan Kwong4b416162017-08-11 21:03:10 -04003145 if (mode >= SDE_ROTATOR_MODE_MAX) {
3146 SDEROT_ERR("invalid rotator mode %d\n", mode);
3147 return false;
3148 }
3149
Alan Kwong6bc64622017-02-04 17:36:03 -08003150 if (input) {
Alan Kwong4b416162017-08-11 21:03:10 -04003151 pixfmts = rot->inpixfmts[mode];
3152 num_pixfmt = rot->num_inpixfmt[mode];
Alan Kwong6bc64622017-02-04 17:36:03 -08003153 } else {
Alan Kwong4b416162017-08-11 21:03:10 -04003154 pixfmts = rot->outpixfmts[mode];
3155 num_pixfmt = rot->num_outpixfmt[mode];
Alan Kwong6bc64622017-02-04 17:36:03 -08003156 }
3157
3158 if (!pixfmts || !num_pixfmt) {
3159 SDEROT_ERR("invalid pixel format tables\n");
3160 return false;
3161 }
3162
3163 for (i = 0; i < num_pixfmt; i++)
3164 if (pixfmts[i] == pixfmt)
3165 return true;
3166
Alan Kwongda16e442016-08-14 20:47:18 -04003167 return false;
3168}
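
/*
 * Usage sketch for the two ops above (illustrative only; assumes an
 * initialized manager and uses SDE_ROTATOR_MODE_OFFLINE merely as an
 * example operating mode). Enumeration stops when get_pixfmt returns 0:
 *
 *	u32 fmt;
 *	int i = 0;
 *
 *	while ((fmt = mgr->ops_hw_get_pixfmt(mgr, i++, true,
 *			SDE_ROTATOR_MODE_OFFLINE)))
 *		pr_debug("input fmt 0x%x valid:%d\n", fmt,
 *			mgr->ops_hw_is_valid_pixfmt(mgr, fmt, true,
 *				SDE_ROTATOR_MODE_OFFLINE));
 */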
3169
3170/*
Alan Kwong6bc64622017-02-04 17:36:03 -08003171 * sde_hw_rotator_get_downscale_caps - get scaling capability string
3172 * @mgr: Pointer to rotator manager
3173 * @caps: Pointer to capability string buffer; NULL to query the required length
3174 * @len: length of capability string buffer
3175 * return: length of capability string
3176 */
3177static int sde_hw_rotator_get_downscale_caps(struct sde_rot_mgr *mgr,
3178 char *caps, int len)
3179{
3180 struct sde_hw_rotator *rot;
3181 int rc = 0;
3182
3183 if (!mgr || !mgr->hw_data) {
3184 SDEROT_ERR("null parameters\n");
3185 return -EINVAL;
3186 }
3187
3188 rot = mgr->hw_data;
3189
3190 if (rot->downscale_caps) {
3191 if (caps)
3192 rc = snprintf(caps, len, "%s", rot->downscale_caps);
3193 else
3194 rc = strlen(rot->downscale_caps);
3195 }
3196
3197 return rc;
3198}
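
/*
 * Usage sketch for the query-then-fill pattern above (illustrative
 * only; error handling omitted):
 *
 *	int n = mgr->ops_hw_get_downscale_caps(mgr, NULL, 0);
 *	char *caps = kzalloc(n + 1, GFP_KERNEL);
 *
 *	if (caps && n > 0)
 *		mgr->ops_hw_get_downscale_caps(mgr, caps, n + 1);
 */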
3199
3200/*
Alan Kwongb6c049c2017-03-31 12:50:27 -07003201 * sde_hw_rotator_get_maxlinewidth - get maximum line width supported
3202 * @mgr: Pointer to rotator manager
3203 * return: maximum line width supported by hardware
3204 */
3205static int sde_hw_rotator_get_maxlinewidth(struct sde_rot_mgr *mgr)
3206{
3207 struct sde_hw_rotator *rot;
3208
3209 if (!mgr || !mgr->hw_data) {
3210 SDEROT_ERR("null parameters\n");
3211 return -EINVAL;
3212 }
3213
3214 rot = mgr->hw_data;
3215
3216 return rot->maxlinewidth;
3217}
3218
3219/*
Alan Kwong9487de22016-01-16 22:06:36 -05003220 * sde_hw_rotator_parse_dt - parse r3 specific device tree settings
3221 * @hw_data: Pointer to rotator hw
3222 * @dev: Pointer to platform device
3223 */
3224static int sde_hw_rotator_parse_dt(struct sde_hw_rotator *hw_data,
3225 struct platform_device *dev)
3226{
3227 int ret = 0;
3228 u32 data;
3229
3230 if (!hw_data || !dev)
3231 return -EINVAL;
3232
3233 ret = of_property_read_u32(dev->dev.of_node, "qcom,mdss-rot-mode",
3234 &data);
3235 if (ret) {
3236 SDEROT_DBG("default to regdma off\n");
3237 ret = 0;
3238 hw_data->mode = ROT_REGDMA_OFF;
3239 } else if (data < ROT_REGDMA_MAX) {
3240 SDEROT_DBG("set to regdma mode %d\n", data);
3241 hw_data->mode = data;
3242 } else {
3243 SDEROT_ERR("regdma mode out of range. default to regdma off\n");
3244 hw_data->mode = ROT_REGDMA_OFF;
3245 }
3246
3247 ret = of_property_read_u32(dev->dev.of_node,
3248 "qcom,mdss-highest-bank-bit", &data);
3249 if (ret) {
3250 SDEROT_DBG("default to A5X bank\n");
3251 ret = 0;
3252 hw_data->highest_bank = 2;
3253 } else {
3254 SDEROT_DBG("set highest bank bit to %d\n", data);
3255 hw_data->highest_bank = data;
3256 }
3257
Alan Kwong6bc64622017-02-04 17:36:03 -08003258 ret = of_property_read_u32(dev->dev.of_node,
Alan Kwongfb8eeb22017-02-06 15:00:03 -08003259 "qcom,sde-ubwc-malsize", &data);
3260 if (ret) {
3261 ret = 0;
3262 hw_data->ubwc_malsize = DEFAULT_UBWC_MALSIZE;
3263 } else {
3264 SDEROT_DBG("set ubwc malsize to %d\n", data);
3265 hw_data->ubwc_malsize = data;
3266 }
3267
3268 ret = of_property_read_u32(dev->dev.of_node,
3269 "qcom,sde-ubwc_swizzle", &data);
3270 if (ret) {
3271 ret = 0;
3272 hw_data->ubwc_swizzle = DEFAULT_UBWC_SWIZZLE;
3273 } else {
3274 SDEROT_DBG("set ubwc swizzle to %d\n", data);
3275 hw_data->ubwc_swizzle = data;
3276 }
3277
3278 ret = of_property_read_u32(dev->dev.of_node,
Alan Kwong6bc64622017-02-04 17:36:03 -08003279 "qcom,mdss-sbuf-headroom", &data);
3280 if (ret) {
3281 ret = 0;
3282 hw_data->sbuf_headroom = DEFAULT_SBUF_HEADROOM;
3283 } else {
3284 SDEROT_DBG("set sbuf headroom to %d\n", data);
3285 hw_data->sbuf_headroom = data;
3286 }
3287
Alan Kwongb6c049c2017-03-31 12:50:27 -07003288 ret = of_property_read_u32(dev->dev.of_node,
3289 "qcom,mdss-rot-linewidth", &data);
3290 if (ret) {
3291 ret = 0;
3292 hw_data->maxlinewidth = DEFAULT_MAXLINEWIDTH;
3293 } else {
3294 SDEROT_DBG("set mdss-rot-linewidth to %d\n", data);
3295 hw_data->maxlinewidth = data;
3296 }
3297
Alan Kwong9487de22016-01-16 22:06:36 -05003298 return ret;
3299}
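
/*
 * Example device tree fragment consumed by sde_hw_rotator_parse_dt()
 * (values are illustrative; each property is optional and falls back to
 * the default noted in the code above when absent):
 *
 *	qcom,mdss-rot-mode = <1>;
 *	qcom,mdss-highest-bank-bit = <2>;
 *	qcom,sde-ubwc-malsize = <0>;
 *	qcom,sde-ubwc_swizzle = <0>;
 *	qcom,mdss-sbuf-headroom = <20>;
 *	qcom,mdss-rot-linewidth = <4096>;
 */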
3300
3301/*
3302 * sde_rotator_r3_init - initialize the r3 module
3303 * @mgr: Pointer to rotator manager
3304 *
3305 * This function setup r3 callback functions, parses r3 specific
3306 * device tree settings, installs r3 specific interrupt handler,
3307 * as well as initializes r3 internal data structure.
3308 */
3309int sde_rotator_r3_init(struct sde_rot_mgr *mgr)
3310{
3311 struct sde_hw_rotator *rot;
3312 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
3313 int i;
3314 int ret;
3315
3316 rot = devm_kzalloc(&mgr->pdev->dev, sizeof(*rot), GFP_KERNEL);
3317 if (!rot)
3318 return -ENOMEM;
3319
3320 mgr->hw_data = rot;
3321 mgr->queue_count = ROT_QUEUE_MAX;
3322
3323 rot->mdss_base = mdata->sde_io.base;
3324 rot->pdev = mgr->pdev;
Alan Kwong6bc64622017-02-04 17:36:03 -08003325 rot->koff_timeout = KOFF_TIMEOUT;
3326 rot->vid_trigger = ROTTOP_START_CTRL_TRIG_SEL_MDP;
3327 rot->cmd_trigger = ROTTOP_START_CTRL_TRIG_SEL_MDP;
Alan Kwong9487de22016-01-16 22:06:36 -05003328
3329 /* Assign ops */
3330 mgr->ops_hw_destroy = sde_hw_rotator_destroy;
3331 mgr->ops_hw_alloc = sde_hw_rotator_alloc_ext;
3332 mgr->ops_hw_free = sde_hw_rotator_free_ext;
3333 mgr->ops_config_hw = sde_hw_rotator_config;
3334 mgr->ops_kickoff_entry = sde_hw_rotator_kickoff;
3335 mgr->ops_wait_for_entry = sde_hw_rotator_wait4done;
3336 mgr->ops_hw_validate_entry = sde_hw_rotator_validate_entry;
3337 mgr->ops_hw_show_caps = sde_hw_rotator_show_caps;
3338 mgr->ops_hw_show_state = sde_hw_rotator_show_state;
3339 mgr->ops_hw_create_debugfs = sde_rotator_r3_create_debugfs;
Alan Kwongda16e442016-08-14 20:47:18 -04003340 mgr->ops_hw_get_pixfmt = sde_hw_rotator_get_pixfmt;
3341 mgr->ops_hw_is_valid_pixfmt = sde_hw_rotator_is_valid_pixfmt;
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04003342 mgr->ops_hw_pre_pmevent = sde_hw_rotator_pre_pmevent;
3343 mgr->ops_hw_post_pmevent = sde_hw_rotator_post_pmevent;
Alan Kwong6bc64622017-02-04 17:36:03 -08003344 mgr->ops_hw_get_downscale_caps = sde_hw_rotator_get_downscale_caps;
Alan Kwongb6c049c2017-03-31 12:50:27 -07003345 mgr->ops_hw_get_maxlinewidth = sde_hw_rotator_get_maxlinewidth;
Alan Kwong9487de22016-01-16 22:06:36 -05003346
3347 ret = sde_hw_rotator_parse_dt(mgr->hw_data, mgr->pdev);
3348 if (ret)
3349 goto error_parse_dt;
3350
3351 rot->irq_num = platform_get_irq(mgr->pdev, 0);
3352 if (rot->irq_num < 0) {
3353 SDEROT_ERR("failed to get rotator irq\n");
3354 } else {
3355 if (rot->mode == ROT_REGDMA_OFF)
3356 ret = devm_request_threaded_irq(&mgr->pdev->dev,
3357 rot->irq_num,
3358 sde_hw_rotator_rotirq_handler,
3359 NULL, 0, "sde_rotator_r3", rot);
3360 else
3361 ret = devm_request_threaded_irq(&mgr->pdev->dev,
3362 rot->irq_num,
3363 sde_hw_rotator_regdmairq_handler,
3364 NULL, 0, "sde_rotator_r3", rot);
3365 if (ret) {
3366 SDEROT_ERR("failed to request irq, ret:%d\n", ret);
3367 rot->irq_num = -1;
3368 } else {
3369 disable_irq(rot->irq_num);
3370 }
3371 }
Alan Kwong818b7fc2016-07-24 22:07:41 -04003372 atomic_set(&rot->irq_enabled, 0);
Alan Kwong9487de22016-01-16 22:06:36 -05003373
3374 setup_rotator_ops(&rot->ops, rot->mode);
3375
3376 spin_lock_init(&rot->rotctx_lock);
3377 spin_lock_init(&rot->rotisr_lock);
3378
3379 /* REGDMA initialization */
3380 if (rot->mode == ROT_REGDMA_OFF) {
3381 for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
3382 rot->cmd_wr_ptr[0][i] = &rot->cmd_queue[
3383 SDE_HW_ROT_REGDMA_SEG_SIZE * i];
3384 } else {
3385 for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
3386 rot->cmd_wr_ptr[ROT_QUEUE_HIGH_PRIORITY][i] =
3387 (u32 *)(rot->mdss_base +
3388 REGDMA_RAM_REGDMA_CMD_RAM +
3389 SDE_HW_ROT_REGDMA_SEG_SIZE * 4 * i);
3390
3391 for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
3392 rot->cmd_wr_ptr[ROT_QUEUE_LOW_PRIORITY][i] =
3393 (u32 *)(rot->mdss_base +
3394 REGDMA_RAM_REGDMA_CMD_RAM +
3395 SDE_HW_ROT_REGDMA_SEG_SIZE * 4 *
3396 (i + SDE_HW_ROT_REGDMA_TOTAL_CTX));
3397 }
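
	/*
	 * Layout note for the pointers set up above: each REGDMA context
	 * occupies SDE_HW_ROT_REGDMA_SEG_SIZE 32-bit words, hence the
	 * SEG_SIZE * 4 byte stride from REGDMA_RAM_REGDMA_CMD_RAM. The
	 * high priority queue uses the first SDE_HW_ROT_REGDMA_TOTAL_CTX
	 * segments and the low priority queue the next ones, so the two
	 * queues never overlap. For example, if SEG_SIZE were 256 words
	 * (illustrative), low priority context 0 would start at byte
	 * offset REGDMA_RAM_REGDMA_CMD_RAM + 1024 * TOTAL_CTX. In
	 * ROT_REGDMA_OFF mode the pointers instead target the driver's
	 * local cmd_queue buffer.
	 */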
3398
Alan Kwong6bc64622017-02-04 17:36:03 -08003399 for (i = 0; i < ROT_QUEUE_MAX; i++) {
3400 atomic_set(&rot->timestamp[i], 0);
3401 INIT_LIST_HEAD(&rot->sbuf_ctx[i]);
3402 }
Alan Kwong9487de22016-01-16 22:06:36 -05003403
3404 ret = sde_rotator_hw_rev_init(rot);
3405 if (ret)
3406 goto error_hw_rev_init;
3407
Alan Kwong315cd772016-08-03 22:29:42 -04003408 /* set rotator CBCR to shut off memory/periphery when the clock is off. */
Clarence Ip77c053d2017-04-24 19:26:37 -07003409 clk_set_flags(mgr->rot_clk[SDE_ROTATOR_CLK_MDSS_ROT].clk,
Alan Kwong315cd772016-08-03 22:29:42 -04003410 CLKFLAG_NORETAIN_MEM);
Clarence Ip77c053d2017-04-24 19:26:37 -07003411 clk_set_flags(mgr->rot_clk[SDE_ROTATOR_CLK_MDSS_ROT].clk,
Alan Kwong315cd772016-08-03 22:29:42 -04003412 CLKFLAG_NORETAIN_PERIPH);
3413
Benjamin Chan53e3bce2016-08-31 14:43:29 -04003414 mdata->sde_rot_hw = rot;
Alan Kwong9487de22016-01-16 22:06:36 -05003415 return 0;
3416error_hw_rev_init:
3417 if (rot->irq_num >= 0)
3418 devm_free_irq(&mgr->pdev->dev, rot->irq_num, rot);
3419 devm_kfree(&mgr->pdev->dev, mgr->hw_data);
3420error_parse_dt:
3421 return ret;
3422}