/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt)	"%s:%d: " fmt, __func__, __LINE__

#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/delay.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dma-buf.h>
#include <linux/msm_ion.h>
#include <linux/clk.h>
#include <linux/clk/qcom.h>

#include "sde_rotator_core.h"
#include "sde_rotator_util.h"
#include "sde_rotator_smmu.h"
#include "sde_rotator_r3.h"
#include "sde_rotator_r3_internal.h"
#include "sde_rotator_r3_hwio.h"
#include "sde_rotator_r3_debug.h"
#include "sde_rotator_trace.h"
#include "sde_rotator_debug.h"

#define RES_UHD			(3840*2160)
#define MS_TO_US(t)		((t) * USEC_PER_MSEC)

/* traffic shaping clock ticks = finish_time x 19.2MHz */
#define TRAFFIC_SHAPE_CLKTICK_14MS	268800
#define TRAFFIC_SHAPE_CLKTICK_12MS	230400
#define TRAFFIC_SHAPE_VSYNC_CLK		19200000

/* XIN mapping */
#define XIN_SSPP		0
#define XIN_WRITEBACK		1

/* wait for at most 2 vsync for lowest refresh rate (24hz) */
#define KOFF_TIMEOUT		(42 * 8)

/* default stream buffer headroom in lines */
#define DEFAULT_SBUF_HEADROOM	20
#define DEFAULT_UBWC_MALSIZE	0
#define DEFAULT_UBWC_SWIZZLE	0

#define DEFAULT_MAXLINEWIDTH	4096

/* stride alignment requirement for avoiding partial writes */
#define PARTIAL_WRITE_ALIGNMENT	0x1F

/* Macro for constructing the REGDMA command */
#define SDE_REGDMA_WRITE(p, off, data) \
	do { \
		SDEROT_DBG("SDEREG.W:[%s:0x%X] <= 0x%X\n", #off, (off),\
				(u32)(data));\
		writel_relaxed_no_log( \
				(REGDMA_OP_REGWRITE | \
					((off) & REGDMA_ADDR_OFFSET_MASK)), \
				p); \
		p += sizeof(u32); \
		writel_relaxed_no_log(data, p); \
		p += sizeof(u32); \
	} while (0)

#define SDE_REGDMA_MODIFY(p, off, mask, data) \
	do { \
		SDEROT_DBG("SDEREG.M:[%s:0x%X] <= 0x%X\n", #off, (off),\
				(u32)(data));\
		writel_relaxed_no_log( \
				(REGDMA_OP_REGMODIFY | \
					((off) & REGDMA_ADDR_OFFSET_MASK)), \
				p); \
		p += sizeof(u32); \
		writel_relaxed_no_log(mask, p); \
		p += sizeof(u32); \
		writel_relaxed_no_log(data, p); \
		p += sizeof(u32); \
	} while (0)

#define SDE_REGDMA_BLKWRITE_INC(p, off, len) \
	do { \
		SDEROT_DBG("SDEREG.B:[%s:0x%X:0x%X]\n", #off, (off),\
				(u32)(len));\
		writel_relaxed_no_log( \
				(REGDMA_OP_BLKWRITE_INC | \
					((off) & REGDMA_ADDR_OFFSET_MASK)), \
				p); \
		p += sizeof(u32); \
		writel_relaxed_no_log(len, p); \
		p += sizeof(u32); \
	} while (0)

#define SDE_REGDMA_BLKWRITE_DATA(p, data) \
	do { \
		SDEROT_DBG("SDEREG.I:[:] <= 0x%X\n", (u32)(data));\
		writel_relaxed_no_log(data, p); \
		p += sizeof(u32); \
	} while (0)

#define SDE_REGDMA_READ(p, data) \
	do { \
		data = readl_relaxed_no_log(p); \
		p += sizeof(u32); \
	} while (0)

/* Macro for directly accessing mapped registers */
#define SDE_ROTREG_WRITE(base, off, data) \
	do { \
		SDEROT_DBG("SDEREG.D:[%s:0x%X] <= 0x%X\n", #off, (off)\
				, (u32)(data));\
		writel_relaxed(data, (base + (off))); \
	} while (0)

#define SDE_ROTREG_READ(base, off) \
	readl_relaxed(base + (off))
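
/*
 * Illustrative note (not part of the original driver): the macros above
 * build the REGDMA command stream as a sequence of u32 words in the
 * command segment pointed to by 'p'. For example, a hypothetical
 * sequence such as
 *
 *	SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, 1);
 *	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_SIZE, 2);
 *	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x00010001);
 *	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
 *
 * emits, in order: (REGDMA_OP_REGWRITE | offset), 1,
 * (REGDMA_OP_BLKWRITE_INC | offset), 2, 0x00010001, 0 -- i.e. each
 * macro appends its opcode/offset word followed by its payload words,
 * advancing 'wrptr' by sizeof(u32) per word.
 */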

static const u32 sde_hw_rotator_v3_inpixfmts[] = {
	SDE_PIX_FMT_XRGB_8888,
	SDE_PIX_FMT_ARGB_8888,
	SDE_PIX_FMT_ABGR_8888,
	SDE_PIX_FMT_RGBA_8888,
	SDE_PIX_FMT_BGRA_8888,
	SDE_PIX_FMT_RGBX_8888,
	SDE_PIX_FMT_BGRX_8888,
	SDE_PIX_FMT_XBGR_8888,
	SDE_PIX_FMT_RGBA_5551,
	SDE_PIX_FMT_ARGB_1555,
	SDE_PIX_FMT_ABGR_1555,
	SDE_PIX_FMT_BGRA_5551,
	SDE_PIX_FMT_BGRX_5551,
	SDE_PIX_FMT_RGBX_5551,
	SDE_PIX_FMT_XBGR_1555,
	SDE_PIX_FMT_XRGB_1555,
	SDE_PIX_FMT_ARGB_4444,
	SDE_PIX_FMT_RGBA_4444,
	SDE_PIX_FMT_BGRA_4444,
	SDE_PIX_FMT_ABGR_4444,
	SDE_PIX_FMT_RGBX_4444,
	SDE_PIX_FMT_XRGB_4444,
	SDE_PIX_FMT_BGRX_4444,
	SDE_PIX_FMT_XBGR_4444,
	SDE_PIX_FMT_RGB_888,
	SDE_PIX_FMT_BGR_888,
	SDE_PIX_FMT_RGB_565,
	SDE_PIX_FMT_BGR_565,
	SDE_PIX_FMT_Y_CB_CR_H2V2,
	SDE_PIX_FMT_Y_CR_CB_H2V2,
	SDE_PIX_FMT_Y_CR_CB_GH2V2,
	SDE_PIX_FMT_Y_CBCR_H2V2,
	SDE_PIX_FMT_Y_CRCB_H2V2,
	SDE_PIX_FMT_Y_CBCR_H1V2,
	SDE_PIX_FMT_Y_CRCB_H1V2,
	SDE_PIX_FMT_Y_CBCR_H2V1,
	SDE_PIX_FMT_Y_CRCB_H2V1,
	SDE_PIX_FMT_YCBYCR_H2V1,
	SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
	SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
	SDE_PIX_FMT_RGBA_8888_UBWC,
	SDE_PIX_FMT_RGBX_8888_UBWC,
	SDE_PIX_FMT_RGB_565_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
	SDE_PIX_FMT_RGBA_1010102,
	SDE_PIX_FMT_RGBX_1010102,
	SDE_PIX_FMT_ARGB_2101010,
	SDE_PIX_FMT_XRGB_2101010,
	SDE_PIX_FMT_BGRA_1010102,
	SDE_PIX_FMT_BGRX_1010102,
	SDE_PIX_FMT_ABGR_2101010,
	SDE_PIX_FMT_XBGR_2101010,
	SDE_PIX_FMT_RGBA_1010102_UBWC,
	SDE_PIX_FMT_RGBX_1010102_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_P010,
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
};

static const u32 sde_hw_rotator_v3_outpixfmts[] = {
	SDE_PIX_FMT_XRGB_8888,
	SDE_PIX_FMT_ARGB_8888,
	SDE_PIX_FMT_ABGR_8888,
	SDE_PIX_FMT_RGBA_8888,
	SDE_PIX_FMT_BGRA_8888,
	SDE_PIX_FMT_RGBX_8888,
	SDE_PIX_FMT_BGRX_8888,
	SDE_PIX_FMT_XBGR_8888,
	SDE_PIX_FMT_RGBA_5551,
	SDE_PIX_FMT_ARGB_1555,
	SDE_PIX_FMT_ABGR_1555,
	SDE_PIX_FMT_BGRA_5551,
	SDE_PIX_FMT_BGRX_5551,
	SDE_PIX_FMT_RGBX_5551,
	SDE_PIX_FMT_XBGR_1555,
	SDE_PIX_FMT_XRGB_1555,
	SDE_PIX_FMT_ARGB_4444,
	SDE_PIX_FMT_RGBA_4444,
	SDE_PIX_FMT_BGRA_4444,
	SDE_PIX_FMT_ABGR_4444,
	SDE_PIX_FMT_RGBX_4444,
	SDE_PIX_FMT_XRGB_4444,
	SDE_PIX_FMT_BGRX_4444,
	SDE_PIX_FMT_XBGR_4444,
	SDE_PIX_FMT_RGB_888,
	SDE_PIX_FMT_BGR_888,
	SDE_PIX_FMT_RGB_565,
	SDE_PIX_FMT_BGR_565,
	/* SDE_PIX_FMT_Y_CB_CR_H2V2 */
	/* SDE_PIX_FMT_Y_CR_CB_H2V2 */
	/* SDE_PIX_FMT_Y_CR_CB_GH2V2 */
	SDE_PIX_FMT_Y_CBCR_H2V2,
	SDE_PIX_FMT_Y_CRCB_H2V2,
	SDE_PIX_FMT_Y_CBCR_H1V2,
	SDE_PIX_FMT_Y_CRCB_H1V2,
	SDE_PIX_FMT_Y_CBCR_H2V1,
	SDE_PIX_FMT_Y_CRCB_H2V1,
	/* SDE_PIX_FMT_YCBYCR_H2V1 */
	SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
	SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
	SDE_PIX_FMT_RGBA_8888_UBWC,
	SDE_PIX_FMT_RGBX_8888_UBWC,
	SDE_PIX_FMT_RGB_565_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
	SDE_PIX_FMT_RGBA_1010102,
	SDE_PIX_FMT_RGBX_1010102,
	/* SDE_PIX_FMT_ARGB_2101010 */
	/* SDE_PIX_FMT_XRGB_2101010 */
	SDE_PIX_FMT_BGRA_1010102,
	SDE_PIX_FMT_BGRX_1010102,
	/* SDE_PIX_FMT_ABGR_2101010 */
	/* SDE_PIX_FMT_XBGR_2101010 */
	SDE_PIX_FMT_RGBA_1010102_UBWC,
	SDE_PIX_FMT_RGBX_1010102_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_P010,
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
};

static const u32 sde_hw_rotator_v4_inpixfmts[] = {
	SDE_PIX_FMT_XRGB_8888,
	SDE_PIX_FMT_ARGB_8888,
	SDE_PIX_FMT_ABGR_8888,
	SDE_PIX_FMT_RGBA_8888,
	SDE_PIX_FMT_BGRA_8888,
	SDE_PIX_FMT_RGBX_8888,
	SDE_PIX_FMT_BGRX_8888,
	SDE_PIX_FMT_XBGR_8888,
	SDE_PIX_FMT_RGBA_5551,
	SDE_PIX_FMT_ARGB_1555,
	SDE_PIX_FMT_ABGR_1555,
	SDE_PIX_FMT_BGRA_5551,
	SDE_PIX_FMT_BGRX_5551,
	SDE_PIX_FMT_RGBX_5551,
	SDE_PIX_FMT_XBGR_1555,
	SDE_PIX_FMT_XRGB_1555,
	SDE_PIX_FMT_ARGB_4444,
	SDE_PIX_FMT_RGBA_4444,
	SDE_PIX_FMT_BGRA_4444,
	SDE_PIX_FMT_ABGR_4444,
	SDE_PIX_FMT_RGBX_4444,
	SDE_PIX_FMT_XRGB_4444,
	SDE_PIX_FMT_BGRX_4444,
	SDE_PIX_FMT_XBGR_4444,
	SDE_PIX_FMT_RGB_888,
	SDE_PIX_FMT_BGR_888,
	SDE_PIX_FMT_RGB_565,
	SDE_PIX_FMT_BGR_565,
	SDE_PIX_FMT_Y_CB_CR_H2V2,
	SDE_PIX_FMT_Y_CR_CB_H2V2,
	SDE_PIX_FMT_Y_CR_CB_GH2V2,
	SDE_PIX_FMT_Y_CBCR_H2V2,
	SDE_PIX_FMT_Y_CRCB_H2V2,
	SDE_PIX_FMT_Y_CBCR_H1V2,
	SDE_PIX_FMT_Y_CRCB_H1V2,
	SDE_PIX_FMT_Y_CBCR_H2V1,
	SDE_PIX_FMT_Y_CRCB_H2V1,
	SDE_PIX_FMT_YCBYCR_H2V1,
	SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
	SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
	SDE_PIX_FMT_RGBA_8888_UBWC,
	SDE_PIX_FMT_RGBX_8888_UBWC,
	SDE_PIX_FMT_RGB_565_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
	SDE_PIX_FMT_RGBA_1010102,
	SDE_PIX_FMT_RGBX_1010102,
	SDE_PIX_FMT_ARGB_2101010,
	SDE_PIX_FMT_XRGB_2101010,
	SDE_PIX_FMT_BGRA_1010102,
	SDE_PIX_FMT_BGRX_1010102,
	SDE_PIX_FMT_ABGR_2101010,
	SDE_PIX_FMT_XBGR_2101010,
	SDE_PIX_FMT_RGBA_1010102_UBWC,
	SDE_PIX_FMT_RGBX_1010102_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_P010,
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE,
	SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
	SDE_PIX_FMT_Y_CRCB_H2V2_TILE,
	SDE_PIX_FMT_XRGB_8888_TILE,
	SDE_PIX_FMT_ARGB_8888_TILE,
	SDE_PIX_FMT_ABGR_8888_TILE,
	SDE_PIX_FMT_XBGR_8888_TILE,
	SDE_PIX_FMT_RGBA_8888_TILE,
	SDE_PIX_FMT_BGRA_8888_TILE,
	SDE_PIX_FMT_RGBX_8888_TILE,
	SDE_PIX_FMT_BGRX_8888_TILE,
	SDE_PIX_FMT_RGBA_1010102_TILE,
	SDE_PIX_FMT_RGBX_1010102_TILE,
	SDE_PIX_FMT_ARGB_2101010_TILE,
	SDE_PIX_FMT_XRGB_2101010_TILE,
	SDE_PIX_FMT_BGRA_1010102_TILE,
	SDE_PIX_FMT_BGRX_1010102_TILE,
	SDE_PIX_FMT_ABGR_2101010_TILE,
	SDE_PIX_FMT_XBGR_2101010_TILE,
};

static const u32 sde_hw_rotator_v4_outpixfmts[] = {
	SDE_PIX_FMT_XRGB_8888,
	SDE_PIX_FMT_ARGB_8888,
	SDE_PIX_FMT_ABGR_8888,
	SDE_PIX_FMT_RGBA_8888,
	SDE_PIX_FMT_BGRA_8888,
	SDE_PIX_FMT_RGBX_8888,
	SDE_PIX_FMT_BGRX_8888,
	SDE_PIX_FMT_XBGR_8888,
	SDE_PIX_FMT_RGBA_5551,
	SDE_PIX_FMT_ARGB_1555,
	SDE_PIX_FMT_ABGR_1555,
	SDE_PIX_FMT_BGRA_5551,
	SDE_PIX_FMT_BGRX_5551,
	SDE_PIX_FMT_RGBX_5551,
	SDE_PIX_FMT_XBGR_1555,
	SDE_PIX_FMT_XRGB_1555,
	SDE_PIX_FMT_ARGB_4444,
	SDE_PIX_FMT_RGBA_4444,
	SDE_PIX_FMT_BGRA_4444,
	SDE_PIX_FMT_ABGR_4444,
	SDE_PIX_FMT_RGBX_4444,
	SDE_PIX_FMT_XRGB_4444,
	SDE_PIX_FMT_BGRX_4444,
	SDE_PIX_FMT_XBGR_4444,
	SDE_PIX_FMT_RGB_888,
	SDE_PIX_FMT_BGR_888,
	SDE_PIX_FMT_RGB_565,
	SDE_PIX_FMT_BGR_565,
	/* SDE_PIX_FMT_Y_CB_CR_H2V2 */
	/* SDE_PIX_FMT_Y_CR_CB_H2V2 */
	/* SDE_PIX_FMT_Y_CR_CB_GH2V2 */
	SDE_PIX_FMT_Y_CBCR_H2V2,
	SDE_PIX_FMT_Y_CRCB_H2V2,
	SDE_PIX_FMT_Y_CBCR_H1V2,
	SDE_PIX_FMT_Y_CRCB_H1V2,
	SDE_PIX_FMT_Y_CBCR_H2V1,
	SDE_PIX_FMT_Y_CRCB_H2V1,
	/* SDE_PIX_FMT_YCBYCR_H2V1 */
	SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
	SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
	SDE_PIX_FMT_RGBA_8888_UBWC,
	SDE_PIX_FMT_RGBX_8888_UBWC,
	SDE_PIX_FMT_RGB_565_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
	SDE_PIX_FMT_RGBA_1010102,
	SDE_PIX_FMT_RGBX_1010102,
	SDE_PIX_FMT_ARGB_2101010,
	SDE_PIX_FMT_XRGB_2101010,
	SDE_PIX_FMT_BGRA_1010102,
	SDE_PIX_FMT_BGRX_1010102,
	SDE_PIX_FMT_ABGR_2101010,
	SDE_PIX_FMT_XBGR_2101010,
	SDE_PIX_FMT_RGBA_1010102_UBWC,
	SDE_PIX_FMT_RGBX_1010102_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_P010,
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE,
	SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
	SDE_PIX_FMT_Y_CRCB_H2V2_TILE,
	SDE_PIX_FMT_XRGB_8888_TILE,
	SDE_PIX_FMT_ARGB_8888_TILE,
	SDE_PIX_FMT_ABGR_8888_TILE,
	SDE_PIX_FMT_XBGR_8888_TILE,
	SDE_PIX_FMT_RGBA_8888_TILE,
	SDE_PIX_FMT_BGRA_8888_TILE,
	SDE_PIX_FMT_RGBX_8888_TILE,
	SDE_PIX_FMT_BGRX_8888_TILE,
	SDE_PIX_FMT_RGBA_1010102_TILE,
	SDE_PIX_FMT_RGBX_1010102_TILE,
	SDE_PIX_FMT_ARGB_2101010_TILE,
	SDE_PIX_FMT_XRGB_2101010_TILE,
	SDE_PIX_FMT_BGRA_1010102_TILE,
	SDE_PIX_FMT_BGRX_1010102_TILE,
	SDE_PIX_FMT_ABGR_2101010_TILE,
	SDE_PIX_FMT_XBGR_2101010_TILE,
};

static const u32 sde_hw_rotator_v4_inpixfmts_sbuf[] = {
	SDE_PIX_FMT_Y_CBCR_H2V2_P010,
	SDE_PIX_FMT_Y_CBCR_H2V2,
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
	SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE,
	SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
};

static const u32 sde_hw_rotator_v4_outpixfmts_sbuf[] = {
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
	SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE,
	SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
};

static struct sde_rot_vbif_debug_bus nrt_vbif_dbg_bus_r3[] = {
	{0x214, 0x21c, 16, 1, 0x200}, /* arb clients main */
	{0x214, 0x21c, 0, 12, 0x13}, /* xin blocks - axi side */
	{0x21c, 0x214, 0, 12, 0xc}, /* xin blocks - clock side */
};

static struct sde_rot_debug_bus rot_dbgbus_r3[] = {
	/*
	 * rottop - 0xA8850
	 */
	/* REGDMA */
	{ 0XA8850, 0, 0 },
	{ 0XA8850, 0, 1 },
	{ 0XA8850, 0, 2 },
	{ 0XA8850, 0, 3 },
	{ 0XA8850, 0, 4 },

	/* ROT_WB */
	{ 0XA8850, 1, 0 },
	{ 0XA8850, 1, 1 },
	{ 0XA8850, 1, 2 },
	{ 0XA8850, 1, 3 },
	{ 0XA8850, 1, 4 },
	{ 0XA8850, 1, 5 },
	{ 0XA8850, 1, 6 },
	{ 0XA8850, 1, 7 },

	/* UBWC_DEC */
	{ 0XA8850, 2, 0 },

	/* UBWC_ENC */
	{ 0XA8850, 3, 0 },

	/* ROT_FETCH_0 */
	{ 0XA8850, 4, 0 },
	{ 0XA8850, 4, 1 },
	{ 0XA8850, 4, 2 },
	{ 0XA8850, 4, 3 },
	{ 0XA8850, 4, 4 },
	{ 0XA8850, 4, 5 },
	{ 0XA8850, 4, 6 },
	{ 0XA8850, 4, 7 },

	/* ROT_FETCH_1 */
	{ 0XA8850, 5, 0 },
	{ 0XA8850, 5, 1 },
	{ 0XA8850, 5, 2 },
	{ 0XA8850, 5, 3 },
	{ 0XA8850, 5, 4 },
	{ 0XA8850, 5, 5 },
	{ 0XA8850, 5, 6 },
	{ 0XA8850, 5, 7 },

	/* ROT_FETCH_2 */
	{ 0XA8850, 6, 0 },
	{ 0XA8850, 6, 1 },
	{ 0XA8850, 6, 2 },
	{ 0XA8850, 6, 3 },
	{ 0XA8850, 6, 4 },
	{ 0XA8850, 6, 5 },
	{ 0XA8850, 6, 6 },
	{ 0XA8850, 6, 7 },

	/* ROT_FETCH_3 */
	{ 0XA8850, 7, 0 },
	{ 0XA8850, 7, 1 },
	{ 0XA8850, 7, 2 },
	{ 0XA8850, 7, 3 },
	{ 0XA8850, 7, 4 },
	{ 0XA8850, 7, 5 },
	{ 0XA8850, 7, 6 },
	{ 0XA8850, 7, 7 },

	/* ROT_FETCH_4 */
	{ 0XA8850, 8, 0 },
	{ 0XA8850, 8, 1 },
	{ 0XA8850, 8, 2 },
	{ 0XA8850, 8, 3 },
	{ 0XA8850, 8, 4 },
	{ 0XA8850, 8, 5 },
	{ 0XA8850, 8, 6 },
	{ 0XA8850, 8, 7 },

	/* ROT_UNPACK_0 */
	{ 0XA8850, 9, 0 },
	{ 0XA8850, 9, 1 },
	{ 0XA8850, 9, 2 },
	{ 0XA8850, 9, 3 },
};

static struct sde_rot_regdump sde_rot_r3_regdump[] = {
	{ "SDEROT_ROTTOP", SDE_ROT_ROTTOP_OFFSET, 0x100, SDE_ROT_REGDUMP_READ },
	{ "SDEROT_SSPP", SDE_ROT_SSPP_OFFSET, 0x200, SDE_ROT_REGDUMP_READ },
	{ "SDEROT_WB", SDE_ROT_WB_OFFSET, 0x300, SDE_ROT_REGDUMP_READ },
	{ "SDEROT_REGDMA_CSR", SDE_ROT_REGDMA_OFFSET, 0x100,
		SDE_ROT_REGDUMP_READ },
	/*
	 * Need to perform a SW reset to REGDMA in order to access the
	 * REGDMA RAM, especially if REGDMA is waiting for Rotator IDLE.
	 * REGDMA RAM should be dumped last.
	 */
	{ "SDEROT_REGDMA_RESET", ROTTOP_SW_RESET_OVERRIDE, 1,
		SDE_ROT_REGDUMP_WRITE },
	{ "SDEROT_REGDMA_RAM", SDE_ROT_REGDMA_RAM_OFFSET, 0x2000,
		SDE_ROT_REGDUMP_READ },
	{ "SDEROT_VBIF_NRT", SDE_ROT_VBIF_NRT_OFFSET, 0x590,
		SDE_ROT_REGDUMP_VBIF },
};

struct sde_rot_cdp_params {
	bool enable;
	struct sde_mdp_format_params *fmt;
	u32 offset;
};

/* Invalid software timestamp value for initialization */
#define SDE_REGDMA_SWTS_INVALID	(~0)

/**
 * sde_hw_rotator_elapsed_swts - Find difference of 2 software timestamps
 * @ts_curr: current software timestamp
 * @ts_prev: previous software timestamp
 * @return: the amount ts_curr is ahead of ts_prev
 */
static int sde_hw_rotator_elapsed_swts(u32 ts_curr, u32 ts_prev)
{
	u32 diff = (ts_curr - ts_prev) & SDE_REGDMA_SWTS_MASK;

	return sign_extend32(diff, (SDE_REGDMA_SWTS_SHIFT - 1));
}

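/*
 * Illustrative note (not part of the original driver): the elapsed
 * timestamp is computed modulo the timestamp field width and then
 * sign-extended, so the comparison stays correct across counter
 * wrap-around. Assuming, purely for illustration, a 16-bit field
 * (SDE_REGDMA_SWTS_MASK == 0xFFFF, SDE_REGDMA_SWTS_SHIFT == 16):
 *
 *	sde_hw_rotator_elapsed_swts(0x0002, 0xFFFE) -> diff 0x0004 -> +4
 *	sde_hw_rotator_elapsed_swts(0xFFFE, 0x0002) -> diff 0xFFFC -> -4
 *
 * i.e. a context whose timestamp is still ahead of the hardware-written
 * timestamp yields a positive value and is treated as pending below.
 */
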
/**
 * sde_hw_rotator_pending_swts - Check if the given context is still pending
 * @rot: Pointer to hw rotator
 * @ctx: Pointer to rotator context
 * @pswts: Pointer to returned reference software timestamp, optional
 * @return: true if context has pending requests
 */
static int sde_hw_rotator_pending_swts(struct sde_hw_rotator *rot,
		struct sde_hw_rotator_context *ctx, u32 *pswts)
{
	u32 swts;
	int ts_diff;
	bool pending;

	if (ctx->last_regdma_timestamp == SDE_REGDMA_SWTS_INVALID)
		swts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);
	else
		swts = ctx->last_regdma_timestamp;

	if (ctx->q_id == ROT_QUEUE_LOW_PRIORITY)
		swts >>= SDE_REGDMA_SWTS_SHIFT;

	swts &= SDE_REGDMA_SWTS_MASK;

	ts_diff = sde_hw_rotator_elapsed_swts(ctx->timestamp, swts);

	if (pswts)
		*pswts = swts;

	pending = (ts_diff > 0) ? true : false;

	SDEROT_DBG("ts:0x%x, queue_id:%d, swts:0x%x, pending:%d\n",
			ctx->timestamp, ctx->q_id, swts, pending);
	SDEROT_EVTLOG(ctx->timestamp, swts, ctx->q_id, ts_diff);
	return pending;
}

/**
 * sde_hw_rotator_update_swts - update software timestamp with given value
 * @rot: Pointer to hw rotator
 * @ctx: Pointer to rotator context
 * @swts: new software timestamp
 * @return: new combined swts
 */
static u32 sde_hw_rotator_update_swts(struct sde_hw_rotator *rot,
		struct sde_hw_rotator_context *ctx, u32 swts)
{
	u32 mask = SDE_REGDMA_SWTS_MASK;

	swts &= SDE_REGDMA_SWTS_MASK;
	if (ctx->q_id == ROT_QUEUE_LOW_PRIORITY) {
		swts <<= SDE_REGDMA_SWTS_SHIFT;
		mask <<= SDE_REGDMA_SWTS_SHIFT;
	}

	swts |= (SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG) & ~mask);
	SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG, swts);

	return swts;
}

/**
 * sde_hw_rotator_enable_irq - Enable hw rotator interrupt with ref. count
 *				Also, clear rotator/regdma irq status.
 * @rot: Pointer to hw rotator
 */
static void sde_hw_rotator_enable_irq(struct sde_hw_rotator *rot)
{
	SDEROT_DBG("irq_num:%d enabled:%d\n", rot->irq_num,
			atomic_read(&rot->irq_enabled));

	if (!atomic_read(&rot->irq_enabled)) {
		if (rot->mode == ROT_REGDMA_OFF)
			SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
					ROT_DONE_MASK);
		else
			SDE_ROTREG_WRITE(rot->mdss_base,
					REGDMA_CSR_REGDMA_INT_CLEAR,
					REGDMA_INT_MASK);

		enable_irq(rot->irq_num);
	}
	atomic_inc(&rot->irq_enabled);
}

/**
 * sde_hw_rotator_disable_irq - Disable hw rotator interrupt with ref. count
 *				Also, clear rotator/regdma irq enable masks.
 * @rot: Pointer to hw rotator
 */
static void sde_hw_rotator_disable_irq(struct sde_hw_rotator *rot)
{
	SDEROT_DBG("irq_num:%d enabled:%d\n", rot->irq_num,
			atomic_read(&rot->irq_enabled));

	if (!atomic_read(&rot->irq_enabled)) {
		SDEROT_ERR("irq %d is already disabled\n", rot->irq_num);
		return;
	}

	if (!atomic_dec_return(&rot->irq_enabled)) {
		if (rot->mode == ROT_REGDMA_OFF)
			SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_EN, 0);
		else
			SDE_ROTREG_WRITE(rot->mdss_base,
					REGDMA_CSR_REGDMA_INT_EN, 0);
		/* disable irq after last pending irq is handled, if any */
		synchronize_irq(rot->irq_num);
		disable_irq_nosync(rot->irq_num);
	}
}

/**
 * sde_hw_rotator_reset - Reset rotator hardware
 * @rot: pointer to hw rotator
 * @ctx: pointer to current rotator context during the hw hang
 */
static int sde_hw_rotator_reset(struct sde_hw_rotator *rot,
		struct sde_hw_rotator_context *ctx)
{
	struct sde_hw_rotator_context *rctx = NULL;
	u32 int_mask = (REGDMA_INT_0_MASK | REGDMA_INT_1_MASK |
			REGDMA_INT_2_MASK);
	u32 last_ts[ROT_QUEUE_MAX] = {0,};
	u32 latest_ts;
	int elapsed_time, t;
	int i, j;
	unsigned long flags;

	if (!rot || !ctx) {
		SDEROT_ERR("NULL rotator context\n");
		return -EINVAL;
	}

	if (ctx->q_id >= ROT_QUEUE_MAX) {
		SDEROT_ERR("context q_id out of range: %d\n", ctx->q_id);
		return -EINVAL;
	}

	/* sw reset the hw rotator */
	SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_SW_RESET_OVERRIDE, 1);
	usleep_range(MS_TO_US(10), MS_TO_US(20));
	SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_SW_RESET_OVERRIDE, 0);

	spin_lock_irqsave(&rot->rotisr_lock, flags);

	/* update timestamp register with current context */
	last_ts[ctx->q_id] = ctx->timestamp;
	sde_hw_rotator_update_swts(rot, ctx, ctx->timestamp);
	SDEROT_EVTLOG(ctx->timestamp);

	/*
	 * Search for any pending rot session, and look for last timestamp
	 * per hw queue.
	 */
	for (i = 0; i < ROT_QUEUE_MAX; i++) {
		latest_ts = atomic_read(&rot->timestamp[i]);
		latest_ts &= SDE_REGDMA_SWTS_MASK;
		elapsed_time = sde_hw_rotator_elapsed_swts(latest_ts,
				last_ts[i]);

		for (j = 0; j < SDE_HW_ROT_REGDMA_TOTAL_CTX; j++) {
			rctx = rot->rotCtx[i][j];
			if (rctx && rctx != ctx) {
				rctx->last_regdma_isr_status = int_mask;
				rctx->last_regdma_timestamp = rctx->timestamp;

				t = sde_hw_rotator_elapsed_swts(latest_ts,
						rctx->timestamp);
				if (t < elapsed_time) {
					elapsed_time = t;
					last_ts[i] = rctx->timestamp;
					sde_hw_rotator_update_swts(rot, rctx,
							last_ts[i]);
				}

				SDEROT_DBG("rotctx[%d][%d], ts:%d\n",
						i, j, rctx->timestamp);
				SDEROT_EVTLOG(i, j, rctx->timestamp,
						last_ts[i]);
			}
		}
	}

	/* Finally wakeup all pending rotator context in queue */
	for (i = 0; i < ROT_QUEUE_MAX; i++) {
		for (j = 0; j < SDE_HW_ROT_REGDMA_TOTAL_CTX; j++) {
			rctx = rot->rotCtx[i][j];
			if (rctx && rctx != ctx)
				wake_up_all(&rctx->regdma_waitq);
		}
	}

	spin_unlock_irqrestore(&rot->rotisr_lock, flags);

	return 0;
}

/**
 * sde_hw_rotator_dump_status - Dump hw rotator status on error
 * @rot: Pointer to hw rotator
 */
static void sde_hw_rotator_dump_status(struct sde_hw_rotator *rot, u32 *ubwcerr)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	u32 reg = 0;

	SDEROT_ERR(
		"op_mode = %x, int_en = %x, int_status = %x\n",
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_OP_MODE),
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_INT_EN),
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_INT_STATUS));

	SDEROT_ERR(
		"ts = %x, q0_status = %x, q1_status = %x, block_status = %x\n",
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_TIMESTAMP_REG),
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_QUEUE_0_STATUS),
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_QUEUE_1_STATUS),
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_BLOCK_STATUS));

	SDEROT_ERR(
		"invalid_cmd_offset = %x, fsm_state = %x\n",
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_INVALID_CMD_RAM_OFFSET),
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_FSM_STATE));

	reg = SDE_ROTREG_READ(rot->mdss_base, ROT_SSPP_UBWC_ERROR_STATUS);
	if (ubwcerr)
		*ubwcerr = reg;
	SDEROT_ERR(
		"UBWC decode status = %x, UBWC encode status = %x\n", reg,
		SDE_ROTREG_READ(rot->mdss_base, ROT_WB_UBWC_ERROR_STATUS));

	SDEROT_ERR("VBIF XIN HALT status = %x VBIF AXI HALT status = %x\n",
		SDE_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL1),
		SDE_VBIF_READ(mdata, MMSS_VBIF_AXI_HALT_CTRL1));

	SDEROT_ERR(
		"sbuf_status_plane0 = %x, sbuf_status_plane1 = %x\n",
		SDE_ROTREG_READ(rot->mdss_base,
			ROT_WB_SBUF_STATUS_PLANE0),
		SDE_ROTREG_READ(rot->mdss_base,
			ROT_WB_SBUF_STATUS_PLANE1));
}

/**
 * sde_hw_rotator_get_ctx(): Retrieve rotator context from rotator HW based
 * on provided session_id. Each rotator has a different session_id.
 * @rot: Pointer to rotator hw
 * @session_id: Identifier for rotator session
 * @sequence_id: Identifier for rotation request within the session
 * @q_id: Rotator queue identifier
 */
static struct sde_hw_rotator_context *sde_hw_rotator_get_ctx(
		struct sde_hw_rotator *rot, u32 session_id, u32 sequence_id,
		enum sde_rot_queue_prio q_id)
{
	int i;
	struct sde_hw_rotator_context *ctx = NULL;

	for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++) {
		ctx = rot->rotCtx[q_id][i];

		if (ctx && (ctx->session_id == session_id) &&
				(ctx->sequence_id == sequence_id)) {
			SDEROT_DBG(
				"rotCtx slot[%d][%d] ==> ctx:%p | session-id:%d | sequence-id:%d\n",
				q_id, i, ctx, ctx->session_id,
				ctx->sequence_id);
			return ctx;
		}
	}

	return NULL;
}

/*
 * sde_hw_rotator_map_vaddr - map the debug buffer to kernel space
 * @dbgbuf: Pointer to debug buffer
 * @buf: Pointer to layer buffer structure
 * @data: Pointer to h/w mapped buffer structure
 */
static void sde_hw_rotator_map_vaddr(struct sde_dbg_buf *dbgbuf,
		struct sde_layer_buffer *buf, struct sde_mdp_data *data)
{
	dbgbuf->dmabuf = data->p[0].srcp_dma_buf;
	dbgbuf->buflen = data->p[0].srcp_dma_buf->size;

	dbgbuf->vaddr = NULL;
	dbgbuf->width = buf->width;
	dbgbuf->height = buf->height;

	if (dbgbuf->dmabuf && (dbgbuf->buflen > 0)) {
		dma_buf_begin_cpu_access(dbgbuf->dmabuf, DMA_FROM_DEVICE);
		dbgbuf->vaddr = dma_buf_kmap(dbgbuf->dmabuf, 0);
		SDEROT_DBG("vaddr mapping: 0x%p/%ld w:%d/h:%d\n",
				dbgbuf->vaddr, dbgbuf->buflen,
				dbgbuf->width, dbgbuf->height);
	}
}

/*
 * sde_hw_rotator_unmap_vaddr - unmap the debug buffer from kernel space
 * @dbgbuf: Pointer to debug buffer
 */
static void sde_hw_rotator_unmap_vaddr(struct sde_dbg_buf *dbgbuf)
{
	if (dbgbuf->vaddr) {
		dma_buf_kunmap(dbgbuf->dmabuf, 0, dbgbuf->vaddr);
		dma_buf_end_cpu_access(dbgbuf->dmabuf, DMA_FROM_DEVICE);
	}

	dbgbuf->vaddr = NULL;
	dbgbuf->dmabuf = NULL;
	dbgbuf->buflen = 0;
	dbgbuf->width = 0;
	dbgbuf->height = 0;
}

/*
 * sde_hw_rotator_vbif_setting - helper function to set VBIF QoS remapper
 * levels, enable write gather, and optionally disable clock gating for
 * debug purposes.
 *
 * @rot: Pointer to rotator hw
 */
static void sde_hw_rotator_vbif_setting(struct sde_hw_rotator *rot)
{
	u32 i, mask, vbif_qos, reg_val = 0;
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();

	/* VBIF_ROT QoS remapper setting */
	switch (mdata->npriority_lvl) {

	case SDE_MDP_VBIF_4_LEVEL_REMAPPER:
		for (i = 0; i < mdata->npriority_lvl; i++) {
			reg_val = SDE_VBIF_READ(mdata,
					MMSS_VBIF_NRT_VBIF_QOS_REMAP_00 + i*4);
			mask = 0x3 << (XIN_SSPP * 2);
			vbif_qos = mdata->vbif_nrt_qos[i];
			reg_val |= vbif_qos << (XIN_SSPP * 2);
			/* ensure write is issued after the read operation */
			mb();
			SDE_VBIF_WRITE(mdata,
					MMSS_VBIF_NRT_VBIF_QOS_REMAP_00 + i*4,
					reg_val);
		}
		break;

	case SDE_MDP_VBIF_8_LEVEL_REMAPPER:
		mask = mdata->npriority_lvl - 1;
		for (i = 0; i < mdata->npriority_lvl; i++) {
			/* RD and WR client */
			reg_val |= (mdata->vbif_nrt_qos[i] & mask)
					<< (XIN_SSPP * 4);
			reg_val |= (mdata->vbif_nrt_qos[i] & mask)
					<< (XIN_WRITEBACK * 4);

			SDE_VBIF_WRITE(mdata,
					MMSS_VBIF_NRT_VBIF_QOS_RP_REMAP_000 + i*8,
					reg_val);
			SDE_VBIF_WRITE(mdata,
					MMSS_VBIF_NRT_VBIF_QOS_LVL_REMAP_000 + i*8,
					reg_val);
		}
		break;

	default:
		SDEROT_DBG("invalid vbif remapper levels\n");
	}

	/* Enable write gather for writeback to remove write gaps, which
	 * may hang AXI/BIMC/SDE.
	 */
	SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_WRITE_GATHTER_EN,
			BIT(XIN_WRITEBACK));

	/*
	 * For debug purpose, disable clock gating, i.e. Clocks always on
	 */
	if (mdata->clk_always_on) {
		SDE_VBIF_WRITE(mdata, MMSS_VBIF_CLKON, 0x3);
		SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0, 0x3);
		SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL1,
				0xFFFF);
		SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_CLK_CTRL, 1);
	}
}
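
/*
 * Illustrative note (not part of the original driver): with the XIN
 * mapping above (XIN_SSPP == 0, XIN_WRITEBACK == 1), the 8-level
 * remapper case packs the same QoS level for both clients into one
 * register per priority level. For a hypothetical vbif_nrt_qos[0] of
 * 0x6, the first loop iteration ORs in
 *
 *	(0x6 << (0 * 4)) | (0x6 << (1 * 4)) = 0x66
 *
 * which is written to MMSS_VBIF_NRT_VBIF_QOS_RP_REMAP_000 + 0*8 and the
 * matching LVL_REMAP register. The 4-level remapper case uses 2-bit
 * fields instead, so only bits [1:0] are updated for XIN_SSPP.
 */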
953
954/*
Alan Kwong9487de22016-01-16 22:06:36 -0500955 * sde_hw_rotator_setup_timestamp_packet - setup timestamp writeback command
956 * @ctx: Pointer to rotator context
957 * @mask: Bit mask location of the timestamp
958 * @swts: Software timestamp
959 */
960static void sde_hw_rotator_setup_timestamp_packet(
961 struct sde_hw_rotator_context *ctx, u32 mask, u32 swts)
962{
Benjamin Chanda32f8b2017-09-20 17:11:27 -0400963 char __iomem *wrptr;
Alan Kwong9487de22016-01-16 22:06:36 -0500964
965 wrptr = sde_hw_rotator_get_regdma_segment(ctx);
966
967 /*
968 * Create a dummy packet write out to 1 location for timestamp
969 * generation.
970 */
971 SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_SIZE, 6);
972 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x00010001);
973 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
974 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
975 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x00010001);
976 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
977 SDE_REGDMA_BLKWRITE_DATA(wrptr, ctx->ts_addr);
978 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_YSTRIDE0, 4);
979 SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_FORMAT, 4);
980 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x004037FF);
981 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x03020100);
982 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x80000000);
983 SDE_REGDMA_BLKWRITE_DATA(wrptr, ctx->timestamp);
	/*
	 * Must clear secure buffer setting for SW timestamp because
	 * the SW timestamp buffer allocation is always in a non-secure region.
	 */
	if (ctx->is_secure) {
		SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_ADDR_SW_STATUS, 0);
		SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ADDR_SW_STATUS, 0);
	}
	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_WB_DST_FORMAT, 4);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x000037FF);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x03020100);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, ctx->ts_addr);
	SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_YSTRIDE0, 4);
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_SIZE, 0x00010001);
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_IMG_SIZE, 0x00010001);
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_XY, 0);
	SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_WRITE_CONFIG,
			(ctx->rot->highest_bank & 0x3) << 8);
	SDE_REGDMA_WRITE(wrptr, ROTTOP_DNSC, 0);
	SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, 1);
	SDE_REGDMA_MODIFY(wrptr, REGDMA_TIMESTAMP_REG, mask, swts);
	SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, 1);

	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
}

/*
 * sde_hw_rotator_cdp_configs - configures the CDP registers
 * @ctx: Pointer to rotator context
 * @params: Pointer to parameters needed for CDP configs
 */
static void sde_hw_rotator_cdp_configs(struct sde_hw_rotator_context *ctx,
		struct sde_rot_cdp_params *params)
{
	int reg_val;
	char __iomem *wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	if (!params->enable) {
		SDE_REGDMA_WRITE(wrptr, params->offset, 0x0);
		goto end;
	}

	reg_val = BIT(0); /* enable cdp */

	if (sde_mdp_is_ubwc_format(params->fmt))
		reg_val |= BIT(1); /* enable UBWC meta cdp */

	if (sde_mdp_is_ubwc_format(params->fmt)
			|| sde_mdp_is_tilea4x_format(params->fmt)
			|| sde_mdp_is_tilea5x_format(params->fmt))
		reg_val |= BIT(2); /* enable tile amortize */

	reg_val |= BIT(3); /* enable preload addr ahead cnt 64 */

	SDE_REGDMA_WRITE(wrptr, params->offset, reg_val);

end:
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
}

/*
 * sde_hw_rotator_setup_qos_lut_wr - Set QoS LUT/Danger LUT/Safe LUT configs
 * for the WRITEBACK rotator for inline and offline rotation.
 *
 * @ctx: Pointer to rotator context
 */
static void sde_hw_rotator_setup_qos_lut_wr(struct sde_hw_rotator_context *ctx)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	char __iomem *wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	/* Offline rotation setting */
	if (!ctx->sbuf_mode) {
		/* QOS LUT WR setting */
		if (test_bit(SDE_QOS_LUT, mdata->sde_qos_map)) {
			SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_0,
					mdata->lut_cfg[SDE_ROT_WR].creq_lut_0);
			SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_1,
					mdata->lut_cfg[SDE_ROT_WR].creq_lut_1);
		}

		/* Danger LUT WR setting */
		if (test_bit(SDE_QOS_DANGER_LUT, mdata->sde_qos_map))
			SDE_REGDMA_WRITE(wrptr, ROT_WB_DANGER_LUT,
					mdata->lut_cfg[SDE_ROT_WR].danger_lut);

		/* Safe LUT WR setting */
		if (test_bit(SDE_QOS_SAFE_LUT, mdata->sde_qos_map))
			SDE_REGDMA_WRITE(wrptr, ROT_WB_SAFE_LUT,
					mdata->lut_cfg[SDE_ROT_WR].safe_lut);

	/* Inline rotation setting */
	} else {
		/* QOS LUT WR setting */
		if (test_bit(SDE_INLINE_QOS_LUT, mdata->sde_inline_qos_map)) {
			SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_0,
					mdata->inline_lut_cfg[SDE_ROT_WR].creq_lut_0);
			SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_1,
					mdata->inline_lut_cfg[SDE_ROT_WR].creq_lut_1);
		}

		/* Danger LUT WR setting */
		if (test_bit(SDE_INLINE_QOS_DANGER_LUT,
				mdata->sde_inline_qos_map))
			SDE_REGDMA_WRITE(wrptr, ROT_WB_DANGER_LUT,
					mdata->inline_lut_cfg[SDE_ROT_WR].danger_lut);

		/* Safe LUT WR setting */
		if (test_bit(SDE_INLINE_QOS_SAFE_LUT,
				mdata->sde_inline_qos_map))
			SDE_REGDMA_WRITE(wrptr, ROT_WB_SAFE_LUT,
					mdata->inline_lut_cfg[SDE_ROT_WR].safe_lut);
	}

	/* Update command queue write ptr */
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
}

/*
 * sde_hw_rotator_setup_qos_lut_rd - Set QoS LUT/Danger LUT/Safe LUT configs
 * for the SSPP rotator for inline and offline rotation.
 *
 * @ctx: Pointer to rotator context
 */
static void sde_hw_rotator_setup_qos_lut_rd(struct sde_hw_rotator_context *ctx)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	char __iomem *wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	/* Offline rotation setting */
	if (!ctx->sbuf_mode) {
		/* QOS LUT RD setting */
		if (test_bit(SDE_QOS_LUT, mdata->sde_qos_map)) {
			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_0,
					mdata->lut_cfg[SDE_ROT_RD].creq_lut_0);
			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_1,
					mdata->lut_cfg[SDE_ROT_RD].creq_lut_1);
		}

		/* Danger LUT RD setting */
		if (test_bit(SDE_QOS_DANGER_LUT, mdata->sde_qos_map))
			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_DANGER_LUT,
					mdata->lut_cfg[SDE_ROT_RD].danger_lut);

		/* Safe LUT RD setting */
		if (test_bit(SDE_QOS_SAFE_LUT, mdata->sde_qos_map))
			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SAFE_LUT,
					mdata->lut_cfg[SDE_ROT_RD].safe_lut);

	/* inline rotation setting */
	} else {
		/* QOS LUT RD setting */
		if (test_bit(SDE_INLINE_QOS_LUT, mdata->sde_inline_qos_map)) {
			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_0,
					mdata->inline_lut_cfg[SDE_ROT_RD].creq_lut_0);
			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_1,
					mdata->inline_lut_cfg[SDE_ROT_RD].creq_lut_1);
		}

		/* Danger LUT RD setting */
		if (test_bit(SDE_INLINE_QOS_DANGER_LUT,
				mdata->sde_inline_qos_map))
			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_DANGER_LUT,
					mdata->inline_lut_cfg[SDE_ROT_RD].danger_lut);

		/* Safe LUT RD setting */
		if (test_bit(SDE_INLINE_QOS_SAFE_LUT,
				mdata->sde_inline_qos_map))
			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SAFE_LUT,
					mdata->inline_lut_cfg[SDE_ROT_RD].safe_lut);
	}

	/* Update command queue write ptr */
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
}

/*
 * sde_hw_rotator_setup_fetchengine - setup fetch engine
 * @ctx: Pointer to rotator context
 * @queue_id: Priority queue identifier
 * @cfg: Fetch configuration
 * @danger_lut: real-time QoS LUT for danger setting (not used)
 * @safe_lut: real-time QoS LUT for safe setting (not used)
 * @dnsc_factor_w: downscale factor for width
 * @dnsc_factor_h: downscale factor for height
 * @flags: Control flag
 */
static void sde_hw_rotator_setup_fetchengine(struct sde_hw_rotator_context *ctx,
		enum sde_rot_queue_prio queue_id,
		struct sde_hw_rot_sspp_cfg *cfg, u32 danger_lut, u32 safe_lut,
		u32 dnsc_factor_w, u32 dnsc_factor_h, u32 flags)
{
	struct sde_hw_rotator *rot = ctx->rot;
	struct sde_mdp_format_params *fmt;
	struct sde_mdp_data *data;
	struct sde_rot_cdp_params cdp_params = {0};
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	char __iomem *wrptr;
	u32 opmode = 0;
	u32 chroma_samp = 0;
	u32 src_format = 0;
	u32 unpack = 0;
	u32 width = cfg->img_width;
	u32 height = cfg->img_height;
	u32 fetch_blocksize = 0;
	int i;

	if (ctx->rot->mode == ROT_REGDMA_ON) {
		if (rot->irq_num >= 0)
			SDE_ROTREG_WRITE(rot->mdss_base,
					REGDMA_CSR_REGDMA_INT_EN,
					REGDMA_INT_MASK);
		SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_OP_MODE,
				REGDMA_EN);
	}

	wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	/*
	 * initialize start control trigger selection first
	 */
	if (test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map)) {
		if (ctx->sbuf_mode)
			SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL,
					ctx->start_ctrl);
		else
			SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, 0);
	}

	/* source image setup */
	if ((flags & SDE_ROT_FLAG_DEINTERLACE)
			&& !(flags & SDE_ROT_FLAG_SOURCE_ROTATED_90)) {
		for (i = 0; i < cfg->src_plane.num_planes; i++)
			cfg->src_plane.ystride[i] *= 2;
		width *= 2;
		height /= 2;
	}

	/*
	 * REGDMA BLK write from SRC_SIZE to OP_MODE, total 15 registers
	 */
	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_SIZE, 15);

	/* SRC_SIZE, SRC_IMG_SIZE, SRC_XY, OUT_SIZE, OUT_XY */
	SDE_REGDMA_BLKWRITE_DATA(wrptr,
			cfg->src_rect->w | (cfg->src_rect->h << 16));
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0); /* SRC_IMG_SIZE unused */
	SDE_REGDMA_BLKWRITE_DATA(wrptr,
			cfg->src_rect->x | (cfg->src_rect->y << 16));
	SDE_REGDMA_BLKWRITE_DATA(wrptr,
			cfg->src_rect->w | (cfg->src_rect->h << 16));
	SDE_REGDMA_BLKWRITE_DATA(wrptr,
			cfg->src_rect->x | (cfg->src_rect->y << 16));

	/* SRC_ADDR [0-3], SRC_YSTRIDE [0-1] */
	data = cfg->data;
	for (i = 0; i < SDE_ROT_MAX_PLANES; i++)
		SDE_REGDMA_BLKWRITE_DATA(wrptr, data->p[i].addr);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->src_plane.ystride[0] |
			(cfg->src_plane.ystride[1] << 16));
	SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->src_plane.ystride[2] |
			(cfg->src_plane.ystride[3] << 16));

	/* UNUSED, write 0 */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);

	/* setup source format */
	fmt = cfg->fmt;

	chroma_samp = fmt->chroma_sample;
	if (flags & SDE_ROT_FLAG_SOURCE_ROTATED_90) {
		if (chroma_samp == SDE_MDP_CHROMA_H2V1)
			chroma_samp = SDE_MDP_CHROMA_H1V2;
		else if (chroma_samp == SDE_MDP_CHROMA_H1V2)
			chroma_samp = SDE_MDP_CHROMA_H2V1;
	}

	src_format = (chroma_samp << 23) |
		(fmt->fetch_planes << 19) |
		(fmt->bits[C3_ALPHA] << 6) |
		(fmt->bits[C2_R_Cr] << 4) |
		(fmt->bits[C1_B_Cb] << 2) |
		(fmt->bits[C0_G_Y] << 0);

	if (fmt->alpha_enable &&
			(fmt->fetch_planes == SDE_MDP_PLANE_INTERLEAVED))
		src_format |= BIT(8); /* SRCC3_EN */

	src_format |= ((fmt->unpack_count - 1) << 12) |
			(fmt->unpack_tight << 17) |
			(fmt->unpack_align_msb << 18) |
			((fmt->bpp - 1) << 9) |
			((fmt->frame_format & 3) << 30);

	if (flags & SDE_ROT_FLAG_ROT_90)
		src_format |= BIT(11); /* ROT90 */

	if (sde_mdp_is_ubwc_format(fmt))
		opmode |= BIT(0); /* BWC_DEC_EN */

	/* if this is YUV pixel format, enable CSC */
	if (sde_mdp_is_yuv_format(fmt))
		src_format |= BIT(15); /* SRC_COLOR_SPACE */

	if (fmt->pixel_mode == SDE_MDP_PIXEL_10BIT)
		src_format |= BIT(14); /* UNPACK_DX_FORMAT */

	if (rot->solid_fill)
		src_format |= BIT(22); /* SOLID_FILL */

	/* SRC_FORMAT */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, src_format);

	/* setup source unpack pattern */
	unpack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
			(fmt->element[1] << 8) | (fmt->element[0] << 0);

	/* SRC_UNPACK_PATTERN */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, unpack);

	/* setup source op mode */
	if (flags & SDE_ROT_FLAG_FLIP_LR)
		opmode |= BIT(13); /* FLIP_MODE L/R horizontal flip */
	if (flags & SDE_ROT_FLAG_FLIP_UD)
		opmode |= BIT(14); /* FLIP_MODE U/D vertical flip */
	opmode |= BIT(31); /* MDSS_MDP_OP_PE_OVERRIDE */

	/* SRC_OP_MODE */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, opmode);

	/* setup source fetch config, TP10 uses different block size */
	if (test_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map) &&
			(dnsc_factor_w == 1) && (dnsc_factor_h == 1)) {
		if (sde_mdp_is_tp10_format(fmt))
			fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_144_EXT;
		else
			fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_192_EXT;
	} else {
		if (sde_mdp_is_tp10_format(fmt))
			fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_96;
		else
			fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_128;
	}

	if (rot->solid_fill)
		SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_CONSTANT_COLOR,
				rot->constant_color);

	SDE_REGDMA_WRITE(wrptr, ROT_SSPP_FETCH_CONFIG,
			fetch_blocksize |
			SDE_ROT_SSPP_FETCH_CONFIG_RESET_VALUE |
			((rot->highest_bank & 0x3) << 18));

	if (test_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map))
		SDE_REGDMA_WRITE(wrptr, ROT_SSPP_UBWC_STATIC_CTRL, BIT(31) |
				((ctx->rot->ubwc_malsize & 0x3) << 8) |
				((ctx->rot->highest_bank & 0x3) << 4) |
				((ctx->rot->ubwc_swizzle & 0x1) << 0));

	/* setup source buffer plane security status */
	if (flags & (SDE_ROT_FLAG_SECURE_OVERLAY_SESSION |
			SDE_ROT_FLAG_SECURE_CAMERA_SESSION)) {
		SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_ADDR_SW_STATUS, 0xF);
		ctx->is_secure = true;
	} else {
		SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_ADDR_SW_STATUS, 0);
		ctx->is_secure = false;
	}

	/* Update command queue write ptr */
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);

	/* CDP register RD setting */
	cdp_params.enable = test_bit(SDE_QOS_CDP, mdata->sde_qos_map) ?
			mdata->enable_cdp[SDE_ROT_RD] : false;
	cdp_params.fmt = fmt;
	cdp_params.offset = ROT_SSPP_CDP_CNTL;
	sde_hw_rotator_cdp_configs(ctx, &cdp_params);

	/* QOS LUT/ Danger LUT/ Safe Lut WR setting */
	sde_hw_rotator_setup_qos_lut_rd(ctx);

	wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	/*
	 * Determine if traffic shaping is required. Only enable traffic
	 * shaping when content is 4k@30fps. The actual traffic shaping
	 * bandwidth calculation is done in output setup.
	 */
	if (((!ctx->sbuf_mode)
			&& (cfg->src_rect->w * cfg->src_rect->h) >= RES_UHD)
			&& (cfg->fps <= 30)) {
		SDEROT_DBG("Enable Traffic Shaper\n");
		ctx->is_traffic_shaping = true;
	} else {
		SDEROT_DBG("Disable Traffic Shaper\n");
		ctx->is_traffic_shaping = false;
	}

	/* Update command queue write ptr */
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
}

/*
 * sde_hw_rotator_setup_wbengine - setup writeback engine
 * @ctx: Pointer to rotator context
 * @queue_id: Priority queue identifier
 * @cfg: Writeback configuration
 * @flags: Control flag
 */
static void sde_hw_rotator_setup_wbengine(struct sde_hw_rotator_context *ctx,
		enum sde_rot_queue_prio queue_id,
		struct sde_hw_rot_wb_cfg *cfg,
		u32 flags)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	struct sde_mdp_format_params *fmt;
	struct sde_rot_cdp_params cdp_params = {0};
	char __iomem *wrptr;
	u32 pack = 0;
	u32 dst_format = 0;
	u32 no_partial_writes = 0;
	int i;

	wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	fmt = cfg->fmt;

	/* setup WB DST format */
	dst_format |= (fmt->chroma_sample << 23) |
			(fmt->fetch_planes << 19) |
			(fmt->bits[C3_ALPHA] << 6) |
			(fmt->bits[C2_R_Cr] << 4) |
			(fmt->bits[C1_B_Cb] << 2) |
			(fmt->bits[C0_G_Y] << 0);

	/* alpha control */
	if (fmt->bits[C3_ALPHA] || fmt->alpha_enable) {
		dst_format |= BIT(8);
		if (!fmt->alpha_enable) {
			dst_format |= BIT(14);
			SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ALPHA_X_VALUE, 0);
		}
	}

	dst_format |= ((fmt->unpack_count - 1) << 12) |
			(fmt->unpack_tight << 17) |
			(fmt->unpack_align_msb << 18) |
			((fmt->bpp - 1) << 9) |
			((fmt->frame_format & 3) << 30);

	if (sde_mdp_is_yuv_format(fmt))
		dst_format |= BIT(15);

	if (fmt->pixel_mode == SDE_MDP_PIXEL_10BIT)
		dst_format |= BIT(21); /* PACK_DX_FORMAT */

	/*
	 * REGDMA BLK write, from DST_FORMAT to DST_YSTRIDE 1, total 9 regs
	 */
	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_WB_DST_FORMAT, 9);

	/* DST_FORMAT */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, dst_format);

	/* DST_OP_MODE */
	if (sde_mdp_is_ubwc_format(fmt))
		SDE_REGDMA_BLKWRITE_DATA(wrptr, BIT(0));
	else
		SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);

	/* DST_PACK_PATTERN */
	pack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
			(fmt->element[1] << 8) | (fmt->element[0] << 0);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, pack);

	/* DST_ADDR [0-3], DST_YSTRIDE [0-1] */
	for (i = 0; i < SDE_ROT_MAX_PLANES; i++)
		SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->data->p[i].addr);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->dst_plane.ystride[0] |
			(cfg->dst_plane.ystride[1] << 16));
	SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->dst_plane.ystride[2] |
			(cfg->dst_plane.ystride[3] << 16));

	/* setup WB out image size and ROI */
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_IMG_SIZE,
			cfg->img_width | (cfg->img_height << 16));
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_SIZE,
			cfg->dst_rect->w | (cfg->dst_rect->h << 16));
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_XY,
			cfg->dst_rect->x | (cfg->dst_rect->y << 16));

	if (flags & (SDE_ROT_FLAG_SECURE_OVERLAY_SESSION |
			SDE_ROT_FLAG_SECURE_CAMERA_SESSION))
		SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ADDR_SW_STATUS, 0x1);
	else
		SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ADDR_SW_STATUS, 0);

	/*
	 * setup Downscale factor
	 */
	SDE_REGDMA_WRITE(wrptr, ROTTOP_DNSC,
			cfg->v_downscale_factor |
			(cfg->h_downscale_factor << 16));

	/* partial write check */
	if (test_bit(SDE_CAPS_PARTIALWR, mdata->sde_caps_map)) {
		no_partial_writes = BIT(10);

		/*
		 * For simplicity, don't disable partial writes if
		 * the ROI does not span the entire width of the
		 * output image, and require the total stride to
		 * also be properly aligned.
		 *
		 * This avoids having to determine the memory access
		 * alignment of the actual horizontal ROI on a per
		 * color format basis.
		 */
		if (sde_mdp_is_ubwc_format(fmt)) {
			no_partial_writes = 0x0;
		} else if (cfg->dst_rect->x ||
				cfg->dst_rect->w != cfg->img_width) {
			no_partial_writes = 0x0;
		} else {
			for (i = 0; i < SDE_ROT_MAX_PLANES; i++)
				if (cfg->dst_plane.ystride[i] &
						PARTIAL_WRITE_ALIGNMENT)
					no_partial_writes = 0x0;
		}
	}

	/* write config setup for bank configuration */
	SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_WRITE_CONFIG, no_partial_writes |
			(ctx->rot->highest_bank & 0x3) << 8);

	if (test_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map))
		SDE_REGDMA_WRITE(wrptr, ROT_WB_UBWC_STATIC_CTRL,
				((ctx->rot->ubwc_malsize & 0x3) << 8) |
				((ctx->rot->highest_bank & 0x3) << 4) |
				((ctx->rot->ubwc_swizzle & 0x1) << 0));

	if (test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map))
		SDE_REGDMA_WRITE(wrptr, ROT_WB_SYS_CACHE_MODE,
				ctx->sys_cache_mode);

	SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, ctx->op_mode |
			(flags & SDE_ROT_FLAG_ROT_90 ? BIT(1) : 0) | BIT(0));

	sde_hw_rotator_put_regdma_segment(ctx, wrptr);

	/* CDP register WR setting */
	cdp_params.enable = test_bit(SDE_QOS_CDP, mdata->sde_qos_map) ?
			mdata->enable_cdp[SDE_ROT_WR] : false;
	cdp_params.fmt = fmt;
	cdp_params.offset = ROT_WB_CDP_CNTL;
	sde_hw_rotator_cdp_configs(ctx, &cdp_params);

	/* QOS LUT/ Danger LUT/ Safe LUT WR setting */
	sde_hw_rotator_setup_qos_lut_wr(ctx);

	wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	/* setup traffic shaper for 4k 30fps content or if prefill_bw is set */
	if (ctx->is_traffic_shaping || cfg->prefill_bw) {
		u32 bw;

		/*
		 * Target to finish in 12ms, and we need to set number of bytes
		 * per clock tick for traffic shaping.
		 * Each clock tick runs @ 19.2MHz, so we need to know the total
		 * number of clock ticks in 12ms, i.e. 12ms x 19.2MHz ==> 230400
		 * Finally, calculate the byte count per clock tick based on
		 * resolution, bpp and compression ratio.
		 */
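		/*
		 * Illustrative note (not part of the original driver):
		 * for a hypothetical 3840x2160 4:2:0 destination,
		 * bw = 3840 * 2160 * 3 / 2 = 12441600 bytes per frame,
		 * and 12441600 / TRAFFIC_SHAPE_CLKTICK_12MS (230400)
		 * = 54 bytes per 19.2MHz clock tick programmed below
		 * (before the 0xFF clamp and the prefill_bw override).
		 */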
		bw = cfg->dst_rect->w * cfg->dst_rect->h;

		if (fmt->chroma_sample == SDE_MDP_CHROMA_420)
			bw = (bw * 3) / 2;
		else
			bw *= fmt->bpp;

		bw /= TRAFFIC_SHAPE_CLKTICK_12MS;

		/* use prefill bandwidth instead if specified */
		if (cfg->prefill_bw)
			bw = DIV_ROUND_UP(cfg->prefill_bw,
					TRAFFIC_SHAPE_VSYNC_CLK);

		if (bw > 0xFF)
			bw = 0xFF;
		else if (bw == 0)
			bw = 1;

		SDE_REGDMA_WRITE(wrptr, ROT_WB_TRAFFIC_SHAPER_WR_CLIENT,
				BIT(31) | (cfg->prefill_bw ? BIT(27) : 0) | bw);
		SDEROT_DBG("Enable ROT_WB Traffic Shaper:%d\n", bw);
	} else {
		SDE_REGDMA_WRITE(wrptr, ROT_WB_TRAFFIC_SHAPER_WR_CLIENT, 0);
		SDEROT_DBG("Disable ROT_WB Traffic Shaper\n");
	}

	/* Update command queue write ptr */
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
}

/*
 * sde_hw_rotator_start_no_regdma - start non-regdma operation
 * @ctx: Pointer to rotator context
 * @queue_id: Priority queue identifier
 */
static u32 sde_hw_rotator_start_no_regdma(struct sde_hw_rotator_context *ctx,
		enum sde_rot_queue_prio queue_id)
{
	struct sde_hw_rotator *rot = ctx->rot;
	char __iomem *wrptr;
	char __iomem *mem_rdptr;
	char __iomem *addr;
	u32 mask;
	u32 cmd0, cmd1, cmd2;
	u32 blksize;

Benjamin Chanda32f8b2017-09-20 17:11:27 -04001607 /*
1608 * When regdma is not in use, the regdma segment is just normal
1609 * DRAM, not an iomem region.
1610 */
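/*
 * For instance, each REGDMA_OP_REGWRITE entry in the segment is two
 * u32 words, (REGDMA_OP_REGWRITE | register offset) followed by the
 * value, and is replayed below as a CPU register write.
 */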
1611 mem_rdptr = sde_hw_rotator_get_regdma_segment_base(ctx);
Alan Kwong9487de22016-01-16 22:06:36 -05001612 wrptr = sde_hw_rotator_get_regdma_segment(ctx);
1613
1614 if (rot->irq_num >= 0) {
1615 SDE_REGDMA_WRITE(wrptr, ROTTOP_INTR_EN, 1);
1616 SDE_REGDMA_WRITE(wrptr, ROTTOP_INTR_CLEAR, 1);
1617 reinit_completion(&ctx->rot_comp);
Alan Kwong818b7fc2016-07-24 22:07:41 -04001618 sde_hw_rotator_enable_irq(rot);
Alan Kwong9487de22016-01-16 22:06:36 -05001619 }
1620
Alan Kwong6bc64622017-02-04 17:36:03 -08001621 SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, ctx->start_ctrl);
Alan Kwong9487de22016-01-16 22:06:36 -05001622
1623 /* Update command queue write ptr */
1624 sde_hw_rotator_put_regdma_segment(ctx, wrptr);
1625
1626 SDEROT_DBG("BEGIN %d\n", ctx->timestamp);
1627 /* Write all command stream to Rotator blocks */
1628 /* Rotator will start right away after command stream finish writing */
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001629 while (mem_rdptr < wrptr) {
1630 u32 op = REGDMA_OP_MASK & readl_relaxed_no_log(mem_rdptr);
Alan Kwong9487de22016-01-16 22:06:36 -05001631
1632 switch (op) {
1633 case REGDMA_OP_NOP:
1634 SDEROT_DBG("NOP\n");
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001635 mem_rdptr += sizeof(u32);
Alan Kwong9487de22016-01-16 22:06:36 -05001636 break;
1637 case REGDMA_OP_REGWRITE:
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001638 SDE_REGDMA_READ(mem_rdptr, cmd0);
1639 SDE_REGDMA_READ(mem_rdptr, cmd1);
Alan Kwong9487de22016-01-16 22:06:36 -05001640 SDEROT_DBG("REGW %6.6x %8.8x\n",
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001641 cmd0 & REGDMA_ADDR_OFFSET_MASK,
1642 cmd1);
Alan Kwong9487de22016-01-16 22:06:36 -05001643 addr = rot->mdss_base +
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001644 (cmd0 & REGDMA_ADDR_OFFSET_MASK);
1645 writel_relaxed(cmd1, addr);
Alan Kwong9487de22016-01-16 22:06:36 -05001646 break;
1647 case REGDMA_OP_REGMODIFY:
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001648 SDE_REGDMA_READ(mem_rdptr, cmd0);
1649 SDE_REGDMA_READ(mem_rdptr, cmd1);
1650 SDE_REGDMA_READ(mem_rdptr, cmd2);
Alan Kwong9487de22016-01-16 22:06:36 -05001651 SDEROT_DBG("REGM %6.6x %8.8x %8.8x\n",
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001652 cmd0 & REGDMA_ADDR_OFFSET_MASK,
1653 cmd1, cmd2);
Alan Kwong9487de22016-01-16 22:06:36 -05001654 addr = rot->mdss_base +
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001655 (cmd0 & REGDMA_ADDR_OFFSET_MASK);
1656 mask = cmd1;
1657 writel_relaxed((readl_relaxed(addr) & mask) | cmd2,
Alan Kwong9487de22016-01-16 22:06:36 -05001658 addr);
1659 break;
1660 case REGDMA_OP_BLKWRITE_SINGLE:
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001661 SDE_REGDMA_READ(mem_rdptr, cmd0);
1662 SDE_REGDMA_READ(mem_rdptr, cmd1);
Alan Kwong9487de22016-01-16 22:06:36 -05001663 SDEROT_DBG("BLKWS %6.6x %6.6x\n",
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001664 cmd0 & REGDMA_ADDR_OFFSET_MASK,
1665 cmd1);
Alan Kwong9487de22016-01-16 22:06:36 -05001666 addr = rot->mdss_base +
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001667 (cmd0 & REGDMA_ADDR_OFFSET_MASK);
1668 blksize = cmd1;
Alan Kwong9487de22016-01-16 22:06:36 -05001669 while (blksize--) {
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001670 SDE_REGDMA_READ(mem_rdptr, cmd0);
1671 SDEROT_DBG("DATA %8.8x\n", cmd0);
1672 writel_relaxed(cmd0, addr);
Alan Kwong9487de22016-01-16 22:06:36 -05001673 }
1674 break;
1675 case REGDMA_OP_BLKWRITE_INC:
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001676 SDE_REGDMA_READ(mem_rdptr, cmd0);
1677 SDE_REGDMA_READ(mem_rdptr, cmd1);
Alan Kwong9487de22016-01-16 22:06:36 -05001678 SDEROT_DBG("BLKWI %6.6x %6.6x\n",
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001679 cmd0 & REGDMA_ADDR_OFFSET_MASK,
1680 cmd1);
Alan Kwong9487de22016-01-16 22:06:36 -05001681 addr = rot->mdss_base +
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001682 (cmd0 & REGDMA_ADDR_OFFSET_MASK);
1683 blksize = cmd1;
Alan Kwong9487de22016-01-16 22:06:36 -05001684 while (blksize--) {
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001685 SDE_REGDMA_READ(mem_rdptr, cmd0);
1686 SDEROT_DBG("DATA %8.8x\n", cmd0);
1687 writel_relaxed(cmd0, addr);
Alan Kwong9487de22016-01-16 22:06:36 -05001688 addr += 4;
1689 }
1690 break;
1691 default:
1692 /* Other not supported OP mode
1693 * Skip data for now for unregonized OP mode
1694 */
1695 SDEROT_DBG("UNDEFINED\n");
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001696 mem_rdptr += sizeof(u32);
Alan Kwong9487de22016-01-16 22:06:36 -05001697 break;
1698 }
1699 }
1700 SDEROT_DBG("END %d\n", ctx->timestamp);
1701
1702 return ctx->timestamp;
1703}
1704
1705/*
1706 * sde_hw_rotator_start_regdma - start regdma operation
1707 * @ctx: Pointer to rotator context
1708 * @queue_id: Priority queue identifier
1709 */
1710static u32 sde_hw_rotator_start_regdma(struct sde_hw_rotator_context *ctx,
1711 enum sde_rot_queue_prio queue_id)
1712{
1713 struct sde_hw_rotator *rot = ctx->rot;
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001714 char __iomem *wrptr;
Alan Kwong9487de22016-01-16 22:06:36 -05001715 u32 regdmaSlot;
1716 u32 offset;
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001717 u32 length;
1718 u32 ts_length;
Alan Kwong9487de22016-01-16 22:06:36 -05001719 u32 enableInt;
1720 u32 swts = 0;
1721 u32 mask = 0;
Alan Kwong6bc64622017-02-04 17:36:03 -08001722 u32 trig_sel;
Alan Kwong9487de22016-01-16 22:06:36 -05001723
1724 wrptr = sde_hw_rotator_get_regdma_segment(ctx);
1725
Alan Kwong9487de22016-01-16 22:06:36 -05001726 /*
1727 * Last ROT command must be ROT_START before REGDMA start
1728 */
Alan Kwong6bc64622017-02-04 17:36:03 -08001729 SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, ctx->start_ctrl);
1730
Alan Kwong9487de22016-01-16 22:06:36 -05001731 sde_hw_rotator_put_regdma_segment(ctx, wrptr);
1732
1733 /*
1734 * Start REGDMA with command offset and size
1735 */
1736 regdmaSlot = sde_hw_rotator_get_regdma_ctxidx(ctx);
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001737 length = (wrptr - ctx->regdma_base) / 4;
1738 offset = (ctx->regdma_base - (rot->mdss_base +
1739 REGDMA_RAM_REGDMA_CMD_RAM)) / sizeof(u32);
Alan Kwong9487de22016-01-16 22:06:36 -05001740 enableInt = ((ctx->timestamp & 1) + 1) << 30;
Alan Kwong6bc64622017-02-04 17:36:03 -08001741 trig_sel = ctx->sbuf_mode ? REGDMA_CMD_TRIG_SEL_MDP_FLUSH :
1742 REGDMA_CMD_TRIG_SEL_SW_START;
Alan Kwong9487de22016-01-16 22:06:36 -05001743
1744 SDEROT_DBG(
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001745 "regdma(%d)[%d] <== INT:0x%X|length:%d|offset:0x%X, ts:%X\n",
Alan Kwong9487de22016-01-16 22:06:36 -05001746 queue_id, regdmaSlot, enableInt, length, offset,
1747 ctx->timestamp);
1748
1749 /* ensure the command packet is issued before the submit command */
1750 wmb();
1751
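/*
 * Per the shifts used below, each queue submit word packs the command
 * RAM offset in its low bits, the command length (in u32 words, capped
 * to 10 bits) at bit 14, the trigger select, and the done interrupt
 * select (INT0/INT1, chosen by timestamp parity) at bits 30-31; in
 * offline mode the interrupt is instead enabled on the timestamp
 * packet submitted further below.
 */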
1752 /* REGDMA submission for current context */
1753 if (queue_id == ROT_QUEUE_HIGH_PRIORITY) {
1754 SDE_ROTREG_WRITE(rot->mdss_base,
1755 REGDMA_CSR_REGDMA_QUEUE_0_SUBMIT,
Alan Kwong6bc64622017-02-04 17:36:03 -08001756 (ctx->sbuf_mode ? enableInt : 0) | trig_sel |
1757 ((length & 0x3ff) << 14) | offset);
Alan Kwong9487de22016-01-16 22:06:36 -05001758 swts = ctx->timestamp;
1759 mask = ~SDE_REGDMA_SWTS_MASK;
1760 } else {
1761 SDE_ROTREG_WRITE(rot->mdss_base,
1762 REGDMA_CSR_REGDMA_QUEUE_1_SUBMIT,
Alan Kwong6bc64622017-02-04 17:36:03 -08001763 (ctx->sbuf_mode ? enableInt : 0) | trig_sel |
1764 ((length & 0x3ff) << 14) | offset);
Alan Kwong9487de22016-01-16 22:06:36 -05001765 swts = ctx->timestamp << SDE_REGDMA_SWTS_SHIFT;
1766 mask = ~(SDE_REGDMA_SWTS_MASK << SDE_REGDMA_SWTS_SHIFT);
1767 }
1768
Alan Kwong6bc64622017-02-04 17:36:03 -08001769 /* timestamp update can only be used in offline multi-context mode */
1770 if (!ctx->sbuf_mode) {
1771 /* Write timestamp after previous rotator job finished */
1772 sde_hw_rotator_setup_timestamp_packet(ctx, mask, swts);
1773 offset += length;
1774 ts_length = sde_hw_rotator_get_regdma_segment(ctx) - wrptr;
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001775 ts_length /= sizeof(u32);
Alan Kwong6bc64622017-02-04 17:36:03 -08001776 WARN_ON((length + ts_length) > SDE_HW_ROT_REGDMA_SEG_SIZE);
Alan Kwong9487de22016-01-16 22:06:36 -05001777
Alan Kwong6bc64622017-02-04 17:36:03 -08001778 /* ensure command packet is issued before the submit command */
1779 wmb();
Alan Kwong9487de22016-01-16 22:06:36 -05001780
Alan Kwong6bc64622017-02-04 17:36:03 -08001781 if (queue_id == ROT_QUEUE_HIGH_PRIORITY) {
1782 SDE_ROTREG_WRITE(rot->mdss_base,
1783 REGDMA_CSR_REGDMA_QUEUE_0_SUBMIT,
1784 enableInt | (ts_length << 14) | offset);
1785 } else {
1786 SDE_ROTREG_WRITE(rot->mdss_base,
1787 REGDMA_CSR_REGDMA_QUEUE_1_SUBMIT,
1788 enableInt | (ts_length << 14) | offset);
1789 }
Alan Kwong9487de22016-01-16 22:06:36 -05001790 }
1791
Alan Kwong9487de22016-01-16 22:06:36 -05001792 /* Update command queue write ptr */
1793 sde_hw_rotator_put_regdma_segment(ctx, wrptr);
1794
1795 return ctx->timestamp;
1796}
1797
1798/*
1799 * sde_hw_rotator_wait_done_no_regdma - wait for non-regdma completion
1800 * @ctx: Pointer to rotator context
1801 * @queue_id: Priority queue identifier
1802 * @flags: Option flag
1803 */
1804static u32 sde_hw_rotator_wait_done_no_regdma(
1805 struct sde_hw_rotator_context *ctx,
1806 enum sde_rot_queue_prio queue_id, u32 flag)
1807{
1808 struct sde_hw_rotator *rot = ctx->rot;
1809 int rc = 0;
1810 u32 sts = 0;
1811 u32 status;
1812 unsigned long flags;
1813
1814 if (rot->irq_num >= 0) {
1815 SDEROT_DBG("Wait for Rotator completion\n");
1816 rc = wait_for_completion_timeout(&ctx->rot_comp,
Alan Kwong6bc64622017-02-04 17:36:03 -08001817 msecs_to_jiffies(rot->koff_timeout));
Alan Kwong9487de22016-01-16 22:06:36 -05001818
1819 spin_lock_irqsave(&rot->rotisr_lock, flags);
1820 status = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
1821 if (rc == 0) {
1822 /*
1823 * Timeout; there might be an error,
1824 * or the rotator may still be busy.
1825 */
1826 if (status & ROT_BUSY_BIT)
1827 SDEROT_ERR(
1828 "Timeout waiting for rotator done\n");
1829 else if (status & ROT_ERROR_BIT)
1830 SDEROT_ERR(
1831 "Rotator report error status\n");
1832 else
1833 SDEROT_WARN(
1834 "Timeout waiting, but rotator job is done!!\n");
1835
Alan Kwong818b7fc2016-07-24 22:07:41 -04001836 sde_hw_rotator_disable_irq(rot);
Alan Kwong9487de22016-01-16 22:06:36 -05001837 }
1838 spin_unlock_irqrestore(&rot->rotisr_lock, flags);
1839 } else {
1840 int cnt = 200;
1841
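/* poll for up to 200 * 500us = 100ms for the rotator to go idle */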
1842 do {
1843 udelay(500);
1844 status = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
1845 cnt--;
1846 } while ((cnt > 0) && (status & ROT_BUSY_BIT)
1847 && ((status & ROT_ERROR_BIT) == 0));
1848
1849 if (status & ROT_ERROR_BIT)
1850 SDEROT_ERR("Rotator error\n");
1851 else if (status & ROT_BUSY_BIT)
1852 SDEROT_ERR("Rotator busy\n");
1853
1854 SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
1855 ROT_DONE_CLEAR);
1856 }
1857
1858 sts = (status & ROT_ERROR_BIT) ? -ENODEV : 0;
1859
1860 return sts;
1861}
1862
1863/*
1864 * sde_hw_rotator_wait_done_regdma - wait for regdma completion
1865 * @ctx: Pointer to rotator context
1866 * @queue_id: Priority queue identifier
1867 * @flags: Option flag
1868 */
1869static u32 sde_hw_rotator_wait_done_regdma(
1870 struct sde_hw_rotator_context *ctx,
1871 enum sde_rot_queue_prio queue_id, u32 flag)
1872{
1873 struct sde_hw_rotator *rot = ctx->rot;
1874 int rc = 0;
1875 u32 status;
1876 u32 last_isr;
1877 u32 last_ts;
1878 u32 int_id;
Alan Kwong818b7fc2016-07-24 22:07:41 -04001879 u32 swts;
Alan Kwong9487de22016-01-16 22:06:36 -05001880 u32 sts = 0;
Benjamin Chan2f6fc402017-09-27 16:15:33 -04001881 u32 ubwcerr = 0;
Alan Kwong9487de22016-01-16 22:06:36 -05001882 unsigned long flags;
1883
1884 if (rot->irq_num >= 0) {
1885 SDEROT_DBG("Wait for REGDMA completion, ctx:%p, ts:%X\n",
1886 ctx, ctx->timestamp);
Alan Kwong818b7fc2016-07-24 22:07:41 -04001887 rc = wait_event_timeout(ctx->regdma_waitq,
1888 !sde_hw_rotator_pending_swts(rot, ctx, &swts),
Alan Kwong6bc64622017-02-04 17:36:03 -08001889 msecs_to_jiffies(rot->koff_timeout));
Alan Kwong9487de22016-01-16 22:06:36 -05001890
Benjamin Chane7ca72e2016-12-22 18:42:34 -05001891 ATRACE_INT("sde_rot_done", 0);
Alan Kwong9487de22016-01-16 22:06:36 -05001892 spin_lock_irqsave(&rot->rotisr_lock, flags);
1893
1894 last_isr = ctx->last_regdma_isr_status;
1895 last_ts = ctx->last_regdma_timestamp;
1896 status = last_isr & REGDMA_INT_MASK;
1897 int_id = last_ts & 1;
1898 SDEROT_DBG("INT status:0x%X, INT id:%d, timestamp:0x%X\n",
1899 status, int_id, last_ts);
1900
1901 if (rc == 0 || (status & REGDMA_INT_ERR_MASK)) {
Alan Kwong818b7fc2016-07-24 22:07:41 -04001902 bool pending;
1903
1904 pending = sde_hw_rotator_pending_swts(rot, ctx, &swts);
Alan Kwong9487de22016-01-16 22:06:36 -05001905 SDEROT_ERR(
Alan Kwong818b7fc2016-07-24 22:07:41 -04001906 "Timeout wait for regdma interrupt status, ts:0x%X/0x%X pending:%d\n",
1907 ctx->timestamp, swts, pending);
Alan Kwong9487de22016-01-16 22:06:36 -05001908
1909 if (status & REGDMA_WATCHDOG_INT)
1910 SDEROT_ERR("REGDMA watchdog interrupt\n");
1911 else if (status & REGDMA_INVALID_DESCRIPTOR)
1912 SDEROT_ERR("REGDMA invalid descriptor\n");
1913 else if (status & REGDMA_INCOMPLETE_CMD)
1914 SDEROT_ERR("REGDMA incomplete command\n");
1915 else if (status & REGDMA_INVALID_CMD)
1916 SDEROT_ERR("REGDMA invalid command\n");
1917
Benjamin Chan2f6fc402017-09-27 16:15:33 -04001918 sde_hw_rotator_dump_status(rot, &ubwcerr);
1919
1920 if (ubwcerr) {
1921 /*
1922 * Perform recovery for ROT SSPP UBWC decode
1923 * error.
1924 * - SW reset rotator hw block
1925 * - reset TS logic so all pending rotation
1926 * in hw queue got done signalled
1927 */
1928 spin_unlock_irqrestore(&rot->rotisr_lock,
1929 flags);
1930 if (!sde_hw_rotator_reset(rot, ctx))
1931 status = REGDMA_INCOMPLETE_CMD;
1932 else
1933 status = ROT_ERROR_BIT;
1934 spin_lock_irqsave(&rot->rotisr_lock, flags);
1935 } else {
1936 status = ROT_ERROR_BIT;
1937 }
Alan Kwong818b7fc2016-07-24 22:07:41 -04001938 } else {
1939 if (rc == 1)
1940 SDEROT_WARN(
1941 "REGDMA done but no irq, ts:0x%X/0x%X\n",
1942 ctx->timestamp, swts);
Alan Kwong9487de22016-01-16 22:06:36 -05001943 status = 0;
1944 }
1945
Alan Kwong9487de22016-01-16 22:06:36 -05001946 spin_unlock_irqrestore(&rot->rotisr_lock, flags);
1947 } else {
1948 int cnt = 200;
Alan Kwongb0679602016-11-27 17:04:13 -08001949 bool pending;
Alan Kwong9487de22016-01-16 22:06:36 -05001950
1951 do {
1952 udelay(500);
Alan Kwongb0679602016-11-27 17:04:13 -08001953 last_isr = SDE_ROTREG_READ(rot->mdss_base,
1954 REGDMA_CSR_REGDMA_INT_STATUS);
1955 pending = sde_hw_rotator_pending_swts(rot, ctx, &swts);
Alan Kwong9487de22016-01-16 22:06:36 -05001956 cnt--;
Alan Kwongb0679602016-11-27 17:04:13 -08001957 } while ((cnt > 0) && pending &&
1958 ((last_isr & REGDMA_INT_ERR_MASK) == 0));
Alan Kwong9487de22016-01-16 22:06:36 -05001959
Alan Kwongb0679602016-11-27 17:04:13 -08001960 if (last_isr & REGDMA_INT_ERR_MASK) {
1961 SDEROT_ERR("Rotator error, ts:0x%X/0x%X status:%x\n",
1962 ctx->timestamp, swts, last_isr);
Benjamin Chan2f6fc402017-09-27 16:15:33 -04001963 sde_hw_rotator_dump_status(rot, NULL);
Alan Kwongb0679602016-11-27 17:04:13 -08001964 status = ROT_ERROR_BIT;
1965 } else if (pending) {
1966 SDEROT_ERR("Rotator timeout, ts:0x%X/0x%X status:%x\n",
1967 ctx->timestamp, swts, last_isr);
Benjamin Chan2f6fc402017-09-27 16:15:33 -04001968 sde_hw_rotator_dump_status(rot, NULL);
Alan Kwongb0679602016-11-27 17:04:13 -08001969 status = ROT_ERROR_BIT;
1970 } else {
1971 status = 0;
1972 }
Alan Kwong9487de22016-01-16 22:06:36 -05001973
1974 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_INT_CLEAR,
Alan Kwongb0679602016-11-27 17:04:13 -08001975 last_isr);
Alan Kwong9487de22016-01-16 22:06:36 -05001976 }
1977
Benjamin Chan2f6fc402017-09-27 16:15:33 -04001978 sts = (status & (ROT_ERROR_BIT | REGDMA_INCOMPLETE_CMD)) ? -ENODEV : 0;
Alan Kwong9487de22016-01-16 22:06:36 -05001979
Benjamin Chan4ec1f1d2016-09-15 22:49:49 -04001980 if (status & ROT_ERROR_BIT)
Benjamin Chan2d6411a2017-03-28 18:01:53 -04001981 SDEROT_EVTLOG_TOUT_HANDLER("rot", "rot_dbg_bus",
1982 "vbif_dbg_bus", "panic");
Benjamin Chan4ec1f1d2016-09-15 22:49:49 -04001983
Alan Kwong9487de22016-01-16 22:06:36 -05001984 return sts;
1985}
1986
1987/*
1988 * setup_rotator_ops - setup callback functions for the low-level HAL
1989 * @ops: Pointer to low-level ops callback
1990 * @mode: Operation mode (non-regdma or regdma)
1991 */
1992static void setup_rotator_ops(struct sde_hw_rotator_ops *ops,
1993 enum sde_rotator_regdma_mode mode)
1994{
1995 ops->setup_rotator_fetchengine = sde_hw_rotator_setup_fetchengine;
1996 ops->setup_rotator_wbengine = sde_hw_rotator_setup_wbengine;
1997 if (mode == ROT_REGDMA_ON) {
1998 ops->start_rotator = sde_hw_rotator_start_regdma;
1999 ops->wait_rotator_done = sde_hw_rotator_wait_done_regdma;
2000 } else {
2001 ops->start_rotator = sde_hw_rotator_start_no_regdma;
2002 ops->wait_rotator_done = sde_hw_rotator_wait_done_no_regdma;
2003 }
2004}
2005
2006/*
2007 * sde_hw_rotator_swts_create - create software timestamp buffer
2008 * @rot: Pointer to rotator hw
2009 *
2010 * This buffer is used by regdma to keep track of last completed command.
2011 */
2012static int sde_hw_rotator_swts_create(struct sde_hw_rotator *rot)
2013{
2014 int rc = 0;
2015 struct ion_handle *handle;
2016 struct sde_mdp_img_data *data;
Abhijit Kulkarni298c8232016-09-26 22:32:10 -07002017 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
Alan Kwong9487de22016-01-16 22:06:36 -05002018 u32 bufsize = sizeof(int) * SDE_HW_ROT_REGDMA_TOTAL_CTX * 2;
2019
Abhijit Kulkarni298c8232016-09-26 22:32:10 -07002020 rot->iclient = mdata->iclient;
Alan Kwong9487de22016-01-16 22:06:36 -05002021
2022 handle = ion_alloc(rot->iclient, bufsize, SZ_4K,
2023 ION_HEAP(ION_SYSTEM_HEAP_ID), 0);
2024 if (IS_ERR_OR_NULL(handle)) {
2025 SDEROT_ERR("ion memory allocation failed\n");
2026 return -ENOMEM;
2027 }
2028
2029 data = &rot->swts_buf;
2030 data->len = bufsize;
2031 data->srcp_dma_buf = ion_share_dma_buf(rot->iclient, handle);
2032 if (IS_ERR(data->srcp_dma_buf)) {
2033 SDEROT_ERR("ion_dma_buf setup failed\n");
2034 rc = -ENOMEM;
2035 goto imap_err;
2036 }
2037
2038 sde_smmu_ctrl(1);
2039
2040 data->srcp_attachment = sde_smmu_dma_buf_attach(data->srcp_dma_buf,
2041 &rot->pdev->dev, SDE_IOMMU_DOMAIN_ROT_UNSECURE);
2042 if (IS_ERR_OR_NULL(data->srcp_attachment)) {
2043 SDEROT_ERR("sde_smmu_dma_buf_attach error\n");
2044 rc = -ENOMEM;
2045 goto err_put;
2046 }
2047
2048 data->srcp_table = dma_buf_map_attachment(data->srcp_attachment,
2049 DMA_BIDIRECTIONAL);
2050 if (IS_ERR_OR_NULL(data->srcp_table)) {
2051 SDEROT_ERR("dma_buf_map_attachment error\n");
2052 rc = -ENOMEM;
2053 goto err_detach;
2054 }
2055
2056 rc = sde_smmu_map_dma_buf(data->srcp_dma_buf, data->srcp_table,
2057 SDE_IOMMU_DOMAIN_ROT_UNSECURE, &data->addr,
2058 &data->len, DMA_BIDIRECTIONAL);
Alan Kwong6ce448d2016-11-24 18:45:20 -08002059 if (rc < 0) {
Alan Kwong9487de22016-01-16 22:06:36 -05002060 SDEROT_ERR("smmu_map_dma_buf failed: (%d)\n", rc);
2061 goto err_unmap;
2062 }
2063
Alan Kwong6ce448d2016-11-24 18:45:20 -08002064 dma_buf_begin_cpu_access(data->srcp_dma_buf, DMA_FROM_DEVICE);
Alan Kwong9487de22016-01-16 22:06:36 -05002065 rot->swts_buffer = dma_buf_kmap(data->srcp_dma_buf, 0);
2066 if (IS_ERR_OR_NULL(rot->swts_buffer)) {
2067 SDEROT_ERR("ion kernel memory mapping failed\n");
2068 rc = IS_ERR(rot->swts_buffer);
2069 goto kmap_err;
2070 }
2071
2072 data->mapped = true;
2073 SDEROT_DBG("swts buffer mapped: %pad/%lx va:%p\n", &data->addr,
2074 data->len, rot->swts_buffer);
2075
2076 ion_free(rot->iclient, handle);
2077
2078 sde_smmu_ctrl(0);
2079
2080 return rc;
2081kmap_err:
2082 sde_smmu_unmap_dma_buf(data->srcp_table, SDE_IOMMU_DOMAIN_ROT_UNSECURE,
2083 DMA_FROM_DEVICE, data->srcp_dma_buf);
2084err_unmap:
2085 dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table,
2086 DMA_FROM_DEVICE);
2087err_detach:
2088 dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment);
2089err_put:
2090 dma_buf_put(data->srcp_dma_buf);
2091 data->srcp_dma_buf = NULL;
2092imap_err:
2093 ion_free(rot->iclient, handle);
2094
2095 return rc;
2096}
2097
2098/*
2099 * sde_hw_rotator_swtc_destroy - destroy software timestamp buffer
2100 * @rot: Pointer to rotator hw
2101 */
2102static void sde_hw_rotator_swtc_destroy(struct sde_hw_rotator *rot)
2103{
2104 struct sde_mdp_img_data *data;
2105
2106 data = &rot->swts_buf;
2107
Alan Kwong6ce448d2016-11-24 18:45:20 -08002108 dma_buf_end_cpu_access(data->srcp_dma_buf, DMA_FROM_DEVICE);
Alan Kwong9487de22016-01-16 22:06:36 -05002109 dma_buf_kunmap(data->srcp_dma_buf, 0, rot->swts_buffer);
2110
2111 sde_smmu_unmap_dma_buf(data->srcp_table, SDE_IOMMU_DOMAIN_ROT_UNSECURE,
2112 DMA_FROM_DEVICE, data->srcp_dma_buf);
2113 dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table,
2114 DMA_FROM_DEVICE);
2115 dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment);
2116 dma_buf_put(data->srcp_dma_buf);
2117 data->srcp_dma_buf = NULL;
2118}
2119
2120/*
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04002121 * sde_hw_rotator_pre_pmevent - SDE rotator core will call this before a
2122 * PM event occurs
2123 * @mgr: Pointer to rotator manager
2124 * @pmon: Boolean indicate an on/off power event
2125 */
2126void sde_hw_rotator_pre_pmevent(struct sde_rot_mgr *mgr, bool pmon)
2127{
2128 struct sde_hw_rotator *rot;
2129 u32 l_ts, h_ts, swts, hwts;
2130 u32 rotsts, regdmasts;
2131
2132 /*
2133 * Check the last HW timestamp against the SW timestamp before a
2134 * power off event. If there is a mismatch, it is quite possible that
2135 * the rotator HW is either hung or has not finished the last
2136 * submitted job. In that case, it is best to do a timeout eventlog to
2137 * capture some good event log data for analysis.
2138 */
2139 if (!pmon && mgr && mgr->hw_data) {
2140 rot = mgr->hw_data;
2141 h_ts = atomic_read(&rot->timestamp[ROT_QUEUE_HIGH_PRIORITY]);
2142 l_ts = atomic_read(&rot->timestamp[ROT_QUEUE_LOW_PRIORITY]);
2143
2144 /* construct the combined timestamp */
2145 swts = (h_ts & SDE_REGDMA_SWTS_MASK) |
2146 ((l_ts & SDE_REGDMA_SWTS_MASK) <<
2147 SDE_REGDMA_SWTS_SHIFT);
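/*
 * The high priority queue count sits in the lower half of the word
 * and the low priority count in the upper half, which is compared
 * against the REGDMA_TIMESTAMP_REG value read back below.
 */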
2148
2149 /* Need to turn on clock to access rotator register */
2150 sde_rotator_clk_ctrl(mgr, true);
2151 hwts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);
2152 regdmasts = SDE_ROTREG_READ(rot->mdss_base,
2153 REGDMA_CSR_REGDMA_BLOCK_STATUS);
2154 rotsts = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
2155
2156 SDEROT_DBG(
2157 "swts:0x%x, hwts:0x%x, regdma-sts:0x%x, rottop-sts:0x%x\n",
2158 swts, hwts, regdmasts, rotsts);
2159 SDEROT_EVTLOG(swts, hwts, regdmasts, rotsts);
2160
2161 if ((swts != hwts) && ((regdmasts & REGDMA_BUSY) ||
2162 (rotsts & ROT_STATUS_MASK))) {
2163 SDEROT_ERR(
2164 "Mismatch SWTS with HWTS: swts:0x%x, hwts:0x%x, regdma-sts:0x%x, rottop-sts:0x%x\n",
2165 swts, hwts, regdmasts, rotsts);
Benjamin Chan2d6411a2017-03-28 18:01:53 -04002166 SDEROT_EVTLOG_TOUT_HANDLER("rot", "rot_dbg_bus",
2167 "vbif_dbg_bus", "panic");
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04002168 }
2169
2170 /* Turn off rotator clock after checking rotator registers */
2171 sde_rotator_clk_ctrl(mgr, false);
2172 }
2173}
2174
2175/*
2176 * sde_hw_rotator_post_pmevent - SDE rotator core will call this after a
2177 * PM event occurs
2178 * @mgr: Pointer to rotator manager
2179 * @pmon: Boolean indicate an on/off power event
2180 */
2181void sde_hw_rotator_post_pmevent(struct sde_rot_mgr *mgr, bool pmon)
2182{
2183 struct sde_hw_rotator *rot;
2184 u32 l_ts, h_ts, swts;
2185
2186 /*
2187 * After a power on event, the rotator HW is reset to its default settings.
2188 * It is necessary to synchronize the SW timestamp with the HW.
2189 */
2190 if (pmon && mgr && mgr->hw_data) {
2191 rot = mgr->hw_data;
2192 h_ts = atomic_read(&rot->timestamp[ROT_QUEUE_HIGH_PRIORITY]);
2193 l_ts = atomic_read(&rot->timestamp[ROT_QUEUE_LOW_PRIORITY]);
2194
2195 /* construct the combined timestamp */
2196 swts = (h_ts & SDE_REGDMA_SWTS_MASK) |
2197 ((l_ts & SDE_REGDMA_SWTS_MASK) <<
2198 SDE_REGDMA_SWTS_SHIFT);
2199
2200 SDEROT_DBG("swts:0x%x, h_ts:0x%x, l_ts;0x%x\n",
2201 swts, h_ts, l_ts);
2202 SDEROT_EVTLOG(swts, h_ts, l_ts);
2203 rot->reset_hw_ts = true;
2204 rot->last_hw_ts = swts;
2205 }
2206}
2207
2208/*
Alan Kwong9487de22016-01-16 22:06:36 -05002209 * sde_hw_rotator_destroy - Destroy hw rotator and free allocated resources
2210 * @mgr: Pointer to rotator manager
2211 */
2212static void sde_hw_rotator_destroy(struct sde_rot_mgr *mgr)
2213{
2214 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
2215 struct sde_hw_rotator *rot;
2216
2217 if (!mgr || !mgr->pdev || !mgr->hw_data) {
2218 SDEROT_ERR("null parameters\n");
2219 return;
2220 }
2221
2222 rot = mgr->hw_data;
2223 if (rot->irq_num >= 0)
2224 devm_free_irq(&mgr->pdev->dev, rot->irq_num, mdata);
2225
2226 if (rot->mode == ROT_REGDMA_ON)
2227 sde_hw_rotator_swtc_destroy(rot);
2228
2229 devm_kfree(&mgr->pdev->dev, mgr->hw_data);
2230 mgr->hw_data = NULL;
2231}
2232
2233/*
2234 * sde_hw_rotator_alloc_ext - allocate rotator resource from rotator hw
2235 * @mgr: Pointer to rotator manager
2236 * @pipe_id: pipe identifier (not used)
2237 * @wb_id: writeback identifier/priority queue identifier
2238 *
2239 * This function allocates a new hw rotator resource for the given priority.
2240 */
2241static struct sde_rot_hw_resource *sde_hw_rotator_alloc_ext(
2242 struct sde_rot_mgr *mgr, u32 pipe_id, u32 wb_id)
2243{
2244 struct sde_hw_rotator_resource_info *resinfo;
2245
2246 if (!mgr || !mgr->hw_data) {
2247 SDEROT_ERR("null parameters\n");
2248 return NULL;
2249 }
2250
2251 /*
2252 * Allocate rotator resource info. Each allocation is per
2253 * HW priority queue
2254 */
2255 resinfo = devm_kzalloc(&mgr->pdev->dev, sizeof(*resinfo), GFP_KERNEL);
2256 if (!resinfo) {
2257 SDEROT_ERR("Failed allocation HW rotator resource info\n");
2258 return NULL;
2259 }
2260
2261 resinfo->rot = mgr->hw_data;
2262 resinfo->hw.wb_id = wb_id;
2263 atomic_set(&resinfo->hw.num_active, 0);
2264 init_waitqueue_head(&resinfo->hw.wait_queue);
2265
2266 /* For non-regdma, only support one active session */
2267 if (resinfo->rot->mode == ROT_REGDMA_OFF)
2268 resinfo->hw.max_active = 1;
2269 else {
2270 resinfo->hw.max_active = SDE_HW_ROT_REGDMA_TOTAL_CTX - 1;
2271
2272 if (resinfo->rot->iclient == NULL)
2273 sde_hw_rotator_swts_create(resinfo->rot);
2274 }
2275
Alan Kwongf987ea32016-07-06 12:11:44 -04002276 if (resinfo->rot->irq_num >= 0)
Alan Kwong818b7fc2016-07-24 22:07:41 -04002277 sde_hw_rotator_enable_irq(resinfo->rot);
Alan Kwongf987ea32016-07-06 12:11:44 -04002278
Alan Kwong9487de22016-01-16 22:06:36 -05002279 SDEROT_DBG("New rotator resource:%p, priority:%d\n",
2280 resinfo, wb_id);
2281
2282 return &resinfo->hw;
2283}
2284
2285/*
2286 * sde_hw_rotator_free_ext - free the given rotator resource
2287 * @mgr: Pointer to rotator manager
2288 * @hw: Pointer to rotator resource
2289 */
2290static void sde_hw_rotator_free_ext(struct sde_rot_mgr *mgr,
2291 struct sde_rot_hw_resource *hw)
2292{
2293 struct sde_hw_rotator_resource_info *resinfo;
2294
2295 if (!mgr || !mgr->hw_data)
2296 return;
2297
2298 resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
2299
2300 SDEROT_DBG(
2301 "Free rotator resource:%p, priority:%d, active:%d, pending:%d\n",
2302 resinfo, hw->wb_id, atomic_read(&hw->num_active),
2303 hw->pending_count);
2304
Alan Kwongf987ea32016-07-06 12:11:44 -04002305 if (resinfo->rot->irq_num >= 0)
Alan Kwong818b7fc2016-07-24 22:07:41 -04002306 sde_hw_rotator_disable_irq(resinfo->rot);
Alan Kwongf987ea32016-07-06 12:11:44 -04002307
Alan Kwong9487de22016-01-16 22:06:36 -05002308 devm_kfree(&mgr->pdev->dev, resinfo);
2309}
2310
2311/*
2312 * sde_hw_rotator_alloc_rotctx - allocate rotator context
2313 * @rot: Pointer to rotator hw
2314 * @hw: Pointer to rotator resource
2315 * @session_id: Session identifier of this context
Clarence Ip9e6c3302017-06-02 11:02:57 -04002316 * @sequence_id: Sequence identifier of this request
Alan Kwong6bc64622017-02-04 17:36:03 -08002317 * @sbuf_mode: true if stream buffer is requested
Alan Kwong9487de22016-01-16 22:06:36 -05002318 *
2319 * This function allocates a new rotator context for the given session id.
2320 */
2321static struct sde_hw_rotator_context *sde_hw_rotator_alloc_rotctx(
2322 struct sde_hw_rotator *rot,
2323 struct sde_rot_hw_resource *hw,
Alan Kwong6bc64622017-02-04 17:36:03 -08002324 u32 session_id,
Clarence Ip9e6c3302017-06-02 11:02:57 -04002325 u32 sequence_id,
Alan Kwong6bc64622017-02-04 17:36:03 -08002326 bool sbuf_mode)
Alan Kwong9487de22016-01-16 22:06:36 -05002327{
2328 struct sde_hw_rotator_context *ctx;
2329
2330 /* Allocate rotator context */
2331 ctx = devm_kzalloc(&rot->pdev->dev, sizeof(*ctx), GFP_KERNEL);
2332 if (!ctx) {
2333 SDEROT_ERR("Failed allocation HW rotator context\n");
2334 return NULL;
2335 }
2336
2337 ctx->rot = rot;
2338 ctx->q_id = hw->wb_id;
2339 ctx->session_id = session_id;
Clarence Ip9e6c3302017-06-02 11:02:57 -04002340 ctx->sequence_id = sequence_id;
Alan Kwong9487de22016-01-16 22:06:36 -05002341 ctx->hwres = hw;
2342 ctx->timestamp = atomic_add_return(1, &rot->timestamp[ctx->q_id]);
2343 ctx->timestamp &= SDE_REGDMA_SWTS_MASK;
2344 ctx->is_secure = false;
Alan Kwong6bc64622017-02-04 17:36:03 -08002345 ctx->sbuf_mode = sbuf_mode;
2346 INIT_LIST_HEAD(&ctx->list);
Alan Kwong9487de22016-01-16 22:06:36 -05002347
2348 ctx->regdma_base = rot->cmd_wr_ptr[ctx->q_id]
2349 [sde_hw_rotator_get_regdma_ctxidx(ctx)];
2350 ctx->regdma_wrptr = ctx->regdma_base;
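/*
 * Each context owns one u32 slot in the shared swts buffer, indexed
 * first by priority queue and then by regdma context index.
 */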
2351 ctx->ts_addr = (dma_addr_t)((u32 *)rot->swts_buf.addr +
2352 ctx->q_id * SDE_HW_ROT_REGDMA_TOTAL_CTX +
2353 sde_hw_rotator_get_regdma_ctxidx(ctx));
2354
Alan Kwong818b7fc2016-07-24 22:07:41 -04002355 ctx->last_regdma_timestamp = SDE_REGDMA_SWTS_INVALID;
2356
Alan Kwong9487de22016-01-16 22:06:36 -05002357 init_completion(&ctx->rot_comp);
Alan Kwong818b7fc2016-07-24 22:07:41 -04002358 init_waitqueue_head(&ctx->regdma_waitq);
Alan Kwong9487de22016-01-16 22:06:36 -05002359
2360 /* Store rotator context for lookup purpose */
2361 sde_hw_rotator_put_ctx(ctx);
2362
2363 SDEROT_DBG(
Alan Kwong6bc64622017-02-04 17:36:03 -08002364 "New rot CTX:%p, ctxidx:%d, session-id:%d, prio:%d, timestamp:%X, active:%d sbuf:%d\n",
Alan Kwong9487de22016-01-16 22:06:36 -05002365 ctx, sde_hw_rotator_get_regdma_ctxidx(ctx), ctx->session_id,
2366 ctx->q_id, ctx->timestamp,
Alan Kwong6bc64622017-02-04 17:36:03 -08002367 atomic_read(&ctx->hwres->num_active),
2368 ctx->sbuf_mode);
Alan Kwong9487de22016-01-16 22:06:36 -05002369
2370 return ctx;
2371}
2372
2373/*
2374 * sde_hw_rotator_free_rotctx - free the given rotator context
2375 * @rot: Pointer to rotator hw
2376 * @ctx: Pointer to rotator context
2377 */
2378static void sde_hw_rotator_free_rotctx(struct sde_hw_rotator *rot,
2379 struct sde_hw_rotator_context *ctx)
2380{
2381 if (!rot || !ctx)
2382 return;
2383
2384 SDEROT_DBG(
Alan Kwong6bc64622017-02-04 17:36:03 -08002385 "Free rot CTX:%p, ctxidx:%d, session-id:%d, prio:%d, timestamp:%X, active:%d sbuf:%d\n",
Alan Kwong9487de22016-01-16 22:06:36 -05002386 ctx, sde_hw_rotator_get_regdma_ctxidx(ctx), ctx->session_id,
2387 ctx->q_id, ctx->timestamp,
Alan Kwong6bc64622017-02-04 17:36:03 -08002388 atomic_read(&ctx->hwres->num_active),
2389 ctx->sbuf_mode);
Alan Kwong9487de22016-01-16 22:06:36 -05002390
Benjamin Chanc3e185f2016-11-08 21:48:21 -05002391 /* Clear rotator context from lookup purpose */
2392 sde_hw_rotator_clr_ctx(ctx);
Alan Kwong9487de22016-01-16 22:06:36 -05002393
2394 devm_kfree(&rot->pdev->dev, ctx);
2395}
2396
2397/*
2398 * sde_hw_rotator_config - configure hw for the given rotation entry
2399 * @hw: Pointer to rotator resource
2400 * @entry: Pointer to rotation entry
2401 *
2402 * This function setup the fetch/writeback/rotator blocks, as well as VBIF
2403 * based on the given rotation entry.
2404 */
2405static int sde_hw_rotator_config(struct sde_rot_hw_resource *hw,
2406 struct sde_rot_entry *entry)
2407{
2408 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
2409 struct sde_hw_rotator *rot;
2410 struct sde_hw_rotator_resource_info *resinfo;
2411 struct sde_hw_rotator_context *ctx;
2412 struct sde_hw_rot_sspp_cfg sspp_cfg;
2413 struct sde_hw_rot_wb_cfg wb_cfg;
2414 u32 danger_lut = 0; /* applicable for realtime client only */
2415 u32 safe_lut = 0; /* applicable for realtime client only */
2416 u32 flags = 0;
Benjamin Chana9dd3052017-02-14 17:39:32 -05002417 u32 rststs = 0;
Alan Kwong9487de22016-01-16 22:06:36 -05002418 struct sde_rotation_item *item;
Alan Kwong6bc64622017-02-04 17:36:03 -08002419 int ret;
Alan Kwong9487de22016-01-16 22:06:36 -05002420
2421 if (!hw || !entry) {
2422 SDEROT_ERR("null hw resource/entry\n");
2423 return -EINVAL;
2424 }
2425
2426 resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
2427 rot = resinfo->rot;
2428 item = &entry->item;
2429
Alan Kwong6bc64622017-02-04 17:36:03 -08002430 ctx = sde_hw_rotator_alloc_rotctx(rot, hw, item->session_id,
Clarence Ip9e6c3302017-06-02 11:02:57 -04002431 item->sequence_id, item->output.sbuf);
Alan Kwong9487de22016-01-16 22:06:36 -05002432 if (!ctx) {
2433 SDEROT_ERR("Failed allocating rotator context!!\n");
2434 return -EINVAL;
2435 }
2436
Alan Kwong6bc64622017-02-04 17:36:03 -08002437 /* save entry for debugging purposes */
2438 ctx->last_entry = entry;
2439
2440 if (test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map)) {
2441 if (entry->dst_buf.sbuf) {
2442 u32 op_mode;
2443
2444 if (entry->item.trigger ==
2445 SDE_ROTATOR_TRIGGER_COMMAND)
2446 ctx->start_ctrl = (rot->cmd_trigger << 4);
2447 else if (entry->item.trigger ==
2448 SDE_ROTATOR_TRIGGER_VIDEO)
2449 ctx->start_ctrl = (rot->vid_trigger << 4);
2450 else
2451 ctx->start_ctrl = 0;
2452
2453 ctx->sys_cache_mode = BIT(15) |
2454 ((item->output.scid & 0x1f) << 8) |
2455 (item->output.writeback ? 0x5 : 0);
2456
2457 ctx->op_mode = BIT(4) |
2458 ((ctx->rot->sbuf_headroom & 0xff) << 8);
2459
2460 /* detect transition to inline mode */
2461 op_mode = (SDE_ROTREG_READ(rot->mdss_base,
2462 ROTTOP_OP_MODE) >> 4) & 0x3;
2463 if (!op_mode) {
2464 u32 status;
2465
2466 status = SDE_ROTREG_READ(rot->mdss_base,
2467 ROTTOP_STATUS);
2468 if (status & BIT(0)) {
2469 SDEROT_ERR("rotator busy 0x%x\n",
2470 status);
Benjamin Chan2f6fc402017-09-27 16:15:33 -04002471 sde_hw_rotator_dump_status(rot, NULL);
Alan Kwong6bc64622017-02-04 17:36:03 -08002472 SDEROT_EVTLOG_TOUT_HANDLER("rot",
2473 "vbif_dbg_bus",
2474 "panic");
2475 }
2476 }
2477
2478 } else {
2479 ctx->start_ctrl = BIT(0);
2480 ctx->sys_cache_mode = 0;
2481 ctx->op_mode = 0;
2482 }
2483 } else {
2484 ctx->start_ctrl = BIT(0);
2485 }
2486
2487 SDEROT_EVTLOG(ctx->start_ctrl, ctx->sys_cache_mode, ctx->op_mode);
2488
Benjamin Chana9dd3052017-02-14 17:39:32 -05002489 /*
2490 * If the rotator HW was reset but the PM event notification was
2491 * missed, we need to init the SW timestamp automatically.
2492 */
2493 rststs = SDE_ROTREG_READ(rot->mdss_base, REGDMA_RESET_STATUS_REG);
2494 if (!rot->reset_hw_ts && rststs) {
2495 u32 l_ts, h_ts, swts;
2496
2497 swts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);
2498 h_ts = atomic_read(&rot->timestamp[ROT_QUEUE_HIGH_PRIORITY]);
2499 l_ts = atomic_read(&rot->timestamp[ROT_QUEUE_LOW_PRIORITY]);
2500 SDEROT_EVTLOG(0xbad0, rststs, swts, h_ts, l_ts);
2501
2502 if (ctx->q_id == ROT_QUEUE_HIGH_PRIORITY)
2503 h_ts = (h_ts - 1) & SDE_REGDMA_SWTS_MASK;
2504 else
2505 l_ts = (l_ts - 1) & SDE_REGDMA_SWTS_MASK;
2506
2507 /* construct the combined timestamp */
2508 swts = (h_ts & SDE_REGDMA_SWTS_MASK) |
2509 ((l_ts & SDE_REGDMA_SWTS_MASK) <<
2510 SDE_REGDMA_SWTS_SHIFT);
2511
2512 SDEROT_DBG("swts:0x%x, h_ts:0x%x, l_ts;0x%x\n",
2513 swts, h_ts, l_ts);
2514 SDEROT_EVTLOG(0x900d, swts, h_ts, l_ts);
2515 rot->last_hw_ts = swts;
2516
2517 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG,
2518 rot->last_hw_ts);
2519 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_RESET_STATUS_REG, 0);
2520 /* ensure write is issued to the rotator HW */
2521 wmb();
2522 }
2523
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04002524 if (rot->reset_hw_ts) {
2525 SDEROT_EVTLOG(rot->last_hw_ts);
2526 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG,
2527 rot->last_hw_ts);
Benjamin Chana9dd3052017-02-14 17:39:32 -05002528 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_RESET_STATUS_REG, 0);
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04002529 /* ensure write is issued to the rotator HW */
2530 wmb();
2531 rot->reset_hw_ts = false;
2532 }
2533
Alan Kwong9487de22016-01-16 22:06:36 -05002534 flags = (item->flags & SDE_ROTATION_FLIP_LR) ?
2535 SDE_ROT_FLAG_FLIP_LR : 0;
2536 flags |= (item->flags & SDE_ROTATION_FLIP_UD) ?
2537 SDE_ROT_FLAG_FLIP_UD : 0;
2538 flags |= (item->flags & SDE_ROTATION_90) ?
2539 SDE_ROT_FLAG_ROT_90 : 0;
2540 flags |= (item->flags & SDE_ROTATION_DEINTERLACE) ?
2541 SDE_ROT_FLAG_DEINTERLACE : 0;
2542 flags |= (item->flags & SDE_ROTATION_SECURE) ?
2543 SDE_ROT_FLAG_SECURE_OVERLAY_SESSION : 0;
Abhijit Kulkarni298c8232016-09-26 22:32:10 -07002544 flags |= (item->flags & SDE_ROTATION_SECURE_CAMERA) ?
2545 SDE_ROT_FLAG_SECURE_CAMERA_SESSION : 0;
2546
Alan Kwong9487de22016-01-16 22:06:36 -05002547
2548 sspp_cfg.img_width = item->input.width;
2549 sspp_cfg.img_height = item->input.height;
Benjamin Chan99eb63b2016-12-21 15:45:26 -05002550 sspp_cfg.fps = entry->perf->config.frame_rate;
2551 sspp_cfg.bw = entry->perf->bw;
Alan Kwong9487de22016-01-16 22:06:36 -05002552 sspp_cfg.fmt = sde_get_format_params(item->input.format);
2553 if (!sspp_cfg.fmt) {
2554 SDEROT_ERR("null format\n");
Alan Kwong6bc64622017-02-04 17:36:03 -08002555 ret = -EINVAL;
2556 goto error;
Alan Kwong9487de22016-01-16 22:06:36 -05002557 }
2558 sspp_cfg.src_rect = &item->src_rect;
2559 sspp_cfg.data = &entry->src_buf;
2560 sde_mdp_get_plane_sizes(sspp_cfg.fmt, item->input.width,
2561 item->input.height, &sspp_cfg.src_plane,
2562 0, /* No bwc_mode */
2563 (flags & SDE_ROT_FLAG_SOURCE_ROTATED_90) ?
2564 true : false);
2565
2566 rot->ops.setup_rotator_fetchengine(ctx, ctx->q_id,
Benjamin Chanfb6faa32016-08-16 17:21:01 -04002567 &sspp_cfg, danger_lut, safe_lut,
2568 entry->dnsc_factor_w, entry->dnsc_factor_h, flags);
Alan Kwong9487de22016-01-16 22:06:36 -05002569
2570 wb_cfg.img_width = item->output.width;
2571 wb_cfg.img_height = item->output.height;
Benjamin Chan99eb63b2016-12-21 15:45:26 -05002572 wb_cfg.fps = entry->perf->config.frame_rate;
2573 wb_cfg.bw = entry->perf->bw;
Alan Kwong9487de22016-01-16 22:06:36 -05002574 wb_cfg.fmt = sde_get_format_params(item->output.format);
2575 wb_cfg.dst_rect = &item->dst_rect;
2576 wb_cfg.data = &entry->dst_buf;
2577 sde_mdp_get_plane_sizes(wb_cfg.fmt, item->output.width,
2578 item->output.height, &wb_cfg.dst_plane,
2579 0, /* No bwc_mode */
2580 (flags & SDE_ROT_FLAG_ROT_90) ? true : false);
2581
2582 wb_cfg.v_downscale_factor = entry->dnsc_factor_h;
2583 wb_cfg.h_downscale_factor = entry->dnsc_factor_w;
Alan Kwong498d59f2017-02-11 18:56:34 -08002584 wb_cfg.prefill_bw = item->prefill_bw;
Alan Kwong9487de22016-01-16 22:06:36 -05002585
2586 rot->ops.setup_rotator_wbengine(ctx, ctx->q_id, &wb_cfg, flags);
2587
2588 /* setup VA mapping for debugfs */
2589 if (rot->dbgmem) {
2590 sde_hw_rotator_map_vaddr(&ctx->src_dbgbuf,
2591 &item->input,
2592 &entry->src_buf);
2593
2594 sde_hw_rotator_map_vaddr(&ctx->dst_dbgbuf,
2595 &item->output,
2596 &entry->dst_buf);
2597 }
2598
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04002599 SDEROT_EVTLOG(ctx->timestamp, flags,
2600 item->input.width, item->input.height,
Benjamin Chan53e3bce2016-08-31 14:43:29 -04002601 item->output.width, item->output.height,
Benjamin Chan59a06052017-01-12 18:06:03 -05002602 entry->src_buf.p[0].addr, entry->dst_buf.p[0].addr,
Benjamin Chan1b94f952017-01-23 17:42:30 -05002603 item->input.format, item->output.format,
2604 entry->perf->config.frame_rate);
Benjamin Chan53e3bce2016-08-31 14:43:29 -04002605
Veera Sundaram Sankaran3e539fe2017-05-10 17:03:32 -07002606 if (!ctx->sbuf_mode && mdata->default_ot_rd_limit) {
Alan Kwong9487de22016-01-16 22:06:36 -05002607 struct sde_mdp_set_ot_params ot_params;
2608
2609 memset(&ot_params, 0, sizeof(struct sde_mdp_set_ot_params));
2610 ot_params.xin_id = XIN_SSPP;
2611 ot_params.num = 0; /* not used */
Alan Kwongeffb5ee2016-03-12 19:47:45 -05002612 ot_params.width = entry->perf->config.input.width;
2613 ot_params.height = entry->perf->config.input.height;
2614 ot_params.fps = entry->perf->config.frame_rate;
Alan Kwong9487de22016-01-16 22:06:36 -05002615 ot_params.reg_off_vbif_lim_conf = MMSS_VBIF_RD_LIM_CONF;
2616 ot_params.reg_off_mdp_clk_ctrl =
2617 MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0;
2618 ot_params.bit_off_mdp_clk_ctrl =
2619 MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN0;
Benjamin Chan99eb63b2016-12-21 15:45:26 -05002620 ot_params.fmt = ctx->is_traffic_shaping ?
2621 SDE_PIX_FMT_ABGR_8888 :
2622 entry->perf->config.input.format;
Benjamin Chan1b94f952017-01-23 17:42:30 -05002623 ot_params.rotsts_base = rot->mdss_base + ROTTOP_STATUS;
2624 ot_params.rotsts_busy_mask = ROT_BUSY_BIT;
Alan Kwong9487de22016-01-16 22:06:36 -05002625 sde_mdp_set_ot_limit(&ot_params);
2626 }
2627
Veera Sundaram Sankaran3e539fe2017-05-10 17:03:32 -07002628 if (!ctx->sbuf_mode && mdata->default_ot_wr_limit) {
Alan Kwong9487de22016-01-16 22:06:36 -05002629 struct sde_mdp_set_ot_params ot_params;
2630
2631 memset(&ot_params, 0, sizeof(struct sde_mdp_set_ot_params));
2632 ot_params.xin_id = XIN_WRITEBACK;
2633 ot_params.num = 0; /* not used */
Alan Kwongeffb5ee2016-03-12 19:47:45 -05002634 ot_params.width = entry->perf->config.input.width;
2635 ot_params.height = entry->perf->config.input.height;
2636 ot_params.fps = entry->perf->config.frame_rate;
Alan Kwong9487de22016-01-16 22:06:36 -05002637 ot_params.reg_off_vbif_lim_conf = MMSS_VBIF_WR_LIM_CONF;
2638 ot_params.reg_off_mdp_clk_ctrl =
2639 MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0;
2640 ot_params.bit_off_mdp_clk_ctrl =
2641 MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN1;
Benjamin Chan99eb63b2016-12-21 15:45:26 -05002642 ot_params.fmt = ctx->is_traffic_shaping ?
2643 SDE_PIX_FMT_ABGR_8888 :
2644 entry->perf->config.input.format;
Benjamin Chan1b94f952017-01-23 17:42:30 -05002645 ot_params.rotsts_base = rot->mdss_base + ROTTOP_STATUS;
2646 ot_params.rotsts_busy_mask = ROT_BUSY_BIT;
Alan Kwong9487de22016-01-16 22:06:36 -05002647 sde_mdp_set_ot_limit(&ot_params);
2648 }
2649
2650 if (test_bit(SDE_QOS_PER_PIPE_LUT, mdata->sde_qos_map)) {
2651 u32 qos_lut = 0; /* low priority for nrt read client */
2652
2653 trace_rot_perf_set_qos_luts(XIN_SSPP, sspp_cfg.fmt->format,
2654 qos_lut, sde_mdp_is_linear_format(sspp_cfg.fmt));
2655
2656 SDE_ROTREG_WRITE(rot->mdss_base, ROT_SSPP_CREQ_LUT, qos_lut);
2657 }
2658
Veera Sundaram Sankarane15dd222017-04-20 08:13:08 -07002659 /* VBIF QoS and other settings */
Veera Sundaram Sankaran3e539fe2017-05-10 17:03:32 -07002660 if (!ctx->sbuf_mode)
2661 sde_hw_rotator_vbif_setting(rot);
Benjamin Chan2d6411a2017-03-28 18:01:53 -04002662
Alan Kwong9487de22016-01-16 22:06:36 -05002663 return 0;
Alan Kwong6bc64622017-02-04 17:36:03 -08002664
2665error:
2666 sde_hw_rotator_free_rotctx(rot, ctx);
2667 return ret;
Alan Kwong9487de22016-01-16 22:06:36 -05002668}
2669
2670/*
Clarence Ip3ce07c02017-08-11 16:21:45 -04002671 * sde_hw_rotator_cancel - cancel hw configuration for the given rotation entry
2672 * @hw: Pointer to rotator resource
2673 * @entry: Pointer to rotation entry
2674 *
2675 * This function cancels a previously configured rotation entry.
2676 */
2677static int sde_hw_rotator_cancel(struct sde_rot_hw_resource *hw,
2678 struct sde_rot_entry *entry)
2679{
2680 struct sde_hw_rotator *rot;
2681 struct sde_hw_rotator_resource_info *resinfo;
2682 struct sde_hw_rotator_context *ctx;
2683 unsigned long flags;
2684
2685 if (!hw || !entry) {
2686 SDEROT_ERR("null hw resource/entry\n");
2687 return -EINVAL;
2688 }
2689
2690 resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
2691 rot = resinfo->rot;
2692
2693 /* Lookup rotator context from session-id */
2694 ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id,
2695 entry->item.sequence_id, hw->wb_id);
2696 if (!ctx) {
2697 SDEROT_ERR("Cannot locate rotator ctx from sesison id:%d\n",
2698 entry->item.session_id);
2699 return -EINVAL;
2700 }
2701
2702 spin_lock_irqsave(&rot->rotisr_lock, flags);
2703 sde_hw_rotator_update_swts(rot, ctx, ctx->timestamp);
2704 spin_unlock_irqrestore(&rot->rotisr_lock, flags);
2705
2706 SDEROT_EVTLOG(entry->item.session_id, ctx->timestamp);
2707
2708 if (rot->dbgmem) {
2709 sde_hw_rotator_unmap_vaddr(&ctx->src_dbgbuf);
2710 sde_hw_rotator_unmap_vaddr(&ctx->dst_dbgbuf);
2711 }
2712
2713 /* Current rotator context job is finished, time to free up */
2714 sde_hw_rotator_free_rotctx(rot, ctx);
2715
2716 return 0;
2717}
2718
2719/*
Alan Kwong9487de22016-01-16 22:06:36 -05002720 * sde_hw_rotator_kickoff - kickoff processing on the given entry
2721 * @hw: Pointer to rotator resource
2722 * @entry: Pointer to rotation entry
2723 */
2724static int sde_hw_rotator_kickoff(struct sde_rot_hw_resource *hw,
2725 struct sde_rot_entry *entry)
2726{
2727 struct sde_hw_rotator *rot;
2728 struct sde_hw_rotator_resource_info *resinfo;
2729 struct sde_hw_rotator_context *ctx;
Alan Kwong9487de22016-01-16 22:06:36 -05002730
2731 if (!hw || !entry) {
2732 SDEROT_ERR("null hw resource/entry\n");
2733 return -EINVAL;
2734 }
2735
2736 resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
2737 rot = resinfo->rot;
2738
2739 /* Lookup rotator context from session-id */
Clarence Ip9e6c3302017-06-02 11:02:57 -04002740 ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id,
2741 entry->item.sequence_id, hw->wb_id);
Alan Kwong9487de22016-01-16 22:06:36 -05002742 if (!ctx) {
2743 SDEROT_ERR("Cannot locate rotator ctx from sesison id:%d\n",
2744 entry->item.session_id);
Benjamin Chan62b94ed2016-08-18 23:55:21 -04002745 return -EINVAL;
Alan Kwong9487de22016-01-16 22:06:36 -05002746 }
Alan Kwong9487de22016-01-16 22:06:36 -05002747
Alan Kwong9487de22016-01-16 22:06:36 -05002748 rot->ops.start_rotator(ctx, ctx->q_id);
2749
2750 return 0;
2751}
2752
2753/*
2754 * sde_hw_rotator_wait4done - wait for completion notification
2755 * @hw: Pointer to rotator resource
2756 * @entry: Pointer to rotation entry
2757 *
2758 * This function blocks until the given entry is complete, error
2759 * is detected, or timeout.
2760 */
2761static int sde_hw_rotator_wait4done(struct sde_rot_hw_resource *hw,
2762 struct sde_rot_entry *entry)
2763{
2764 struct sde_hw_rotator *rot;
2765 struct sde_hw_rotator_resource_info *resinfo;
2766 struct sde_hw_rotator_context *ctx;
2767 int ret;
2768
2769 if (!hw || !entry) {
2770 SDEROT_ERR("null hw resource/entry\n");
2771 return -EINVAL;
2772 }
2773
2774 resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
2775 rot = resinfo->rot;
2776
2777 /* Lookup rotator context from session-id */
Clarence Ip9e6c3302017-06-02 11:02:57 -04002778 ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id,
2779 entry->item.sequence_id, hw->wb_id);
Alan Kwong9487de22016-01-16 22:06:36 -05002780 if (!ctx) {
2781 SDEROT_ERR("Cannot locate rotator ctx from sesison id:%d\n",
2782 entry->item.session_id);
Benjamin Chan62b94ed2016-08-18 23:55:21 -04002783 return -EINVAL;
Alan Kwong9487de22016-01-16 22:06:36 -05002784 }
Alan Kwong9487de22016-01-16 22:06:36 -05002785
2786 ret = rot->ops.wait_rotator_done(ctx, ctx->q_id, 0);
2787
Alan Kwong9487de22016-01-16 22:06:36 -05002788 if (rot->dbgmem) {
2789 sde_hw_rotator_unmap_vaddr(&ctx->src_dbgbuf);
2790 sde_hw_rotator_unmap_vaddr(&ctx->dst_dbgbuf);
2791 }
2792
2793 /* Current rotator context job is finished, time to free up */
2794 sde_hw_rotator_free_rotctx(rot, ctx);
2795
2796 return ret;
2797}
2798
2799/*
2800 * sde_rotator_hw_rev_init - setup feature and/or capability bitmask
2801 * @rot: Pointer to hw rotator
2802 *
2803 * This function initializes feature and/or capability bitmask based on
2804 * h/w version read from the device.
2805 */
2806static int sde_rotator_hw_rev_init(struct sde_hw_rotator *rot)
2807{
2808 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
2809 u32 hw_version;
2810
2811 if (!mdata) {
2812 SDEROT_ERR("null rotator data\n");
2813 return -EINVAL;
2814 }
2815
2816 hw_version = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_HW_VERSION);
2817 SDEROT_DBG("hw version %8.8x\n", hw_version);
2818
2819 clear_bit(SDE_QOS_PER_PIPE_IB, mdata->sde_qos_map);
2820 set_bit(SDE_QOS_OVERHEAD_FACTOR, mdata->sde_qos_map);
Alan Kwong9487de22016-01-16 22:06:36 -05002821 set_bit(SDE_QOS_OTLIM, mdata->sde_qos_map);
2822 set_bit(SDE_QOS_PER_PIPE_LUT, mdata->sde_qos_map);
2823 clear_bit(SDE_QOS_SIMPLIFIED_PREFILL, mdata->sde_qos_map);
2824
2825 set_bit(SDE_CAPS_R3_WB, mdata->sde_caps_map);
2826
Alan Kwong6bc64622017-02-04 17:36:03 -08002827 /* features exposed via rotator top h/w version */
Benjamin Chanfb6faa32016-08-16 17:21:01 -04002828 if (hw_version != SDE_ROT_TYPE_V1_0) {
2829 SDEROT_DBG("Supporting 1.5 downscale for SDE Rotator\n");
2830 set_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map);
2831 }
2832
Abhijit Kulkarni298c8232016-09-26 22:32:10 -07002833 set_bit(SDE_CAPS_SEC_ATTACH_DETACH_SMMU, mdata->sde_caps_map);
2834
Benjamin Chan53e3bce2016-08-31 14:43:29 -04002835 mdata->nrt_vbif_dbg_bus = nrt_vbif_dbg_bus_r3;
2836 mdata->nrt_vbif_dbg_bus_size =
2837 ARRAY_SIZE(nrt_vbif_dbg_bus_r3);
2838
Benjamin Chan2d6411a2017-03-28 18:01:53 -04002839 mdata->rot_dbg_bus = rot_dbgbus_r3;
2840 mdata->rot_dbg_bus_size = ARRAY_SIZE(rot_dbgbus_r3);
2841
Benjamin Chan53e3bce2016-08-31 14:43:29 -04002842 mdata->regdump = sde_rot_r3_regdump;
2843 mdata->regdump_size = ARRAY_SIZE(sde_rot_r3_regdump);
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04002844 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG, 0);
Alan Kwong6bc64622017-02-04 17:36:03 -08002845
2846 /* features exposed via mdss h/w version */
Jayant Shekhar292e0a22017-09-12 15:23:24 +05302847 if (IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version, SDE_MDP_HW_REV_400) ||
2848 IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version,
2849 SDE_MDP_HW_REV_410)) {
Alan Kwong6bc64622017-02-04 17:36:03 -08002850 SDEROT_DBG("Supporting sys cache inline rotation\n");
2851 set_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map);
Alan Kwongfb8eeb22017-02-06 15:00:03 -08002852 set_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map);
Clarence Ip22fed4c2017-05-16 15:30:51 -04002853 set_bit(SDE_CAPS_PARTIALWR, mdata->sde_caps_map);
Alan Kwong4b416162017-08-11 21:03:10 -04002854 rot->inpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
2855 sde_hw_rotator_v4_inpixfmts;
2856 rot->num_inpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
2857 ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts);
2858 rot->outpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
2859 sde_hw_rotator_v4_outpixfmts;
2860 rot->num_outpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
2861 ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts);
2862 rot->inpixfmts[SDE_ROTATOR_MODE_SBUF] =
2863 sde_hw_rotator_v4_inpixfmts_sbuf;
2864 rot->num_inpixfmt[SDE_ROTATOR_MODE_SBUF] =
2865 ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts_sbuf);
2866 rot->outpixfmts[SDE_ROTATOR_MODE_SBUF] =
2867 sde_hw_rotator_v4_outpixfmts_sbuf;
2868 rot->num_outpixfmt[SDE_ROTATOR_MODE_SBUF] =
2869 ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts_sbuf);
Alan Kwong6bc64622017-02-04 17:36:03 -08002870 rot->downscale_caps =
2871 "LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2";
2872 } else {
Alan Kwong4b416162017-08-11 21:03:10 -04002873 rot->inpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
2874 sde_hw_rotator_v3_inpixfmts;
2875 rot->num_inpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
2876 ARRAY_SIZE(sde_hw_rotator_v3_inpixfmts);
2877 rot->outpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
2878 sde_hw_rotator_v3_outpixfmts;
2879 rot->num_outpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
2880 ARRAY_SIZE(sde_hw_rotator_v3_outpixfmts);
Alan Kwong6bc64622017-02-04 17:36:03 -08002881 rot->downscale_caps = (hw_version == SDE_ROT_TYPE_V1_0) ?
2882 "LINEAR/2/4/8/16/32/64 TILE/2/4 TP10/2" :
2883 "LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2";
2884 }
2885
Alan Kwong9487de22016-01-16 22:06:36 -05002886 return 0;
2887}
2888
2889/*
2890 * sde_hw_rotator_rotirq_handler - non-regdma interrupt handler
2891 * @irq: Interrupt number
2892 * @ptr: Pointer to private handle provided during registration
2893 *
2894 * This function services rotator interrupt and wakes up waiting client
2895 * with pending rotation requests already submitted to h/w.
2896 */
2897static irqreturn_t sde_hw_rotator_rotirq_handler(int irq, void *ptr)
2898{
2899 struct sde_hw_rotator *rot = ptr;
2900 struct sde_hw_rotator_context *ctx;
2901 irqreturn_t ret = IRQ_NONE;
2902 u32 isr;
2903
2904 isr = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_INTR_STATUS);
2905
2906 SDEROT_DBG("intr_status = %8.8x\n", isr);
2907
2908 if (isr & ROT_DONE_MASK) {
2909 if (rot->irq_num >= 0)
Alan Kwong818b7fc2016-07-24 22:07:41 -04002910 sde_hw_rotator_disable_irq(rot);
Alan Kwong9487de22016-01-16 22:06:36 -05002911 SDEROT_DBG("Notify rotator complete\n");
2912
2913 /* Normal rotator has only 1 session, no need to look it up */
2914 ctx = rot->rotCtx[0][0];
2915 WARN_ON(ctx == NULL);
2916 complete_all(&ctx->rot_comp);
2917
2918 spin_lock(&rot->rotisr_lock);
2919 SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
2920 ROT_DONE_CLEAR);
2921 spin_unlock(&rot->rotisr_lock);
2922 ret = IRQ_HANDLED;
2923 }
2924
2925 return ret;
2926}
2927
2928/*
2929 * sde_hw_rotator_regdmairq_handler - regdma interrupt handler
2930 * @irq: Interrupt number
2931 * @ptr: Pointer to private handle provided during registration
2932 *
2933 * This function services rotator interrupt, decoding the source of
2934 * events (high/low priority queue), and wakes up all waiting clients
2935 * with pending rotation requests already submitted to h/w.
2936 */
2937static irqreturn_t sde_hw_rotator_regdmairq_handler(int irq, void *ptr)
2938{
2939 struct sde_hw_rotator *rot = ptr;
Clarence Ip96854c2db12017-06-12 14:32:26 -04002940 struct sde_hw_rotator_context *ctx, *tmp;
Alan Kwong9487de22016-01-16 22:06:36 -05002941 irqreturn_t ret = IRQ_NONE;
Clarence Ip96854c2db12017-06-12 14:32:26 -04002942 u32 isr, isr_tmp;
Alan Kwong9487de22016-01-16 22:06:36 -05002943 u32 ts;
2944 u32 q_id;
2945
2946 isr = SDE_ROTREG_READ(rot->mdss_base, REGDMA_CSR_REGDMA_INT_STATUS);
Alan Kwong818b7fc2016-07-24 22:07:41 -04002947 /* acknowledge interrupt before reading latest timestamp */
2948 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_INT_CLEAR, isr);
Alan Kwong9487de22016-01-16 22:06:36 -05002949 ts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);
2950
2951 SDEROT_DBG("intr_status = %8.8x, sw_TS:%X\n", isr, ts);
2952
2953 /* Any REGDMA status, including error and watchdog timer, should
2954 * trigger and wake up waiting thread
2955 */
2956 if (isr & (REGDMA_INT_HIGH_MASK | REGDMA_INT_LOW_MASK)) {
2957 spin_lock(&rot->rotisr_lock);
2958
2959 /*
2960 * Obtain rotator context based on timestamp from regdma
2961 * and low/high interrupt status
2962 */
2963 if (isr & REGDMA_INT_HIGH_MASK) {
2964 q_id = ROT_QUEUE_HIGH_PRIORITY;
2965 ts = ts & SDE_REGDMA_SWTS_MASK;
2966 } else if (isr & REGDMA_INT_LOW_MASK) {
2967 q_id = ROT_QUEUE_LOW_PRIORITY;
2968 ts = (ts >> SDE_REGDMA_SWTS_SHIFT) &
2969 SDE_REGDMA_SWTS_MASK;
Benjamin Chan62b94ed2016-08-18 23:55:21 -04002970 } else {
2971 SDEROT_ERR("unknown ISR status: isr=0x%X\n", isr);
2972 goto done_isr_handle;
Alan Kwong9487de22016-01-16 22:06:36 -05002973 }
Alan Kwong6bc64622017-02-04 17:36:03 -08002974
2975 /*
2976 * Timestamp packet is not available in sbuf mode.
2977 * Simulate timestamp update in the handler instead.
2978 */
Clarence Ip96854c2db12017-06-12 14:32:26 -04002979 if (list_empty(&rot->sbuf_ctx[q_id]))
2980 goto skip_sbuf;
2981
2982 ctx = NULL;
2983 isr_tmp = isr;
2984 list_for_each_entry(tmp, &rot->sbuf_ctx[q_id], list) {
2985 u32 mask;
2986
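			/*
			 * Each inline (sbuf) context signals on either
			 * REGDMA_INT_0 or REGDMA_INT_1 based on the parity of
			 * its timestamp, so match the pending interrupt bits
			 * against every queued context.
			 */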
2987 mask = tmp->timestamp & 0x1 ? REGDMA_INT_1_MASK :
2988 REGDMA_INT_0_MASK;
2989 if (isr_tmp & mask) {
2990 isr_tmp &= ~mask;
2991 ctx = tmp;
Alan Kwong6bc64622017-02-04 17:36:03 -08002992 ts = ctx->timestamp;
2993 sde_hw_rotator_update_swts(rot, ctx, ts);
2994 SDEROT_DBG("update swts:0x%X\n", ts);
Alan Kwong6bc64622017-02-04 17:36:03 -08002995 }
Clarence Ip96854c2db12017-06-12 14:32:26 -04002996 SDEROT_EVTLOG(isr, tmp->timestamp);
Alan Kwong6bc64622017-02-04 17:36:03 -08002997 }
Clarence Ip96854c2db12017-06-12 14:32:26 -04002998 if (ctx == NULL)
2999 SDEROT_ERR("invalid swts ctx\n");
3000skip_sbuf:
Alan Kwong9487de22016-01-16 22:06:36 -05003001 ctx = rot->rotCtx[q_id][ts & SDE_HW_ROT_REGDMA_SEG_MASK];
Alan Kwong9487de22016-01-16 22:06:36 -05003002
3003 /*
3004 * Wake up all waiting context from the current and previous
3005 * SW Timestamp.
3006 */
Alan Kwong818b7fc2016-07-24 22:07:41 -04003007 while (ctx &&
3008 sde_hw_rotator_elapsed_swts(ctx->timestamp, ts) >= 0) {
Alan Kwong9487de22016-01-16 22:06:36 -05003009 ctx->last_regdma_isr_status = isr;
3010 ctx->last_regdma_timestamp = ts;
3011 SDEROT_DBG(
Alan Kwongf987ea32016-07-06 12:11:44 -04003012 "regdma complete: ctx:%p, ts:%X\n", ctx, ts);
Alan Kwong818b7fc2016-07-24 22:07:41 -04003013 wake_up_all(&ctx->regdma_waitq);
Alan Kwong9487de22016-01-16 22:06:36 -05003014
3015 ts = (ts - 1) & SDE_REGDMA_SWTS_MASK;
3016 ctx = rot->rotCtx[q_id]
3017 [ts & SDE_HW_ROT_REGDMA_SEG_MASK];
Alan Kwong818b7fc2016-07-24 22:07:41 -04003018		}
Alan Kwong9487de22016-01-16 22:06:36 -05003019
Benjamin Chan62b94ed2016-08-18 23:55:21 -04003020done_isr_handle:
Alan Kwong9487de22016-01-16 22:06:36 -05003021 spin_unlock(&rot->rotisr_lock);
3022 ret = IRQ_HANDLED;
3023 } else if (isr & REGDMA_INT_ERR_MASK) {
3024 /*
3025 * For REGDMA Err, we save the isr info and wake up
3026 * all waiting contexts
3027 */
3028 int i, j;
3029
3030 SDEROT_ERR(
3031 "regdma err isr:%X, wake up all waiting contexts\n",
3032 isr);
3033
3034 spin_lock(&rot->rotisr_lock);
3035
3036 for (i = 0; i < ROT_QUEUE_MAX; i++) {
3037 for (j = 0; j < SDE_HW_ROT_REGDMA_TOTAL_CTX; j++) {
3038 ctx = rot->rotCtx[i][j];
3039 if (ctx && ctx->last_regdma_isr_status == 0) {
3040 ctx->last_regdma_isr_status = isr;
3041 ctx->last_regdma_timestamp = ts;
Alan Kwong818b7fc2016-07-24 22:07:41 -04003042 wake_up_all(&ctx->regdma_waitq);
Alan Kwong9487de22016-01-16 22:06:36 -05003043 SDEROT_DBG("Wakeup rotctx[%d][%d]:%p\n",
3044 i, j, ctx);
3045 }
3046 }
3047 }
3048
Alan Kwong9487de22016-01-16 22:06:36 -05003049 spin_unlock(&rot->rotisr_lock);
3050 ret = IRQ_HANDLED;
3051 }
3052
3053 return ret;
3054}
3055
3056/*
3057 * sde_hw_rotator_validate_entry - validate rotation entry
3058 * @mgr: Pointer to rotator manager
3059 * @entry: Pointer to rotation entry
3060 *
3061 * This function validates the given rotation entry and may apply fixups
3062 * (future improvement) where possible. It returns 0 if the entry is valid,
3063 * and an error code otherwise.
3064 */
3065static int sde_hw_rotator_validate_entry(struct sde_rot_mgr *mgr,
3066 struct sde_rot_entry *entry)
3067{
Benjamin Chanfb6faa32016-08-16 17:21:01 -04003068 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
Alan Kwongb6c049c2017-03-31 12:50:27 -07003069 struct sde_hw_rotator *hw_data;
Alan Kwong9487de22016-01-16 22:06:36 -05003070 int ret = 0;
3071 u16 src_w, src_h, dst_w, dst_h;
3072 struct sde_rotation_item *item = &entry->item;
3073 struct sde_mdp_format_params *fmt;
3074
Alan Kwongb6c049c2017-03-31 12:50:27 -07003075 if (!mgr || !entry || !mgr->hw_data) {
3076 SDEROT_ERR("invalid parameters\n");
3077 return -EINVAL;
3078 }
3079
3080 hw_data = mgr->hw_data;
3081
3082 if (hw_data->maxlinewidth < item->src_rect.w) {
3083 SDEROT_ERR("invalid src width %u\n", item->src_rect.w);
3084 return -EINVAL;
3085 }
3086
Alan Kwong9487de22016-01-16 22:06:36 -05003087 src_w = item->src_rect.w;
3088 src_h = item->src_rect.h;
3089
3090 if (item->flags & SDE_ROTATION_90) {
3091 dst_w = item->dst_rect.h;
3092 dst_h = item->dst_rect.w;
3093 } else {
3094 dst_w = item->dst_rect.w;
3095 dst_h = item->dst_rect.h;
3096 }
3097
3098 entry->dnsc_factor_w = 0;
3099 entry->dnsc_factor_h = 0;
3100
Alan Kwong6bc64622017-02-04 17:36:03 -08003101 if (item->output.sbuf &&
3102 !test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map)) {
3103 SDEROT_ERR("stream buffer not supported\n");
3104 return -EINVAL;
3105 }
3106
Alan Kwong9487de22016-01-16 22:06:36 -05003107 if ((src_w != dst_w) || (src_h != dst_h)) {
Clarence Ip4db1ea82017-05-01 12:18:55 -07003108 if (!dst_w || !dst_h) {
3109			SDEROT_DBG("zero output width/height not supported\n");
3110 ret = -EINVAL;
3111 goto dnsc_err;
3112 }
Alan Kwong9487de22016-01-16 22:06:36 -05003113 if ((src_w % dst_w) || (src_h % dst_h)) {
3114			SDEROT_DBG("non-integral scale not supported\n");
3115 ret = -EINVAL;
Benjamin Chanfb6faa32016-08-16 17:21:01 -04003116 goto dnsc_1p5_check;
Alan Kwong9487de22016-01-16 22:06:36 -05003117 }
3118 entry->dnsc_factor_w = src_w / dst_w;
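		/*
		 * The bitwise test below rejects any ratio that is not a
		 * power of two; ratios larger than 64 are rejected as well.
		 */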
3119 if ((entry->dnsc_factor_w & (entry->dnsc_factor_w - 1)) ||
3120 (entry->dnsc_factor_w > 64)) {
3121			SDEROT_DBG("non power-of-2 w_scale not supported\n");
3122 ret = -EINVAL;
3123 goto dnsc_err;
3124 }
3125 entry->dnsc_factor_h = src_h / dst_h;
3126 if ((entry->dnsc_factor_h & (entry->dnsc_factor_h - 1)) ||
3127 (entry->dnsc_factor_h > 64)) {
3128			SDEROT_DBG("non power-of-2 h_scale not supported\n");
3129 ret = -EINVAL;
3130 goto dnsc_err;
3131 }
3132 }
3133
Benjamin Chan0e96afd2017-01-17 16:49:12 -05003134 fmt = sde_get_format_params(item->output.format);
Benjamin Chan886ff672016-11-07 15:23:17 -05003135 /*
3136	 * Rotator downscale supports at most 4x for UBWC formats and
3137	 * at most 2x for TP10/TP10_UBWC formats
3138 */
3139 if (sde_mdp_is_ubwc_format(fmt) && (entry->dnsc_factor_h > 4)) {
3140 SDEROT_DBG("max downscale for UBWC format is 4\n");
Alan Kwong9487de22016-01-16 22:06:36 -05003141 ret = -EINVAL;
3142 goto dnsc_err;
3143 }
Benjamin Chan886ff672016-11-07 15:23:17 -05003144 if (sde_mdp_is_tp10_format(fmt) && (entry->dnsc_factor_h > 2)) {
3145 SDEROT_DBG("downscale with TP10 cannot be more than 2\n");
Alan Kwong9487de22016-01-16 22:06:36 -05003146 ret = -EINVAL;
3147 }
Benjamin Chanfb6faa32016-08-16 17:21:01 -04003148 goto dnsc_err;
3149
3150dnsc_1p5_check:
3151 /* Check for 1.5 downscale that only applies to V2 HW */
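	/* a 1.5x ratio is expressed below as dst * 3 == src * 2 per dimension */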
3152 if (test_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map)) {
3153 entry->dnsc_factor_w = src_w / dst_w;
3154 if ((entry->dnsc_factor_w != 1) ||
3155 ((dst_w * 3) != (src_w * 2))) {
3156 SDEROT_DBG(
3157				"Not supporting non 1.5 downscale width ratio, src_w:%d, dst_w:%d\n",
3158 src_w, dst_w);
3159 ret = -EINVAL;
3160 goto dnsc_err;
3161 }
3162
3163 entry->dnsc_factor_h = src_h / dst_h;
3164 if ((entry->dnsc_factor_h != 1) ||
3165 ((dst_h * 3) != (src_h * 2))) {
3166 SDEROT_DBG(
3167 "Not supporting non 1.5 downscale height ratio, src_h:%d, dst_h:%d\n",
3168 src_h, dst_h);
3169 ret = -EINVAL;
3170 goto dnsc_err;
3171 }
3172 ret = 0;
3173 }
Alan Kwong9487de22016-01-16 22:06:36 -05003174
3175dnsc_err:
3176 /* Downscaler does not support asymmetrical dnsc */
3177 if (entry->dnsc_factor_w != entry->dnsc_factor_h) {
3178		SDEROT_DBG("asymmetric downscale not supported\n");
3179 ret = -EINVAL;
3180 }
3181
3182 if (ret) {
3183 entry->dnsc_factor_w = 0;
3184 entry->dnsc_factor_h = 0;
3185 }
3186 return ret;
3187}
3188
3189/*
3190 * sde_hw_rotator_show_caps - output capability info to sysfs 'caps' file
3191 * @mgr: Pointer to rotator manager
3192 * @attr: Pointer to device attribute interface
3193 * @buf: Pointer to output buffer
3194 * @len: Length of output buffer
3195 */
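/*
 * A sample of the generated caps output might look like the following
 * (illustrative only; actual values depend on the hardware revision and
 * device tree settings):
 *   min_downscale=1.5
 *   downscale_compression=1
 *   downscale_ratios=LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2
 */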
3196static ssize_t sde_hw_rotator_show_caps(struct sde_rot_mgr *mgr,
3197 struct device_attribute *attr, char *buf, ssize_t len)
3198{
3199 struct sde_hw_rotator *hw_data;
Benjamin Chan886ff672016-11-07 15:23:17 -05003200 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
Alan Kwong9487de22016-01-16 22:06:36 -05003201 int cnt = 0;
3202
3203 if (!mgr || !buf)
3204 return 0;
3205
3206 hw_data = mgr->hw_data;
3207
3208#define SPRINT(fmt, ...) \
3209 (cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
3210
3211 /* insert capabilities here */
Benjamin Chan886ff672016-11-07 15:23:17 -05003212 if (test_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map))
3213 SPRINT("min_downscale=1.5\n");
3214 else
3215 SPRINT("min_downscale=2.0\n");
Alan Kwong9487de22016-01-16 22:06:36 -05003216
Benjamin Chan42db2c92016-11-22 22:50:01 -05003217 SPRINT("downscale_compression=1\n");
3218
Alan Kwong6bc64622017-02-04 17:36:03 -08003219 if (hw_data->downscale_caps)
3220 SPRINT("downscale_ratios=%s\n", hw_data->downscale_caps);
3221
Alan Kwong9487de22016-01-16 22:06:36 -05003222#undef SPRINT
3223 return cnt;
3224}
3225
3226/*
3227 * sde_hw_rotator_show_state - output state info to sysfs 'state' file
3228 * @mgr: Pointer to rotator manager
3229 * @attr: Pointer to device attribute interface
3230 * @buf: Pointer to output buffer
3231 * @len: Length of output buffer
3232 */
3233static ssize_t sde_hw_rotator_show_state(struct sde_rot_mgr *mgr,
3234 struct device_attribute *attr, char *buf, ssize_t len)
3235{
3236 struct sde_hw_rotator *rot;
3237 struct sde_hw_rotator_context *ctx;
3238 int cnt = 0;
3239 int num_active = 0;
3240 int i, j;
3241
3242 if (!mgr || !buf) {
3243 SDEROT_ERR("null parameters\n");
3244 return 0;
3245 }
3246
3247 rot = mgr->hw_data;
3248
3249#define SPRINT(fmt, ...) \
3250 (cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
3251
3252 if (rot) {
3253 SPRINT("rot_mode=%d\n", rot->mode);
3254 SPRINT("irq_num=%d\n", rot->irq_num);
3255
3256 if (rot->mode == ROT_REGDMA_OFF) {
3257 SPRINT("max_active=1\n");
3258 SPRINT("num_active=%d\n", rot->rotCtx[0][0] ? 1 : 0);
3259 } else {
3260 for (i = 0; i < ROT_QUEUE_MAX; i++) {
3261 for (j = 0; j < SDE_HW_ROT_REGDMA_TOTAL_CTX;
3262 j++) {
3263 ctx = rot->rotCtx[i][j];
3264
3265 if (ctx) {
3266 SPRINT(
3267 "rotCtx[%d][%d]:%p\n",
3268 i, j, ctx);
3269 ++num_active;
3270 }
3271 }
3272 }
3273
3274 SPRINT("max_active=%d\n", SDE_HW_ROT_REGDMA_TOTAL_CTX);
3275 SPRINT("num_active=%d\n", num_active);
3276 }
3277 }
3278
3279#undef SPRINT
3280 return cnt;
3281}
3282
3283/*
Alan Kwongda16e442016-08-14 20:47:18 -04003284 * sde_hw_rotator_get_pixfmt - get the indexed pixel format
3285 * @mgr: Pointer to rotator manager
3286 * @index: index of pixel format
3287 * @input: true for input port; false for output port
Alan Kwong4b416162017-08-11 21:03:10 -04003288 * @mode: operating mode
Alan Kwongda16e442016-08-14 20:47:18 -04003289 */
3290static u32 sde_hw_rotator_get_pixfmt(struct sde_rot_mgr *mgr,
Alan Kwong4b416162017-08-11 21:03:10 -04003291 int index, bool input, u32 mode)
Alan Kwongda16e442016-08-14 20:47:18 -04003292{
Alan Kwong6bc64622017-02-04 17:36:03 -08003293 struct sde_hw_rotator *rot;
3294
3295 if (!mgr || !mgr->hw_data) {
3296 SDEROT_ERR("null parameters\n");
3297 return 0;
3298 }
3299
3300 rot = mgr->hw_data;
3301
Alan Kwong4b416162017-08-11 21:03:10 -04003302 if (mode >= SDE_ROTATOR_MODE_MAX) {
3303 SDEROT_ERR("invalid rotator mode %d\n", mode);
3304 return 0;
3305 }
3306
Alan Kwongda16e442016-08-14 20:47:18 -04003307 if (input) {
Alan Kwong4b416162017-08-11 21:03:10 -04003308 if ((index < rot->num_inpixfmt[mode]) && rot->inpixfmts[mode])
3309 return rot->inpixfmts[mode][index];
Alan Kwongda16e442016-08-14 20:47:18 -04003310 else
3311 return 0;
3312 } else {
Alan Kwong4b416162017-08-11 21:03:10 -04003313 if ((index < rot->num_outpixfmt[mode]) && rot->outpixfmts[mode])
3314 return rot->outpixfmts[mode][index];
Alan Kwongda16e442016-08-14 20:47:18 -04003315 else
3316 return 0;
3317 }
3318}
3319
3320/*
3321 * sde_hw_rotator_is_valid_pixfmt - verify if the given pixel format is valid
3322 * @mgr: Pointer to rotator manager
3323 * @pixfmt: pixel format to be verified
3324 * @input: true for input port; false for output port
Alan Kwong4b416162017-08-11 21:03:10 -04003325 * @mode: operating mode
Alan Kwongda16e442016-08-14 20:47:18 -04003326 */
3327static int sde_hw_rotator_is_valid_pixfmt(struct sde_rot_mgr *mgr, u32 pixfmt,
Alan Kwong4b416162017-08-11 21:03:10 -04003328 bool input, u32 mode)
Alan Kwongda16e442016-08-14 20:47:18 -04003329{
Alan Kwong6bc64622017-02-04 17:36:03 -08003330 struct sde_hw_rotator *rot;
Alan Kwong4b416162017-08-11 21:03:10 -04003331 const u32 *pixfmts;
Alan Kwong6bc64622017-02-04 17:36:03 -08003332 u32 num_pixfmt;
Alan Kwongda16e442016-08-14 20:47:18 -04003333 int i;
3334
Alan Kwong6bc64622017-02-04 17:36:03 -08003335 if (!mgr || !mgr->hw_data) {
3336 SDEROT_ERR("null parameters\n");
3337 return false;
Alan Kwongda16e442016-08-14 20:47:18 -04003338 }
3339
Alan Kwong6bc64622017-02-04 17:36:03 -08003340 rot = mgr->hw_data;
3341
Alan Kwong4b416162017-08-11 21:03:10 -04003342 if (mode >= SDE_ROTATOR_MODE_MAX) {
3343 SDEROT_ERR("invalid rotator mode %d\n", mode);
3344 return false;
3345 }
3346
Alan Kwong6bc64622017-02-04 17:36:03 -08003347 if (input) {
Alan Kwong4b416162017-08-11 21:03:10 -04003348 pixfmts = rot->inpixfmts[mode];
3349 num_pixfmt = rot->num_inpixfmt[mode];
Alan Kwong6bc64622017-02-04 17:36:03 -08003350 } else {
Alan Kwong4b416162017-08-11 21:03:10 -04003351 pixfmts = rot->outpixfmts[mode];
3352 num_pixfmt = rot->num_outpixfmt[mode];
Alan Kwong6bc64622017-02-04 17:36:03 -08003353 }
3354
3355 if (!pixfmts || !num_pixfmt) {
3356 SDEROT_ERR("invalid pixel format tables\n");
3357 return false;
3358 }
3359
3360 for (i = 0; i < num_pixfmt; i++)
3361 if (pixfmts[i] == pixfmt)
3362 return true;
3363
Alan Kwongda16e442016-08-14 20:47:18 -04003364 return false;
3365}
3366
3367/*
Alan Kwong6bc64622017-02-04 17:36:03 -08003368 * sde_hw_rotator_get_downscale_caps - get scaling capability string
3369 * @mgr: Pointer to rotator manager
3370 * @caps: Pointer to capability string buffer; NULL to return maximum length
3371 * @len: length of capability string buffer
3372 * return: length of capability string
3373 */
3374static int sde_hw_rotator_get_downscale_caps(struct sde_rot_mgr *mgr,
3375 char *caps, int len)
3376{
3377 struct sde_hw_rotator *rot;
3378 int rc = 0;
3379
3380 if (!mgr || !mgr->hw_data) {
3381 SDEROT_ERR("null parameters\n");
3382 return -EINVAL;
3383 }
3384
3385 rot = mgr->hw_data;
3386
3387 if (rot->downscale_caps) {
3388 if (caps)
3389 rc = snprintf(caps, len, "%s", rot->downscale_caps);
3390 else
3391 rc = strlen(rot->downscale_caps);
3392 }
3393
3394 return rc;
3395}
3396
3397/*
Alan Kwongb6c049c2017-03-31 12:50:27 -07003398 * sde_hw_rotator_get_maxlinewidth - get maximum line width supported
3399 * @mgr: Pointer to rotator manager
3400 * return: maximum line width supported by hardware
3401 */
3402static int sde_hw_rotator_get_maxlinewidth(struct sde_rot_mgr *mgr)
3403{
3404 struct sde_hw_rotator *rot;
3405
3406 if (!mgr || !mgr->hw_data) {
3407 SDEROT_ERR("null parameters\n");
3408 return -EINVAL;
3409 }
3410
3411 rot = mgr->hw_data;
3412
3413 return rot->maxlinewidth;
3414}
3415
3416/*
Alan Kwong9487de22016-01-16 22:06:36 -05003417 * sde_hw_rotator_parse_dt - parse r3 specific device tree settings
3418 * @hw_data: Pointer to rotator hw
3419 * @dev: Pointer to platform device
3420 */
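/*
 * Example device tree entries consumed below (property names are taken from
 * this function; the values shown are illustrative only):
 *   qcom,mdss-rot-mode = <0>;
 *   qcom,mdss-highest-bank-bit = <2>;
 *   qcom,sde-ubwc-malsize = <0>;
 *   qcom,sde-ubwc_swizzle = <0>;
 *   qcom,mdss-sbuf-headroom = <20>;
 *   qcom,mdss-rot-linewidth = <4096>;
 */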
3421static int sde_hw_rotator_parse_dt(struct sde_hw_rotator *hw_data,
3422 struct platform_device *dev)
3423{
3424 int ret = 0;
3425 u32 data;
3426
3427 if (!hw_data || !dev)
3428 return -EINVAL;
3429
3430 ret = of_property_read_u32(dev->dev.of_node, "qcom,mdss-rot-mode",
3431 &data);
3432 if (ret) {
3433 SDEROT_DBG("default to regdma off\n");
3434 ret = 0;
3435 hw_data->mode = ROT_REGDMA_OFF;
3436 } else if (data < ROT_REGDMA_MAX) {
3437 SDEROT_DBG("set to regdma mode %d\n", data);
3438 hw_data->mode = data;
3439 } else {
3440 SDEROT_ERR("regdma mode out of range. default to regdma off\n");
3441 hw_data->mode = ROT_REGDMA_OFF;
3442 }
3443
3444 ret = of_property_read_u32(dev->dev.of_node,
3445 "qcom,mdss-highest-bank-bit", &data);
3446 if (ret) {
3447 SDEROT_DBG("default to A5X bank\n");
3448 ret = 0;
3449 hw_data->highest_bank = 2;
3450 } else {
3451 SDEROT_DBG("set highest bank bit to %d\n", data);
3452 hw_data->highest_bank = data;
3453 }
3454
Alan Kwong6bc64622017-02-04 17:36:03 -08003455 ret = of_property_read_u32(dev->dev.of_node,
Alan Kwongfb8eeb22017-02-06 15:00:03 -08003456 "qcom,sde-ubwc-malsize", &data);
3457 if (ret) {
3458 ret = 0;
3459 hw_data->ubwc_malsize = DEFAULT_UBWC_MALSIZE;
3460 } else {
3461 SDEROT_DBG("set ubwc malsize to %d\n", data);
3462 hw_data->ubwc_malsize = data;
3463 }
3464
3465 ret = of_property_read_u32(dev->dev.of_node,
3466 "qcom,sde-ubwc_swizzle", &data);
3467 if (ret) {
3468 ret = 0;
3469 hw_data->ubwc_swizzle = DEFAULT_UBWC_SWIZZLE;
3470 } else {
3471 SDEROT_DBG("set ubwc swizzle to %d\n", data);
3472 hw_data->ubwc_swizzle = data;
3473 }
3474
3475 ret = of_property_read_u32(dev->dev.of_node,
Alan Kwong6bc64622017-02-04 17:36:03 -08003476 "qcom,mdss-sbuf-headroom", &data);
3477 if (ret) {
3478 ret = 0;
3479 hw_data->sbuf_headroom = DEFAULT_SBUF_HEADROOM;
3480 } else {
3481 SDEROT_DBG("set sbuf headroom to %d\n", data);
3482 hw_data->sbuf_headroom = data;
3483 }
3484
Alan Kwongb6c049c2017-03-31 12:50:27 -07003485 ret = of_property_read_u32(dev->dev.of_node,
3486 "qcom,mdss-rot-linewidth", &data);
3487 if (ret) {
3488 ret = 0;
3489 hw_data->maxlinewidth = DEFAULT_MAXLINEWIDTH;
3490 } else {
3491 SDEROT_DBG("set mdss-rot-linewidth to %d\n", data);
3492 hw_data->maxlinewidth = data;
3493 }
3494
Alan Kwong9487de22016-01-16 22:06:36 -05003495 return ret;
3496}
3497
3498/*
3499 * sde_rotator_r3_init - initialize the r3 module
3500 * @mgr: Pointer to rotator manager
3501 *
3502 * This function setup r3 callback functions, parses r3 specific
3503 * device tree settings, installs r3 specific interrupt handler,
3504 * as well as initializes r3 internal data structure.
3505 */
3506int sde_rotator_r3_init(struct sde_rot_mgr *mgr)
3507{
3508 struct sde_hw_rotator *rot;
3509 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
3510 int i;
3511 int ret;
3512
3513 rot = devm_kzalloc(&mgr->pdev->dev, sizeof(*rot), GFP_KERNEL);
3514 if (!rot)
3515 return -ENOMEM;
3516
3517 mgr->hw_data = rot;
3518 mgr->queue_count = ROT_QUEUE_MAX;
3519
3520 rot->mdss_base = mdata->sde_io.base;
3521 rot->pdev = mgr->pdev;
Alan Kwong6bc64622017-02-04 17:36:03 -08003522 rot->koff_timeout = KOFF_TIMEOUT;
3523 rot->vid_trigger = ROTTOP_START_CTRL_TRIG_SEL_MDP;
3524 rot->cmd_trigger = ROTTOP_START_CTRL_TRIG_SEL_MDP;
Alan Kwong9487de22016-01-16 22:06:36 -05003525
3526 /* Assign ops */
3527 mgr->ops_hw_destroy = sde_hw_rotator_destroy;
3528 mgr->ops_hw_alloc = sde_hw_rotator_alloc_ext;
3529 mgr->ops_hw_free = sde_hw_rotator_free_ext;
3530 mgr->ops_config_hw = sde_hw_rotator_config;
Clarence Ip3ce07c02017-08-11 16:21:45 -04003531 mgr->ops_cancel_hw = sde_hw_rotator_cancel;
Alan Kwong9487de22016-01-16 22:06:36 -05003532 mgr->ops_kickoff_entry = sde_hw_rotator_kickoff;
3533 mgr->ops_wait_for_entry = sde_hw_rotator_wait4done;
3534 mgr->ops_hw_validate_entry = sde_hw_rotator_validate_entry;
3535 mgr->ops_hw_show_caps = sde_hw_rotator_show_caps;
3536 mgr->ops_hw_show_state = sde_hw_rotator_show_state;
3537 mgr->ops_hw_create_debugfs = sde_rotator_r3_create_debugfs;
Alan Kwongda16e442016-08-14 20:47:18 -04003538 mgr->ops_hw_get_pixfmt = sde_hw_rotator_get_pixfmt;
3539 mgr->ops_hw_is_valid_pixfmt = sde_hw_rotator_is_valid_pixfmt;
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04003540 mgr->ops_hw_pre_pmevent = sde_hw_rotator_pre_pmevent;
3541 mgr->ops_hw_post_pmevent = sde_hw_rotator_post_pmevent;
Alan Kwong6bc64622017-02-04 17:36:03 -08003542 mgr->ops_hw_get_downscale_caps = sde_hw_rotator_get_downscale_caps;
Alan Kwongb6c049c2017-03-31 12:50:27 -07003543 mgr->ops_hw_get_maxlinewidth = sde_hw_rotator_get_maxlinewidth;
Alan Kwong9487de22016-01-16 22:06:36 -05003544
3545 ret = sde_hw_rotator_parse_dt(mgr->hw_data, mgr->pdev);
3546 if (ret)
3547 goto error_parse_dt;
3548
3549 rot->irq_num = platform_get_irq(mgr->pdev, 0);
Lloyd Atkinson073635b2017-08-17 17:03:59 -04003550 if (rot->irq_num == -EPROBE_DEFER) {
3551		SDEROT_INFO("irq master not ready, defer probe\n");
3552 return -EPROBE_DEFER;
3553 } else if (rot->irq_num < 0) {
3554		SDEROT_ERR("failed to get rotator irq, falling back to polling\n");
Alan Kwong9487de22016-01-16 22:06:36 -05003555 } else {
3556 if (rot->mode == ROT_REGDMA_OFF)
3557 ret = devm_request_threaded_irq(&mgr->pdev->dev,
3558 rot->irq_num,
3559 sde_hw_rotator_rotirq_handler,
3560 NULL, 0, "sde_rotator_r3", rot);
3561 else
3562 ret = devm_request_threaded_irq(&mgr->pdev->dev,
3563 rot->irq_num,
3564 sde_hw_rotator_regdmairq_handler,
3565 NULL, 0, "sde_rotator_r3", rot);
3566 if (ret) {
3567 SDEROT_ERR("fail to request irq r:%d\n", ret);
3568 rot->irq_num = -1;
3569 } else {
3570 disable_irq(rot->irq_num);
3571 }
3572 }
Alan Kwong818b7fc2016-07-24 22:07:41 -04003573 atomic_set(&rot->irq_enabled, 0);
Alan Kwong9487de22016-01-16 22:06:36 -05003574
3575 setup_rotator_ops(&rot->ops, rot->mode);
3576
3577 spin_lock_init(&rot->rotctx_lock);
3578 spin_lock_init(&rot->rotisr_lock);
3579
3580 /* REGDMA initialization */
3581 if (rot->mode == ROT_REGDMA_OFF) {
3582 for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
Benjamin Chanda32f8b2017-09-20 17:11:27 -04003583 rot->cmd_wr_ptr[0][i] = (char __iomem *)(
3584 &rot->cmd_queue[
3585 SDE_HW_ROT_REGDMA_SEG_SIZE * i]);
Alan Kwong9487de22016-01-16 22:06:36 -05003586 } else {
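		/*
		 * With regdma enabled, command write pointers map directly
		 * into the REGDMA command RAM: high priority queue segments
		 * are laid out first, followed by the low priority queue
		 * segments.
		 */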
3587 for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
3588 rot->cmd_wr_ptr[ROT_QUEUE_HIGH_PRIORITY][i] =
Benjamin Chanda32f8b2017-09-20 17:11:27 -04003589 rot->mdss_base +
Alan Kwong9487de22016-01-16 22:06:36 -05003590 REGDMA_RAM_REGDMA_CMD_RAM +
Benjamin Chanda32f8b2017-09-20 17:11:27 -04003591 SDE_HW_ROT_REGDMA_SEG_SIZE * 4 * i;
Alan Kwong9487de22016-01-16 22:06:36 -05003592
3593 for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
3594 rot->cmd_wr_ptr[ROT_QUEUE_LOW_PRIORITY][i] =
Benjamin Chanda32f8b2017-09-20 17:11:27 -04003595 rot->mdss_base +
Alan Kwong9487de22016-01-16 22:06:36 -05003596 REGDMA_RAM_REGDMA_CMD_RAM +
3597 SDE_HW_ROT_REGDMA_SEG_SIZE * 4 *
Benjamin Chanda32f8b2017-09-20 17:11:27 -04003598 (i + SDE_HW_ROT_REGDMA_TOTAL_CTX);
Alan Kwong9487de22016-01-16 22:06:36 -05003599 }
3600
Alan Kwong6bc64622017-02-04 17:36:03 -08003601 for (i = 0; i < ROT_QUEUE_MAX; i++) {
3602 atomic_set(&rot->timestamp[i], 0);
3603 INIT_LIST_HEAD(&rot->sbuf_ctx[i]);
3604 }
Alan Kwong9487de22016-01-16 22:06:36 -05003605
3606 ret = sde_rotator_hw_rev_init(rot);
3607 if (ret)
3608 goto error_hw_rev_init;
3609
Alan Kwong315cd772016-08-03 22:29:42 -04003610	/* set rotator CBCR to shut off memory/periphery on clock off. */
Clarence Ip77c053d2017-04-24 19:26:37 -07003611 clk_set_flags(mgr->rot_clk[SDE_ROTATOR_CLK_MDSS_ROT].clk,
Alan Kwong315cd772016-08-03 22:29:42 -04003612 CLKFLAG_NORETAIN_MEM);
Clarence Ip77c053d2017-04-24 19:26:37 -07003613 clk_set_flags(mgr->rot_clk[SDE_ROTATOR_CLK_MDSS_ROT].clk,
Alan Kwong315cd772016-08-03 22:29:42 -04003614 CLKFLAG_NORETAIN_PERIPH);
3615
Benjamin Chan53e3bce2016-08-31 14:43:29 -04003616 mdata->sde_rot_hw = rot;
Alan Kwong9487de22016-01-16 22:06:36 -05003617 return 0;
3618error_hw_rev_init:
3619 if (rot->irq_num >= 0)
3620		devm_free_irq(&mgr->pdev->dev, rot->irq_num, rot);
3621 devm_kfree(&mgr->pdev->dev, mgr->hw_data);
3622error_parse_dt:
3623 return ret;
3624}