Benjamin Chan99eb63b2016-12-21 15:45:26 -05001/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
Alan Kwong9487de22016-01-16 22:06:36 -05002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
Alan Kwong4b416162017-08-11 21:03:10 -040014#define pr_fmt(fmt) "%s:%d: " fmt, __func__, __LINE__
Alan Kwong9487de22016-01-16 22:06:36 -050015
16#include <linux/platform_device.h>
17#include <linux/module.h>
18#include <linux/fs.h>
19#include <linux/file.h>
Alan Kwong9487de22016-01-16 22:06:36 -050020#include <linux/delay.h>
21#include <linux/debugfs.h>
22#include <linux/interrupt.h>
23#include <linux/dma-mapping.h>
24#include <linux/dma-buf.h>
25#include <linux/msm_ion.h>
Alan Kwong6ce448d2016-11-24 18:45:20 -080026#include <linux/clk.h>
27#include <linux/clk/qcom.h>
Alan Kwong9487de22016-01-16 22:06:36 -050028
29#include "sde_rotator_core.h"
30#include "sde_rotator_util.h"
31#include "sde_rotator_smmu.h"
32#include "sde_rotator_r3.h"
33#include "sde_rotator_r3_internal.h"
34#include "sde_rotator_r3_hwio.h"
35#include "sde_rotator_r3_debug.h"
36#include "sde_rotator_trace.h"
Benjamin Chan53e3bce2016-08-31 14:43:29 -040037#include "sde_rotator_debug.h"
Alan Kwong9487de22016-01-16 22:06:36 -050038
Benjamin Chan99eb63b2016-12-21 15:45:26 -050039#define RES_UHD (3840*2160)
Benjamin Chan2f6fc402017-09-27 16:15:33 -040040#define MS_TO_US(t) ((t) * USEC_PER_MSEC)
Benjamin Chan99eb63b2016-12-21 15:45:26 -050041
42/* traffic shaping clock ticks = finish_time x 19.2MHz */
43#define TRAFFIC_SHAPE_CLKTICK_14MS 268800
44#define TRAFFIC_SHAPE_CLKTICK_12MS 230400
Alan Kwong498d59f2017-02-11 18:56:34 -080045#define TRAFFIC_SHAPE_VSYNC_CLK 19200000
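/*
 * Worked arithmetic for the tick counts above (derived from the
 * 19.2 MHz tick rate stated in the comment, not an additional
 * hardware requirement):
 *   14 ms x 19,200,000 ticks/s = 268,800 ticks
 *   12 ms x 19,200,000 ticks/s = 230,400 ticks
 */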
Benjamin Chan99eb63b2016-12-21 15:45:26 -050046
Alan Kwong9487de22016-01-16 22:06:36 -050047/* XIN mapping */
48#define XIN_SSPP 0
49#define XIN_WRITEBACK 1
50
 51/* wait for at most 2 vsyncs at the lowest refresh rate (24 Hz) */
Benjamin Chan2f6fc402017-09-27 16:15:33 -040052#define KOFF_TIMEOUT (42 * 8)
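/*
 * One reading of the value above (an interpretation, not authoritative):
 * a single vsync period at 24 Hz is ~41.7 ms, so 42 approximates one
 * vsync in milliseconds and the x8 factor adds margin beyond the
 * two-vsync budget mentioned in the comment.
 */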
Alan Kwong6bc64622017-02-04 17:36:03 -080053
Clarence Ip19339b32017-10-14 20:59:00 -040054/*
55 * When in sbuf mode, select a much longer wait, to allow the other driver
56 * to detect timeouts and abort if necessary.
57 */
Clarence Ip2d7d24f2017-11-03 14:37:51 -040058#define KOFF_TIMEOUT_SBUF (10000)
Clarence Ip19339b32017-10-14 20:59:00 -040059
Alan Kwong6bc64622017-02-04 17:36:03 -080060/* default stream buffer headroom in lines */
61#define DEFAULT_SBUF_HEADROOM 20
Clarence Ip37e013c2017-05-04 12:23:13 -070062#define DEFAULT_UBWC_MALSIZE 0
63#define DEFAULT_UBWC_SWIZZLE 0
Alan Kwong9487de22016-01-16 22:06:36 -050064
Alan Kwongb6c049c2017-03-31 12:50:27 -070065#define DEFAULT_MAXLINEWIDTH 4096
66
Clarence Ip77cadd12017-06-19 17:51:46 -040067/* stride alignment requirement for avoiding partial writes */
68#define PARTIAL_WRITE_ALIGNMENT 0x1F
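/*
 * 0x1F masks the low five bits of a stride, i.e. a destination stride
 * counts as aligned only when it is a multiple of 32 bytes; this is the
 * check applied in sde_hw_rotator_setup_wbengine before keeping the
 * no-partial-writes bit set.
 */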
69
Alan Kwong9487de22016-01-16 22:06:36 -050070/* Macro for constructing the REGDMA command */
71#define SDE_REGDMA_WRITE(p, off, data) \
72 do { \
Alan Kwong6bc64622017-02-04 17:36:03 -080073 SDEROT_DBG("SDEREG.W:[%s:0x%X] <= 0x%X\n", #off, (off),\
74 (u32)(data));\
Benjamin Chanda32f8b2017-09-20 17:11:27 -040075 writel_relaxed_no_log( \
76 (REGDMA_OP_REGWRITE | \
77 ((off) & REGDMA_ADDR_OFFSET_MASK)), \
78 p); \
79 p += sizeof(u32); \
80 writel_relaxed_no_log(data, p); \
81 p += sizeof(u32); \
Alan Kwong9487de22016-01-16 22:06:36 -050082 } while (0)
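/*
 * For reference, SDE_REGDMA_WRITE emits two 32-bit words into the REGDMA
 * command buffer pointed to by p:
 *   word 0: REGDMA_OP_REGWRITE | (off & REGDMA_ADDR_OFFSET_MASK)
 *   word 1: data
 * advancing p by sizeof(u32) after each word. The MODIFY and BLKWRITE
 * macros below follow the same opcode-then-payload layout.
 */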
83
84#define SDE_REGDMA_MODIFY(p, off, mask, data) \
85 do { \
Alan Kwong6bc64622017-02-04 17:36:03 -080086 SDEROT_DBG("SDEREG.M:[%s:0x%X] <= 0x%X\n", #off, (off),\
87 (u32)(data));\
Benjamin Chanda32f8b2017-09-20 17:11:27 -040088 writel_relaxed_no_log( \
89 (REGDMA_OP_REGMODIFY | \
90 ((off) & REGDMA_ADDR_OFFSET_MASK)), \
91 p); \
92 p += sizeof(u32); \
93 writel_relaxed_no_log(mask, p); \
94 p += sizeof(u32); \
95 writel_relaxed_no_log(data, p); \
96 p += sizeof(u32); \
Alan Kwong9487de22016-01-16 22:06:36 -050097 } while (0)
98
99#define SDE_REGDMA_BLKWRITE_INC(p, off, len) \
100 do { \
Alan Kwong6bc64622017-02-04 17:36:03 -0800101 SDEROT_DBG("SDEREG.B:[%s:0x%X:0x%X]\n", #off, (off),\
102 (u32)(len));\
Benjamin Chanda32f8b2017-09-20 17:11:27 -0400103 writel_relaxed_no_log( \
104 (REGDMA_OP_BLKWRITE_INC | \
105 ((off) & REGDMA_ADDR_OFFSET_MASK)), \
106 p); \
107 p += sizeof(u32); \
108 writel_relaxed_no_log(len, p); \
109 p += sizeof(u32); \
Alan Kwong9487de22016-01-16 22:06:36 -0500110 } while (0)
111
112#define SDE_REGDMA_BLKWRITE_DATA(p, data) \
113 do { \
Alan Kwong6bc64622017-02-04 17:36:03 -0800114 SDEROT_DBG("SDEREG.I:[:] <= 0x%X\n", (u32)(data));\
Benjamin Chanda32f8b2017-09-20 17:11:27 -0400115 writel_relaxed_no_log(data, p); \
116 p += sizeof(u32); \
117 } while (0)
118
119#define SDE_REGDMA_READ(p, data) \
120 do { \
121 data = readl_relaxed_no_log(p); \
122 p += sizeof(u32); \
Alan Kwong9487de22016-01-16 22:06:36 -0500123 } while (0)
124
125/* Macro for directly accessing mapped registers */
126#define SDE_ROTREG_WRITE(base, off, data) \
Alan Kwong6bc64622017-02-04 17:36:03 -0800127 do { \
128 SDEROT_DBG("SDEREG.D:[%s:0x%X] <= 0x%X\n", #off, (off)\
129 , (u32)(data));\
130 writel_relaxed(data, (base + (off))); \
131 } while (0)
Alan Kwong9487de22016-01-16 22:06:36 -0500132
133#define SDE_ROTREG_READ(base, off) \
134 readl_relaxed(base + (off))
135
Alan Kwong94e0f602017-11-07 23:01:44 -0500136#define SDE_ROTTOP_IN_OFFLINE_MODE(_rottop_op_mode_) \
137 (((_rottop_op_mode_) & ROTTOP_OP_MODE_ROT_OUT_MASK) == 0)
138
Alan Kwong4b416162017-08-11 21:03:10 -0400139static const u32 sde_hw_rotator_v3_inpixfmts[] = {
Alan Kwongda16e442016-08-14 20:47:18 -0400140 SDE_PIX_FMT_XRGB_8888,
141 SDE_PIX_FMT_ARGB_8888,
142 SDE_PIX_FMT_ABGR_8888,
143 SDE_PIX_FMT_RGBA_8888,
144 SDE_PIX_FMT_BGRA_8888,
145 SDE_PIX_FMT_RGBX_8888,
146 SDE_PIX_FMT_BGRX_8888,
147 SDE_PIX_FMT_XBGR_8888,
148 SDE_PIX_FMT_RGBA_5551,
149 SDE_PIX_FMT_ARGB_1555,
150 SDE_PIX_FMT_ABGR_1555,
151 SDE_PIX_FMT_BGRA_5551,
152 SDE_PIX_FMT_BGRX_5551,
153 SDE_PIX_FMT_RGBX_5551,
154 SDE_PIX_FMT_XBGR_1555,
155 SDE_PIX_FMT_XRGB_1555,
156 SDE_PIX_FMT_ARGB_4444,
157 SDE_PIX_FMT_RGBA_4444,
158 SDE_PIX_FMT_BGRA_4444,
159 SDE_PIX_FMT_ABGR_4444,
160 SDE_PIX_FMT_RGBX_4444,
161 SDE_PIX_FMT_XRGB_4444,
162 SDE_PIX_FMT_BGRX_4444,
163 SDE_PIX_FMT_XBGR_4444,
164 SDE_PIX_FMT_RGB_888,
165 SDE_PIX_FMT_BGR_888,
166 SDE_PIX_FMT_RGB_565,
167 SDE_PIX_FMT_BGR_565,
168 SDE_PIX_FMT_Y_CB_CR_H2V2,
169 SDE_PIX_FMT_Y_CR_CB_H2V2,
170 SDE_PIX_FMT_Y_CR_CB_GH2V2,
171 SDE_PIX_FMT_Y_CBCR_H2V2,
172 SDE_PIX_FMT_Y_CRCB_H2V2,
173 SDE_PIX_FMT_Y_CBCR_H1V2,
174 SDE_PIX_FMT_Y_CRCB_H1V2,
175 SDE_PIX_FMT_Y_CBCR_H2V1,
176 SDE_PIX_FMT_Y_CRCB_H2V1,
177 SDE_PIX_FMT_YCBYCR_H2V1,
178 SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
179 SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
180 SDE_PIX_FMT_RGBA_8888_UBWC,
181 SDE_PIX_FMT_RGBX_8888_UBWC,
182 SDE_PIX_FMT_RGB_565_UBWC,
183 SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
184 SDE_PIX_FMT_RGBA_1010102,
185 SDE_PIX_FMT_RGBX_1010102,
186 SDE_PIX_FMT_ARGB_2101010,
187 SDE_PIX_FMT_XRGB_2101010,
188 SDE_PIX_FMT_BGRA_1010102,
189 SDE_PIX_FMT_BGRX_1010102,
190 SDE_PIX_FMT_ABGR_2101010,
191 SDE_PIX_FMT_XBGR_2101010,
192 SDE_PIX_FMT_RGBA_1010102_UBWC,
193 SDE_PIX_FMT_RGBX_1010102_UBWC,
194 SDE_PIX_FMT_Y_CBCR_H2V2_P010,
195 SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
196 SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
197};
198
Alan Kwong4b416162017-08-11 21:03:10 -0400199static const u32 sde_hw_rotator_v3_outpixfmts[] = {
Alan Kwongda16e442016-08-14 20:47:18 -0400200 SDE_PIX_FMT_XRGB_8888,
201 SDE_PIX_FMT_ARGB_8888,
202 SDE_PIX_FMT_ABGR_8888,
203 SDE_PIX_FMT_RGBA_8888,
204 SDE_PIX_FMT_BGRA_8888,
205 SDE_PIX_FMT_RGBX_8888,
206 SDE_PIX_FMT_BGRX_8888,
207 SDE_PIX_FMT_XBGR_8888,
208 SDE_PIX_FMT_RGBA_5551,
209 SDE_PIX_FMT_ARGB_1555,
210 SDE_PIX_FMT_ABGR_1555,
211 SDE_PIX_FMT_BGRA_5551,
212 SDE_PIX_FMT_BGRX_5551,
213 SDE_PIX_FMT_RGBX_5551,
214 SDE_PIX_FMT_XBGR_1555,
215 SDE_PIX_FMT_XRGB_1555,
216 SDE_PIX_FMT_ARGB_4444,
217 SDE_PIX_FMT_RGBA_4444,
218 SDE_PIX_FMT_BGRA_4444,
219 SDE_PIX_FMT_ABGR_4444,
220 SDE_PIX_FMT_RGBX_4444,
221 SDE_PIX_FMT_XRGB_4444,
222 SDE_PIX_FMT_BGRX_4444,
223 SDE_PIX_FMT_XBGR_4444,
224 SDE_PIX_FMT_RGB_888,
225 SDE_PIX_FMT_BGR_888,
226 SDE_PIX_FMT_RGB_565,
227 SDE_PIX_FMT_BGR_565,
228 /* SDE_PIX_FMT_Y_CB_CR_H2V2 */
229 /* SDE_PIX_FMT_Y_CR_CB_H2V2 */
230 /* SDE_PIX_FMT_Y_CR_CB_GH2V2 */
231 SDE_PIX_FMT_Y_CBCR_H2V2,
232 SDE_PIX_FMT_Y_CRCB_H2V2,
233 SDE_PIX_FMT_Y_CBCR_H1V2,
234 SDE_PIX_FMT_Y_CRCB_H1V2,
235 SDE_PIX_FMT_Y_CBCR_H2V1,
236 SDE_PIX_FMT_Y_CRCB_H2V1,
237 /* SDE_PIX_FMT_YCBYCR_H2V1 */
238 SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
239 SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
240 SDE_PIX_FMT_RGBA_8888_UBWC,
241 SDE_PIX_FMT_RGBX_8888_UBWC,
242 SDE_PIX_FMT_RGB_565_UBWC,
243 SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
244 SDE_PIX_FMT_RGBA_1010102,
245 SDE_PIX_FMT_RGBX_1010102,
246 /* SDE_PIX_FMT_ARGB_2101010 */
247 /* SDE_PIX_FMT_XRGB_2101010 */
248 SDE_PIX_FMT_BGRA_1010102,
249 SDE_PIX_FMT_BGRX_1010102,
250 /* SDE_PIX_FMT_ABGR_2101010 */
251 /* SDE_PIX_FMT_XBGR_2101010 */
252 SDE_PIX_FMT_RGBA_1010102_UBWC,
253 SDE_PIX_FMT_RGBX_1010102_UBWC,
254 SDE_PIX_FMT_Y_CBCR_H2V2_P010,
255 SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
256 SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
257};
258
Alan Kwong4b416162017-08-11 21:03:10 -0400259static const u32 sde_hw_rotator_v4_inpixfmts[] = {
Alan Kwong6bc64622017-02-04 17:36:03 -0800260 SDE_PIX_FMT_XRGB_8888,
261 SDE_PIX_FMT_ARGB_8888,
262 SDE_PIX_FMT_ABGR_8888,
263 SDE_PIX_FMT_RGBA_8888,
264 SDE_PIX_FMT_BGRA_8888,
265 SDE_PIX_FMT_RGBX_8888,
266 SDE_PIX_FMT_BGRX_8888,
267 SDE_PIX_FMT_XBGR_8888,
268 SDE_PIX_FMT_RGBA_5551,
269 SDE_PIX_FMT_ARGB_1555,
270 SDE_PIX_FMT_ABGR_1555,
271 SDE_PIX_FMT_BGRA_5551,
272 SDE_PIX_FMT_BGRX_5551,
273 SDE_PIX_FMT_RGBX_5551,
274 SDE_PIX_FMT_XBGR_1555,
275 SDE_PIX_FMT_XRGB_1555,
276 SDE_PIX_FMT_ARGB_4444,
277 SDE_PIX_FMT_RGBA_4444,
278 SDE_PIX_FMT_BGRA_4444,
279 SDE_PIX_FMT_ABGR_4444,
280 SDE_PIX_FMT_RGBX_4444,
281 SDE_PIX_FMT_XRGB_4444,
282 SDE_PIX_FMT_BGRX_4444,
283 SDE_PIX_FMT_XBGR_4444,
284 SDE_PIX_FMT_RGB_888,
285 SDE_PIX_FMT_BGR_888,
286 SDE_PIX_FMT_RGB_565,
287 SDE_PIX_FMT_BGR_565,
288 SDE_PIX_FMT_Y_CB_CR_H2V2,
289 SDE_PIX_FMT_Y_CR_CB_H2V2,
290 SDE_PIX_FMT_Y_CR_CB_GH2V2,
291 SDE_PIX_FMT_Y_CBCR_H2V2,
292 SDE_PIX_FMT_Y_CRCB_H2V2,
293 SDE_PIX_FMT_Y_CBCR_H1V2,
294 SDE_PIX_FMT_Y_CRCB_H1V2,
295 SDE_PIX_FMT_Y_CBCR_H2V1,
296 SDE_PIX_FMT_Y_CRCB_H2V1,
297 SDE_PIX_FMT_YCBYCR_H2V1,
298 SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
299 SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
300 SDE_PIX_FMT_RGBA_8888_UBWC,
301 SDE_PIX_FMT_RGBX_8888_UBWC,
302 SDE_PIX_FMT_RGB_565_UBWC,
303 SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
304 SDE_PIX_FMT_RGBA_1010102,
305 SDE_PIX_FMT_RGBX_1010102,
306 SDE_PIX_FMT_ARGB_2101010,
307 SDE_PIX_FMT_XRGB_2101010,
308 SDE_PIX_FMT_BGRA_1010102,
309 SDE_PIX_FMT_BGRX_1010102,
310 SDE_PIX_FMT_ABGR_2101010,
311 SDE_PIX_FMT_XBGR_2101010,
312 SDE_PIX_FMT_RGBA_1010102_UBWC,
313 SDE_PIX_FMT_RGBX_1010102_UBWC,
314 SDE_PIX_FMT_Y_CBCR_H2V2_P010,
Alan Kwong25e46862017-11-21 19:13:33 -0500315 SDE_PIX_FMT_Y_CBCR_H2V2_P010_VENUS,
Alan Kwong6bc64622017-02-04 17:36:03 -0800316 SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
317 SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
Alan Kwong2ad00bc2017-02-06 23:32:17 -0800318 SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC,
319 SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE,
Alan Kwong6bc64622017-02-04 17:36:03 -0800320 SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
321 SDE_PIX_FMT_Y_CRCB_H2V2_TILE,
322 SDE_PIX_FMT_XRGB_8888_TILE,
323 SDE_PIX_FMT_ARGB_8888_TILE,
324 SDE_PIX_FMT_ABGR_8888_TILE,
325 SDE_PIX_FMT_XBGR_8888_TILE,
326 SDE_PIX_FMT_RGBA_8888_TILE,
327 SDE_PIX_FMT_BGRA_8888_TILE,
328 SDE_PIX_FMT_RGBX_8888_TILE,
329 SDE_PIX_FMT_BGRX_8888_TILE,
330 SDE_PIX_FMT_RGBA_1010102_TILE,
331 SDE_PIX_FMT_RGBX_1010102_TILE,
332 SDE_PIX_FMT_ARGB_2101010_TILE,
333 SDE_PIX_FMT_XRGB_2101010_TILE,
334 SDE_PIX_FMT_BGRA_1010102_TILE,
335 SDE_PIX_FMT_BGRX_1010102_TILE,
336 SDE_PIX_FMT_ABGR_2101010_TILE,
337 SDE_PIX_FMT_XBGR_2101010_TILE,
338};
339
Alan Kwong4b416162017-08-11 21:03:10 -0400340static const u32 sde_hw_rotator_v4_outpixfmts[] = {
Alan Kwong6bc64622017-02-04 17:36:03 -0800341 SDE_PIX_FMT_XRGB_8888,
342 SDE_PIX_FMT_ARGB_8888,
343 SDE_PIX_FMT_ABGR_8888,
344 SDE_PIX_FMT_RGBA_8888,
345 SDE_PIX_FMT_BGRA_8888,
346 SDE_PIX_FMT_RGBX_8888,
347 SDE_PIX_FMT_BGRX_8888,
348 SDE_PIX_FMT_XBGR_8888,
349 SDE_PIX_FMT_RGBA_5551,
350 SDE_PIX_FMT_ARGB_1555,
351 SDE_PIX_FMT_ABGR_1555,
352 SDE_PIX_FMT_BGRA_5551,
353 SDE_PIX_FMT_BGRX_5551,
354 SDE_PIX_FMT_RGBX_5551,
355 SDE_PIX_FMT_XBGR_1555,
356 SDE_PIX_FMT_XRGB_1555,
357 SDE_PIX_FMT_ARGB_4444,
358 SDE_PIX_FMT_RGBA_4444,
359 SDE_PIX_FMT_BGRA_4444,
360 SDE_PIX_FMT_ABGR_4444,
361 SDE_PIX_FMT_RGBX_4444,
362 SDE_PIX_FMT_XRGB_4444,
363 SDE_PIX_FMT_BGRX_4444,
364 SDE_PIX_FMT_XBGR_4444,
365 SDE_PIX_FMT_RGB_888,
366 SDE_PIX_FMT_BGR_888,
367 SDE_PIX_FMT_RGB_565,
368 SDE_PIX_FMT_BGR_565,
369 /* SDE_PIX_FMT_Y_CB_CR_H2V2 */
370 /* SDE_PIX_FMT_Y_CR_CB_H2V2 */
371 /* SDE_PIX_FMT_Y_CR_CB_GH2V2 */
372 SDE_PIX_FMT_Y_CBCR_H2V2,
373 SDE_PIX_FMT_Y_CRCB_H2V2,
374 SDE_PIX_FMT_Y_CBCR_H1V2,
375 SDE_PIX_FMT_Y_CRCB_H1V2,
376 SDE_PIX_FMT_Y_CBCR_H2V1,
377 SDE_PIX_FMT_Y_CRCB_H2V1,
378 /* SDE_PIX_FMT_YCBYCR_H2V1 */
379 SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
380 SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
381 SDE_PIX_FMT_RGBA_8888_UBWC,
382 SDE_PIX_FMT_RGBX_8888_UBWC,
383 SDE_PIX_FMT_RGB_565_UBWC,
384 SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
385 SDE_PIX_FMT_RGBA_1010102,
386 SDE_PIX_FMT_RGBX_1010102,
Alan Kwong7f59c872017-05-31 11:36:11 -0400387 SDE_PIX_FMT_ARGB_2101010,
388 SDE_PIX_FMT_XRGB_2101010,
Alan Kwong6bc64622017-02-04 17:36:03 -0800389 SDE_PIX_FMT_BGRA_1010102,
390 SDE_PIX_FMT_BGRX_1010102,
Alan Kwong7f59c872017-05-31 11:36:11 -0400391 SDE_PIX_FMT_ABGR_2101010,
392 SDE_PIX_FMT_XBGR_2101010,
Alan Kwong6bc64622017-02-04 17:36:03 -0800393 SDE_PIX_FMT_RGBA_1010102_UBWC,
394 SDE_PIX_FMT_RGBX_1010102_UBWC,
395 SDE_PIX_FMT_Y_CBCR_H2V2_P010,
Alan Kwong25e46862017-11-21 19:13:33 -0500396 SDE_PIX_FMT_Y_CBCR_H2V2_P010_VENUS,
Alan Kwong6bc64622017-02-04 17:36:03 -0800397 SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
398 SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
Alan Kwong2ad00bc2017-02-06 23:32:17 -0800399 SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC,
400 SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE,
Alan Kwong6bc64622017-02-04 17:36:03 -0800401 SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
402 SDE_PIX_FMT_Y_CRCB_H2V2_TILE,
403 SDE_PIX_FMT_XRGB_8888_TILE,
404 SDE_PIX_FMT_ARGB_8888_TILE,
405 SDE_PIX_FMT_ABGR_8888_TILE,
406 SDE_PIX_FMT_XBGR_8888_TILE,
407 SDE_PIX_FMT_RGBA_8888_TILE,
408 SDE_PIX_FMT_BGRA_8888_TILE,
409 SDE_PIX_FMT_RGBX_8888_TILE,
410 SDE_PIX_FMT_BGRX_8888_TILE,
411 SDE_PIX_FMT_RGBA_1010102_TILE,
412 SDE_PIX_FMT_RGBX_1010102_TILE,
413 SDE_PIX_FMT_ARGB_2101010_TILE,
414 SDE_PIX_FMT_XRGB_2101010_TILE,
415 SDE_PIX_FMT_BGRA_1010102_TILE,
416 SDE_PIX_FMT_BGRX_1010102_TILE,
417 SDE_PIX_FMT_ABGR_2101010_TILE,
418 SDE_PIX_FMT_XBGR_2101010_TILE,
419};
420
Alan Kwong4b416162017-08-11 21:03:10 -0400421static const u32 sde_hw_rotator_v4_inpixfmts_sbuf[] = {
422 SDE_PIX_FMT_Y_CBCR_H2V2_P010,
423 SDE_PIX_FMT_Y_CBCR_H2V2,
Alan Kwongd12a4812017-11-30 16:12:57 -0500424 SDE_PIX_FMT_Y_CRCB_H2V2,
Alan Kwong4b416162017-08-11 21:03:10 -0400425 SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
426 SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC,
427 SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
428 SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
429 SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE,
430 SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
431};
432
433static const u32 sde_hw_rotator_v4_outpixfmts_sbuf[] = {
434 SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
435 SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE,
436 SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
437};
438
Benjamin Chan53e3bce2016-08-31 14:43:29 -0400439static struct sde_rot_vbif_debug_bus nrt_vbif_dbg_bus_r3[] = {
Benjamin Chan2d6411a2017-03-28 18:01:53 -0400440 {0x214, 0x21c, 16, 1, 0x200}, /* arb clients main */
Benjamin Chan53e3bce2016-08-31 14:43:29 -0400441 {0x214, 0x21c, 0, 12, 0x13}, /* xin blocks - axi side */
442 {0x21c, 0x214, 0, 12, 0xc}, /* xin blocks - clock side */
443};
444
Benjamin Chan2d6411a2017-03-28 18:01:53 -0400445static struct sde_rot_debug_bus rot_dbgbus_r3[] = {
446 /*
447 * rottop - 0xA8850
448 */
449 /* REGDMA */
450 { 0XA8850, 0, 0 },
451 { 0XA8850, 0, 1 },
452 { 0XA8850, 0, 2 },
453 { 0XA8850, 0, 3 },
454 { 0XA8850, 0, 4 },
455
456 /* ROT_WB */
457 { 0XA8850, 1, 0 },
458 { 0XA8850, 1, 1 },
459 { 0XA8850, 1, 2 },
460 { 0XA8850, 1, 3 },
461 { 0XA8850, 1, 4 },
462 { 0XA8850, 1, 5 },
463 { 0XA8850, 1, 6 },
464 { 0XA8850, 1, 7 },
465
466 /* UBWC_DEC */
467 { 0XA8850, 2, 0 },
468
469 /* UBWC_ENC */
470 { 0XA8850, 3, 0 },
471
472 /* ROT_FETCH_0 */
473 { 0XA8850, 4, 0 },
474 { 0XA8850, 4, 1 },
475 { 0XA8850, 4, 2 },
476 { 0XA8850, 4, 3 },
477 { 0XA8850, 4, 4 },
478 { 0XA8850, 4, 5 },
479 { 0XA8850, 4, 6 },
480 { 0XA8850, 4, 7 },
481
482 /* ROT_FETCH_1 */
483 { 0XA8850, 5, 0 },
484 { 0XA8850, 5, 1 },
485 { 0XA8850, 5, 2 },
486 { 0XA8850, 5, 3 },
487 { 0XA8850, 5, 4 },
488 { 0XA8850, 5, 5 },
489 { 0XA8850, 5, 6 },
490 { 0XA8850, 5, 7 },
491
492 /* ROT_FETCH_2 */
493 { 0XA8850, 6, 0 },
494 { 0XA8850, 6, 1 },
495 { 0XA8850, 6, 2 },
496 { 0XA8850, 6, 3 },
497 { 0XA8850, 6, 4 },
498 { 0XA8850, 6, 5 },
499 { 0XA8850, 6, 6 },
500 { 0XA8850, 6, 7 },
501
502 /* ROT_FETCH_3 */
503 { 0XA8850, 7, 0 },
504 { 0XA8850, 7, 1 },
505 { 0XA8850, 7, 2 },
506 { 0XA8850, 7, 3 },
507 { 0XA8850, 7, 4 },
508 { 0XA8850, 7, 5 },
509 { 0XA8850, 7, 6 },
510 { 0XA8850, 7, 7 },
511
512 /* ROT_FETCH_4 */
513 { 0XA8850, 8, 0 },
514 { 0XA8850, 8, 1 },
515 { 0XA8850, 8, 2 },
516 { 0XA8850, 8, 3 },
517 { 0XA8850, 8, 4 },
518 { 0XA8850, 8, 5 },
519 { 0XA8850, 8, 6 },
520 { 0XA8850, 8, 7 },
521
522 /* ROT_UNPACK_0*/
523 { 0XA8850, 9, 0 },
524 { 0XA8850, 9, 1 },
525 { 0XA8850, 9, 2 },
526 { 0XA8850, 9, 3 },
527};
528
Benjamin Chan53e3bce2016-08-31 14:43:29 -0400529static struct sde_rot_regdump sde_rot_r3_regdump[] = {
530 { "SDEROT_ROTTOP", SDE_ROT_ROTTOP_OFFSET, 0x100, SDE_ROT_REGDUMP_READ },
531 { "SDEROT_SSPP", SDE_ROT_SSPP_OFFSET, 0x200, SDE_ROT_REGDUMP_READ },
532 { "SDEROT_WB", SDE_ROT_WB_OFFSET, 0x300, SDE_ROT_REGDUMP_READ },
533 { "SDEROT_REGDMA_CSR", SDE_ROT_REGDMA_OFFSET, 0x100,
534 SDE_ROT_REGDUMP_READ },
535 /*
 536 * Need to perform a SW reset of REGDMA in order to access the
 537 * REGDMA RAM, especially if REGDMA is waiting for Rotator IDLE.
 538 * The REGDMA RAM should be dumped last.
539 */
540 { "SDEROT_REGDMA_RESET", ROTTOP_SW_RESET_OVERRIDE, 1,
541 SDE_ROT_REGDUMP_WRITE },
542 { "SDEROT_REGDMA_RAM", SDE_ROT_REGDMA_RAM_OFFSET, 0x2000,
543 SDE_ROT_REGDUMP_READ },
Benjamin Chan59a06052017-01-12 18:06:03 -0500544 { "SDEROT_VBIF_NRT", SDE_ROT_VBIF_NRT_OFFSET, 0x590,
545 SDE_ROT_REGDUMP_VBIF },
Clarence Ipcd140292017-09-22 16:24:08 -0400546 { "SDEROT_REGDMA_RESET", ROTTOP_SW_RESET_OVERRIDE, 0,
547 SDE_ROT_REGDUMP_WRITE },
Benjamin Chan53e3bce2016-08-31 14:43:29 -0400548};
549
Veera Sundaram Sankaran3f0141e2017-05-10 18:19:29 -0700550struct sde_rot_cdp_params {
551 bool enable;
552 struct sde_mdp_format_params *fmt;
553 u32 offset;
554};
555
Alan Kwong818b7fc2016-07-24 22:07:41 -0400556/* Invalid software timestamp value for initialization */
557#define SDE_REGDMA_SWTS_INVALID (~0)
558
559/**
560 * sde_hw_rotator_elapsed_swts - Find difference of 2 software timestamps
561 * @ts_curr: current software timestamp
562 * @ts_prev: previous software timestamp
563 * @return: the amount ts_curr is ahead of ts_prev
564 */
565static int sde_hw_rotator_elapsed_swts(u32 ts_curr, u32 ts_prev)
566{
567 u32 diff = (ts_curr - ts_prev) & SDE_REGDMA_SWTS_MASK;
568
569 return sign_extend32(diff, (SDE_REGDMA_SWTS_SHIFT - 1));
570}
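/*
 * Illustration: the subtraction is performed modulo the software
 * timestamp field width and then sign extended, so a ts_curr that has
 * wrapped past ts_prev still produces a small positive delta, while a
 * stale ts_curr produces a negative value rather than a huge positive
 * one.
 */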
571
572/**
573 * sde_hw_rotator_pending_swts - Check if the given context is still pending
574 * @rot: Pointer to hw rotator
575 * @ctx: Pointer to rotator context
576 * @pswts: Pointer to returned reference software timestamp, optional
577 * @return: true if context has pending requests
578 */
579static int sde_hw_rotator_pending_swts(struct sde_hw_rotator *rot,
580 struct sde_hw_rotator_context *ctx, u32 *pswts)
581{
582 u32 swts;
583 int ts_diff;
584 bool pending;
585
586 if (ctx->last_regdma_timestamp == SDE_REGDMA_SWTS_INVALID)
587 swts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);
588 else
589 swts = ctx->last_regdma_timestamp;
590
591 if (ctx->q_id == ROT_QUEUE_LOW_PRIORITY)
592 swts >>= SDE_REGDMA_SWTS_SHIFT;
593
594 swts &= SDE_REGDMA_SWTS_MASK;
595
596 ts_diff = sde_hw_rotator_elapsed_swts(ctx->timestamp, swts);
597
598 if (pswts)
599 *pswts = swts;
600
601 pending = (ts_diff > 0) ? true : false;
602
603 SDEROT_DBG("ts:0x%x, queue_id:%d, swts:0x%x, pending:%d\n",
604 ctx->timestamp, ctx->q_id, swts, pending);
Benjamin Chan0f9e61d2016-09-16 16:01:09 -0400605 SDEROT_EVTLOG(ctx->timestamp, swts, ctx->q_id, ts_diff);
Alan Kwong818b7fc2016-07-24 22:07:41 -0400606 return pending;
607}
608
609/**
Alan Kwong6bc64622017-02-04 17:36:03 -0800610 * sde_hw_rotator_update_swts - update software timestamp with given value
611 * @rot: Pointer to hw rotator
 612 * @ctx: Pointer to rotator context
613 * @swts: new software timestamp
614 * @return: new combined swts
615 */
616static u32 sde_hw_rotator_update_swts(struct sde_hw_rotator *rot,
617 struct sde_hw_rotator_context *ctx, u32 swts)
618{
619 u32 mask = SDE_REGDMA_SWTS_MASK;
620
621 swts &= SDE_REGDMA_SWTS_MASK;
622 if (ctx->q_id == ROT_QUEUE_LOW_PRIORITY) {
623 swts <<= SDE_REGDMA_SWTS_SHIFT;
624 mask <<= SDE_REGDMA_SWTS_SHIFT;
625 }
626
627 swts |= (SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG) & ~mask);
628 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG, swts);
629
630 return swts;
631}
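/*
 * Note: both priority queues share REGDMA_TIMESTAMP_REG, with the
 * low-priority timestamp shifted into the upper field, so the
 * read-modify-write above preserves the other queue's timestamp.
 */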
632
633/**
Alan Kwong818b7fc2016-07-24 22:07:41 -0400634 * sde_hw_rotator_enable_irq - Enable hw rotator interrupt with ref. count
635 * Also, clear rotator/regdma irq status.
636 * @rot: Pointer to hw rotator
637 */
638static void sde_hw_rotator_enable_irq(struct sde_hw_rotator *rot)
639{
640 SDEROT_DBG("irq_num:%d enabled:%d\n", rot->irq_num,
641 atomic_read(&rot->irq_enabled));
642
643 if (!atomic_read(&rot->irq_enabled)) {
644 if (rot->mode == ROT_REGDMA_OFF)
645 SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
646 ROT_DONE_MASK);
647 else
648 SDE_ROTREG_WRITE(rot->mdss_base,
649 REGDMA_CSR_REGDMA_INT_CLEAR, REGDMA_INT_MASK);
650
651 enable_irq(rot->irq_num);
652 }
653 atomic_inc(&rot->irq_enabled);
654}
655
656/**
657 * sde_hw_rotator_disable_irq - Disable hw rotator interrupt with ref. count
658 * Also, clear rotator/regdma irq enable masks.
659 * @rot: Pointer to hw rotator
660 */
661static void sde_hw_rotator_disable_irq(struct sde_hw_rotator *rot)
662{
663 SDEROT_DBG("irq_num:%d enabled:%d\n", rot->irq_num,
664 atomic_read(&rot->irq_enabled));
665
666 if (!atomic_read(&rot->irq_enabled)) {
667 SDEROT_ERR("irq %d is already disabled\n", rot->irq_num);
668 return;
669 }
670
671 if (!atomic_dec_return(&rot->irq_enabled)) {
672 if (rot->mode == ROT_REGDMA_OFF)
673 SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_EN, 0);
674 else
675 SDE_ROTREG_WRITE(rot->mdss_base,
676 REGDMA_CSR_REGDMA_INT_EN, 0);
677 /* disable irq after last pending irq is handled, if any */
678 synchronize_irq(rot->irq_num);
679 disable_irq_nosync(rot->irq_num);
680 }
681}
682
Benjamin Chane412a062017-10-17 14:16:50 -0400683static void sde_hw_rotator_halt_vbif_xin_client(void)
684{
685 struct sde_mdp_vbif_halt_params halt_params;
686
687 memset(&halt_params, 0, sizeof(struct sde_mdp_vbif_halt_params));
688 halt_params.xin_id = XIN_SSPP;
689 halt_params.reg_off_mdp_clk_ctrl = MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0;
690 halt_params.bit_off_mdp_clk_ctrl =
691 MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN0;
692 sde_mdp_halt_vbif_xin(&halt_params);
693
694 memset(&halt_params, 0, sizeof(struct sde_mdp_vbif_halt_params));
695 halt_params.xin_id = XIN_WRITEBACK;
696 halt_params.reg_off_mdp_clk_ctrl = MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0;
697 halt_params.bit_off_mdp_clk_ctrl =
698 MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN1;
699 sde_mdp_halt_vbif_xin(&halt_params);
700}
701
Alan Kwong818b7fc2016-07-24 22:07:41 -0400702/**
Benjamin Chan2f6fc402017-09-27 16:15:33 -0400703 * sde_hw_rotator_reset - Reset rotator hardware
704 * @rot: pointer to hw rotator
Alan Kwong94e0f602017-11-07 23:01:44 -0500705 * @ctx: pointer to current rotator context during the hw hang (optional)
Benjamin Chan2f6fc402017-09-27 16:15:33 -0400706 */
707static int sde_hw_rotator_reset(struct sde_hw_rotator *rot,
708 struct sde_hw_rotator_context *ctx)
709{
710 struct sde_hw_rotator_context *rctx = NULL;
711 u32 int_mask = (REGDMA_INT_0_MASK | REGDMA_INT_1_MASK |
712 REGDMA_INT_2_MASK);
713 u32 last_ts[ROT_QUEUE_MAX] = {0,};
714 u32 latest_ts;
715 int elapsed_time, t;
716 int i, j;
717 unsigned long flags;
718
Alan Kwong94e0f602017-11-07 23:01:44 -0500719 if (!rot) {
720 SDEROT_ERR("NULL rotator\n");
Benjamin Chan2f6fc402017-09-27 16:15:33 -0400721 return -EINVAL;
722 }
723
724 /* sw reset the hw rotator */
725 SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_SW_RESET_OVERRIDE, 1);
726 usleep_range(MS_TO_US(10), MS_TO_US(20));
727 SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_SW_RESET_OVERRIDE, 0);
728
Benjamin Chane412a062017-10-17 14:16:50 -0400729 /* halt vbif xin client to ensure no pending transaction */
730 sde_hw_rotator_halt_vbif_xin_client();
731
Alan Kwong94e0f602017-11-07 23:01:44 -0500732 /* if no ctx is specified, skip ctx wake up */
733 if (!ctx)
734 return 0;
735
736 if (ctx->q_id >= ROT_QUEUE_MAX) {
737 SDEROT_ERR("context q_id out of range: %d\n", ctx->q_id);
738 return -EINVAL;
739 }
740
Benjamin Chan2f6fc402017-09-27 16:15:33 -0400741 spin_lock_irqsave(&rot->rotisr_lock, flags);
742
743 /* update timestamp register with current context */
744 last_ts[ctx->q_id] = ctx->timestamp;
745 sde_hw_rotator_update_swts(rot, ctx, ctx->timestamp);
746 SDEROT_EVTLOG(ctx->timestamp);
747
748 /*
749 * Search for any pending rot session, and look for last timestamp
750 * per hw queue.
751 */
752 for (i = 0; i < ROT_QUEUE_MAX; i++) {
753 latest_ts = atomic_read(&rot->timestamp[i]);
754 latest_ts &= SDE_REGDMA_SWTS_MASK;
755 elapsed_time = sde_hw_rotator_elapsed_swts(latest_ts,
756 last_ts[i]);
757
758 for (j = 0; j < SDE_HW_ROT_REGDMA_TOTAL_CTX; j++) {
759 rctx = rot->rotCtx[i][j];
760 if (rctx && rctx != ctx) {
761 rctx->last_regdma_isr_status = int_mask;
762 rctx->last_regdma_timestamp = rctx->timestamp;
763
764 t = sde_hw_rotator_elapsed_swts(latest_ts,
765 rctx->timestamp);
766 if (t < elapsed_time) {
767 elapsed_time = t;
768 last_ts[i] = rctx->timestamp;
769 sde_hw_rotator_update_swts(rot, rctx,
770 last_ts[i]);
771 }
772
773 SDEROT_DBG("rotctx[%d][%d], ts:%d\n",
774 i, j, rctx->timestamp);
775 SDEROT_EVTLOG(i, j, rctx->timestamp,
776 last_ts[i]);
777 }
778 }
779 }
780
 781 /* Finally, wake up all pending rotator contexts in the queues */
782 for (i = 0; i < ROT_QUEUE_MAX; i++) {
783 for (j = 0; j < SDE_HW_ROT_REGDMA_TOTAL_CTX; j++) {
784 rctx = rot->rotCtx[i][j];
785 if (rctx && rctx != ctx)
786 wake_up_all(&rctx->regdma_waitq);
787 }
788 }
789
790 spin_unlock_irqrestore(&rot->rotisr_lock, flags);
791
792 return 0;
793}
794
795/**
Clarence Ipcd140292017-09-22 16:24:08 -0400796 * _sde_hw_rotator_dump_status - Dump hw rotator status on error
Alan Kwong818b7fc2016-07-24 22:07:41 -0400797 * @rot: Pointer to hw rotator
798 */
Clarence Ipcd140292017-09-22 16:24:08 -0400799static void _sde_hw_rotator_dump_status(struct sde_hw_rotator *rot,
800 u32 *ubwcerr)
Alan Kwong818b7fc2016-07-24 22:07:41 -0400801{
Benjamin Chan1b94f952017-01-23 17:42:30 -0500802 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
Benjamin Chan2f6fc402017-09-27 16:15:33 -0400803 u32 reg = 0;
Benjamin Chan1b94f952017-01-23 17:42:30 -0500804
Alan Kwong818b7fc2016-07-24 22:07:41 -0400805 SDEROT_ERR(
806 "op_mode = %x, int_en = %x, int_status = %x\n",
807 SDE_ROTREG_READ(rot->mdss_base,
808 REGDMA_CSR_REGDMA_OP_MODE),
809 SDE_ROTREG_READ(rot->mdss_base,
810 REGDMA_CSR_REGDMA_INT_EN),
811 SDE_ROTREG_READ(rot->mdss_base,
812 REGDMA_CSR_REGDMA_INT_STATUS));
813
814 SDEROT_ERR(
815 "ts = %x, q0_status = %x, q1_status = %x, block_status = %x\n",
816 SDE_ROTREG_READ(rot->mdss_base,
817 REGDMA_TIMESTAMP_REG),
818 SDE_ROTREG_READ(rot->mdss_base,
819 REGDMA_CSR_REGDMA_QUEUE_0_STATUS),
820 SDE_ROTREG_READ(rot->mdss_base,
821 REGDMA_CSR_REGDMA_QUEUE_1_STATUS),
822 SDE_ROTREG_READ(rot->mdss_base,
823 REGDMA_CSR_REGDMA_BLOCK_STATUS));
824
825 SDEROT_ERR(
826 "invalid_cmd_offset = %x, fsm_state = %x\n",
827 SDE_ROTREG_READ(rot->mdss_base,
828 REGDMA_CSR_REGDMA_INVALID_CMD_RAM_OFFSET),
829 SDE_ROTREG_READ(rot->mdss_base,
830 REGDMA_CSR_REGDMA_FSM_STATE));
Benjamin Chan59a06052017-01-12 18:06:03 -0500831
Alan Kwong94e0f602017-11-07 23:01:44 -0500832 SDEROT_ERR("rottop: op_mode = %x, status = %x, clk_status = %x\n",
833 SDE_ROTREG_READ(rot->mdss_base, ROTTOP_OP_MODE),
834 SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS),
835 SDE_ROTREG_READ(rot->mdss_base, ROTTOP_CLK_STATUS));
836
Benjamin Chan2f6fc402017-09-27 16:15:33 -0400837 reg = SDE_ROTREG_READ(rot->mdss_base, ROT_SSPP_UBWC_ERROR_STATUS);
838 if (ubwcerr)
839 *ubwcerr = reg;
Benjamin Chan59a06052017-01-12 18:06:03 -0500840 SDEROT_ERR(
Benjamin Chan2f6fc402017-09-27 16:15:33 -0400841 "UBWC decode status = %x, UBWC encode status = %x\n", reg,
Benjamin Chan59a06052017-01-12 18:06:03 -0500842 SDE_ROTREG_READ(rot->mdss_base, ROT_WB_UBWC_ERROR_STATUS));
Benjamin Chan1b94f952017-01-23 17:42:30 -0500843
844 SDEROT_ERR("VBIF XIN HALT status = %x VBIF AXI HALT status = %x\n",
845 SDE_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL1),
846 SDE_VBIF_READ(mdata, MMSS_VBIF_AXI_HALT_CTRL1));
Alan Kwong6bc64622017-02-04 17:36:03 -0800847
Clarence Ipcd140292017-09-22 16:24:08 -0400848 SDEROT_ERR("sspp unpack wr: plane0 = %x, plane1 = %x, plane2 = %x\n",
849 SDE_ROTREG_READ(rot->mdss_base,
850 ROT_SSPP_FETCH_SMP_WR_PLANE0),
851 SDE_ROTREG_READ(rot->mdss_base,
852 ROT_SSPP_FETCH_SMP_WR_PLANE1),
853 SDE_ROTREG_READ(rot->mdss_base,
854 ROT_SSPP_FETCH_SMP_WR_PLANE2));
855 SDEROT_ERR("sspp unpack rd: plane0 = %x, plane1 = %x, plane2 = %x\n",
856 SDE_ROTREG_READ(rot->mdss_base,
857 ROT_SSPP_SMP_UNPACK_RD_PLANE0),
858 SDE_ROTREG_READ(rot->mdss_base,
859 ROT_SSPP_SMP_UNPACK_RD_PLANE1),
860 SDE_ROTREG_READ(rot->mdss_base,
861 ROT_SSPP_SMP_UNPACK_RD_PLANE2));
862 SDEROT_ERR("sspp: unpack_ln = %x, unpack_blk = %x, fill_lvl = %x\n",
863 SDE_ROTREG_READ(rot->mdss_base,
864 ROT_SSPP_UNPACK_LINE_COUNT),
865 SDE_ROTREG_READ(rot->mdss_base,
866 ROT_SSPP_UNPACK_BLK_COUNT),
867 SDE_ROTREG_READ(rot->mdss_base,
868 ROT_SSPP_FILL_LEVELS));
869
870 SDEROT_ERR("wb: sbuf0 = %x, sbuf1 = %x, sys_cache = %x\n",
871 SDE_ROTREG_READ(rot->mdss_base,
872 ROT_WB_SBUF_STATUS_PLANE0),
873 SDE_ROTREG_READ(rot->mdss_base,
874 ROT_WB_SBUF_STATUS_PLANE1),
875 SDE_ROTREG_READ(rot->mdss_base,
876 ROT_WB_SYS_CACHE_MODE));
Alan Kwong818b7fc2016-07-24 22:07:41 -0400877}
878
Alan Kwong9487de22016-01-16 22:06:36 -0500879/**
880 * sde_hw_rotator_get_ctx(): Retrieve rotator context from rotator HW based
 881 * on provided session_id. Each rotator session has a different session_id.
Clarence Ip9e6c3302017-06-02 11:02:57 -0400882 * @rot: Pointer to rotator hw
883 * @session_id: Identifier for rotator session
884 * @sequence_id: Identifier for rotation request within the session
885 * @q_id: Rotator queue identifier
Alan Kwong9487de22016-01-16 22:06:36 -0500886 */
887static struct sde_hw_rotator_context *sde_hw_rotator_get_ctx(
Clarence Ip9e6c3302017-06-02 11:02:57 -0400888 struct sde_hw_rotator *rot, u32 session_id, u32 sequence_id,
Alan Kwong9487de22016-01-16 22:06:36 -0500889 enum sde_rot_queue_prio q_id)
890{
891 int i;
892 struct sde_hw_rotator_context *ctx = NULL;
893
894 for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++) {
895 ctx = rot->rotCtx[q_id][i];
896
Clarence Ip9e6c3302017-06-02 11:02:57 -0400897 if (ctx && (ctx->session_id == session_id) &&
898 (ctx->sequence_id == sequence_id)) {
Alan Kwong9487de22016-01-16 22:06:36 -0500899 SDEROT_DBG(
Clarence Ip9e6c3302017-06-02 11:02:57 -0400900 "rotCtx slot[%d][%d] ==> ctx:%p | session-id:%d | sequence-id:%d\n",
901 q_id, i, ctx, ctx->session_id,
902 ctx->sequence_id);
Alan Kwong9487de22016-01-16 22:06:36 -0500903 return ctx;
904 }
905 }
906
907 return NULL;
908}
909
910/*
911 * sde_hw_rotator_map_vaddr - map the debug buffer to kernel space
912 * @dbgbuf: Pointer to debug buffer
913 * @buf: Pointer to layer buffer structure
914 * @data: Pointer to h/w mapped buffer structure
915 */
916static void sde_hw_rotator_map_vaddr(struct sde_dbg_buf *dbgbuf,
917 struct sde_layer_buffer *buf, struct sde_mdp_data *data)
918{
919 dbgbuf->dmabuf = data->p[0].srcp_dma_buf;
920 dbgbuf->buflen = data->p[0].srcp_dma_buf->size;
921
922 dbgbuf->vaddr = NULL;
923 dbgbuf->width = buf->width;
924 dbgbuf->height = buf->height;
925
926 if (dbgbuf->dmabuf && (dbgbuf->buflen > 0)) {
Alan Kwong6ce448d2016-11-24 18:45:20 -0800927 dma_buf_begin_cpu_access(dbgbuf->dmabuf, DMA_FROM_DEVICE);
Alan Kwong9487de22016-01-16 22:06:36 -0500928 dbgbuf->vaddr = dma_buf_kmap(dbgbuf->dmabuf, 0);
929 SDEROT_DBG("vaddr mapping: 0x%p/%ld w:%d/h:%d\n",
930 dbgbuf->vaddr, dbgbuf->buflen,
931 dbgbuf->width, dbgbuf->height);
932 }
933}
934
935/*
936 * sde_hw_rotator_unmap_vaddr - unmap the debug buffer from kernel space
937 * @dbgbuf: Pointer to debug buffer
938 */
939static void sde_hw_rotator_unmap_vaddr(struct sde_dbg_buf *dbgbuf)
940{
941 if (dbgbuf->vaddr) {
942 dma_buf_kunmap(dbgbuf->dmabuf, 0, dbgbuf->vaddr);
Alan Kwong6ce448d2016-11-24 18:45:20 -0800943 dma_buf_end_cpu_access(dbgbuf->dmabuf, DMA_FROM_DEVICE);
Alan Kwong9487de22016-01-16 22:06:36 -0500944 }
945
946 dbgbuf->vaddr = NULL;
947 dbgbuf->dmabuf = NULL;
948 dbgbuf->buflen = 0;
949 dbgbuf->width = 0;
950 dbgbuf->height = 0;
951}
952
953/*
Veera Sundaram Sankarane15dd222017-04-20 08:13:08 -0700954 * sde_hw_rotator_vbif_setting - helper function to set VBIF QoS remapper
 955 * levels, enable write gather, and optionally keep clocks always on
 956 * (clock gating disabled) for debug purposes.
957 *
958 * @rot: Pointer to rotator hw
959 */
960static void sde_hw_rotator_vbif_setting(struct sde_hw_rotator *rot)
961{
962 u32 i, mask, vbif_qos, reg_val = 0;
963 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
964
965 /* VBIF_ROT QoS remapper setting */
966 switch (mdata->npriority_lvl) {
967
968 case SDE_MDP_VBIF_4_LEVEL_REMAPPER:
969 for (i = 0; i < mdata->npriority_lvl; i++) {
970 reg_val = SDE_VBIF_READ(mdata,
971 MMSS_VBIF_NRT_VBIF_QOS_REMAP_00 + i*4);
972 mask = 0x3 << (XIN_SSPP * 2);
973 vbif_qos = mdata->vbif_nrt_qos[i];
974 reg_val |= vbif_qos << (XIN_SSPP * 2);
975 /* ensure write is issued after the read operation */
976 mb();
977 SDE_VBIF_WRITE(mdata,
978 MMSS_VBIF_NRT_VBIF_QOS_REMAP_00 + i*4,
979 reg_val);
980 }
981 break;
982
983 case SDE_MDP_VBIF_8_LEVEL_REMAPPER:
984 mask = mdata->npriority_lvl - 1;
985 for (i = 0; i < mdata->npriority_lvl; i++) {
986 /* RD and WR client */
987 reg_val |= (mdata->vbif_nrt_qos[i] & mask)
988 << (XIN_SSPP * 4);
989 reg_val |= (mdata->vbif_nrt_qos[i] & mask)
990 << (XIN_WRITEBACK * 4);
991
992 SDE_VBIF_WRITE(mdata,
993 MMSS_VBIF_NRT_VBIF_QOS_RP_REMAP_000 + i*8,
994 reg_val);
995 SDE_VBIF_WRITE(mdata,
996 MMSS_VBIF_NRT_VBIF_QOS_LVL_REMAP_000 + i*8,
997 reg_val);
998 }
999 break;
1000
1001 default:
1002 SDEROT_DBG("invalid vbif remapper levels\n");
1003 }
1004
1005 /* Enable write gather for writeback to remove write gaps, which
1006 * may hang AXI/BIMC/SDE.
1007 */
1008 SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_WRITE_GATHTER_EN,
1009 BIT(XIN_WRITEBACK));
1010
1011 /*
 1012 * For debug purposes, disable clock gating, i.e. keep clocks always on
1013 */
1014 if (mdata->clk_always_on) {
1015 SDE_VBIF_WRITE(mdata, MMSS_VBIF_CLKON, 0x3);
1016 SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0, 0x3);
1017 SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL1,
1018 0xFFFF);
1019 SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_CLK_CTRL, 1);
1020 }
1021}
1022
1023/*
Alan Kwong9487de22016-01-16 22:06:36 -05001024 * sde_hw_rotator_setup_timestamp_packet - setup timestamp writeback command
1025 * @ctx: Pointer to rotator context
1026 * @mask: Bit mask location of the timestamp
1027 * @swts: Software timestamp
1028 */
1029static void sde_hw_rotator_setup_timestamp_packet(
1030 struct sde_hw_rotator_context *ctx, u32 mask, u32 swts)
1031{
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001032 char __iomem *wrptr;
Alan Kwong9487de22016-01-16 22:06:36 -05001033
1034 wrptr = sde_hw_rotator_get_regdma_segment(ctx);
1035
1036 /*
 1037 * Create a dummy packet that writes out to one location for timestamp
1038 * generation.
1039 */
1040 SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_SIZE, 6);
1041 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x00010001);
1042 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
1043 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
1044 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x00010001);
1045 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
1046 SDE_REGDMA_BLKWRITE_DATA(wrptr, ctx->ts_addr);
1047 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_YSTRIDE0, 4);
1048 SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_FORMAT, 4);
1049 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x004037FF);
1050 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x03020100);
1051 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x80000000);
1052 SDE_REGDMA_BLKWRITE_DATA(wrptr, ctx->timestamp);
Benjamin Chan15c93d82016-08-29 10:04:22 -04001053 /*
 1054 * Must clear the secure buffer setting for the SW timestamp because
 1055 * the SW timestamp buffer is always allocated in a non-secure region.
1056 */
1057 if (ctx->is_secure) {
1058 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_ADDR_SW_STATUS, 0);
1059 SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ADDR_SW_STATUS, 0);
1060 }
Alan Kwong9487de22016-01-16 22:06:36 -05001061 SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_WB_DST_FORMAT, 4);
1062 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x000037FF);
1063 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
1064 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x03020100);
1065 SDE_REGDMA_BLKWRITE_DATA(wrptr, ctx->ts_addr);
1066 SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_YSTRIDE0, 4);
1067 SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_SIZE, 0x00010001);
1068 SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_IMG_SIZE, 0x00010001);
1069 SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_XY, 0);
Clarence Ip77cadd12017-06-19 17:51:46 -04001070 SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_WRITE_CONFIG,
1071 (ctx->rot->highest_bank & 0x3) << 8);
Alan Kwong9487de22016-01-16 22:06:36 -05001072 SDE_REGDMA_WRITE(wrptr, ROTTOP_DNSC, 0);
1073 SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, 1);
1074 SDE_REGDMA_MODIFY(wrptr, REGDMA_TIMESTAMP_REG, mask, swts);
1075 SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, 1);
1076
1077 sde_hw_rotator_put_regdma_segment(ctx, wrptr);
1078}
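/*
 * In effect (an interpretation of the sequence above, not an
 * authoritative description): this queues a dummy 1x1 rotation plus a
 * REGDMA MODIFY of REGDMA_TIMESTAMP_REG, so the software timestamp only
 * advances once REGDMA has executed every command queued ahead of it.
 */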
1079
1080/*
Veera Sundaram Sankaran3f0141e2017-05-10 18:19:29 -07001081 * sde_hw_rotator_cdp_configs - configures the CDP registers
1082 * @ctx: Pointer to rotator context
1083 * @params: Pointer to parameters needed for CDP configs
1084 */
1085static void sde_hw_rotator_cdp_configs(struct sde_hw_rotator_context *ctx,
1086 struct sde_rot_cdp_params *params)
1087{
1088 int reg_val;
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001089 char __iomem *wrptr = sde_hw_rotator_get_regdma_segment(ctx);
Veera Sundaram Sankaran3f0141e2017-05-10 18:19:29 -07001090
1091 if (!params->enable) {
1092 SDE_REGDMA_WRITE(wrptr, params->offset, 0x0);
1093 goto end;
1094 }
1095
1096 reg_val = BIT(0); /* enable cdp */
1097
1098 if (sde_mdp_is_ubwc_format(params->fmt))
1099 reg_val |= BIT(1); /* enable UBWC meta cdp */
1100
1101 if (sde_mdp_is_ubwc_format(params->fmt)
1102 || sde_mdp_is_tilea4x_format(params->fmt)
1103 || sde_mdp_is_tilea5x_format(params->fmt))
1104 reg_val |= BIT(2); /* enable tile amortize */
1105
1106 reg_val |= BIT(3); /* enable preload addr ahead cnt 64 */
1107
1108 SDE_REGDMA_WRITE(wrptr, params->offset, reg_val);
1109
1110end:
1111 sde_hw_rotator_put_regdma_segment(ctx, wrptr);
1112}
1113
1114/*
Veera Sundaram Sankaranf13fb322017-05-11 15:42:28 -07001115 * sde_hw_rotator_setup_qos_lut_wr - Set QoS LUT/Danger LUT/Safe LUT configs
1116 * for the WRITEBACK rotator for inline and offline rotation.
1117 *
1118 * @ctx: Pointer to rotator context
1119 */
1120static void sde_hw_rotator_setup_qos_lut_wr(struct sde_hw_rotator_context *ctx)
1121{
1122 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001123 char __iomem *wrptr = sde_hw_rotator_get_regdma_segment(ctx);
Veera Sundaram Sankaranf13fb322017-05-11 15:42:28 -07001124
1125 /* Offline rotation setting */
1126 if (!ctx->sbuf_mode) {
1127 /* QOS LUT WR setting */
1128 if (test_bit(SDE_QOS_LUT, mdata->sde_qos_map)) {
1129 SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_0,
1130 mdata->lut_cfg[SDE_ROT_WR].creq_lut_0);
1131 SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_1,
1132 mdata->lut_cfg[SDE_ROT_WR].creq_lut_1);
1133 }
1134
1135 /* Danger LUT WR setting */
1136 if (test_bit(SDE_QOS_DANGER_LUT, mdata->sde_qos_map))
1137 SDE_REGDMA_WRITE(wrptr, ROT_WB_DANGER_LUT,
1138 mdata->lut_cfg[SDE_ROT_WR].danger_lut);
1139
1140 /* Safe LUT WR setting */
1141 if (test_bit(SDE_QOS_SAFE_LUT, mdata->sde_qos_map))
1142 SDE_REGDMA_WRITE(wrptr, ROT_WB_SAFE_LUT,
1143 mdata->lut_cfg[SDE_ROT_WR].safe_lut);
1144
1145 /* Inline rotation setting */
1146 } else {
1147 /* QOS LUT WR setting */
1148 if (test_bit(SDE_INLINE_QOS_LUT, mdata->sde_inline_qos_map)) {
1149 SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_0,
1150 mdata->inline_lut_cfg[SDE_ROT_WR].creq_lut_0);
1151 SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_1,
1152 mdata->inline_lut_cfg[SDE_ROT_WR].creq_lut_1);
1153 }
1154
1155 /* Danger LUT WR setting */
1156 if (test_bit(SDE_INLINE_QOS_DANGER_LUT,
1157 mdata->sde_inline_qos_map))
1158 SDE_REGDMA_WRITE(wrptr, ROT_WB_DANGER_LUT,
1159 mdata->inline_lut_cfg[SDE_ROT_WR].danger_lut);
1160
1161 /* Safe LUT WR setting */
1162 if (test_bit(SDE_INLINE_QOS_SAFE_LUT,
1163 mdata->sde_inline_qos_map))
1164 SDE_REGDMA_WRITE(wrptr, ROT_WB_SAFE_LUT,
1165 mdata->inline_lut_cfg[SDE_ROT_WR].safe_lut);
1166 }
1167
1168 /* Update command queue write ptr */
1169 sde_hw_rotator_put_regdma_segment(ctx, wrptr);
1170}
1171
1172/*
1173 * sde_hw_rotator_setup_qos_lut_rd - Set QoS LUT/Danger LUT/Safe LUT configs
1174 * for the SSPP rotator for inline and offline rotation.
1175 *
1176 * @ctx: Pointer to rotator context
1177 */
1178static void sde_hw_rotator_setup_qos_lut_rd(struct sde_hw_rotator_context *ctx)
1179{
1180 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001181 char __iomem *wrptr = sde_hw_rotator_get_regdma_segment(ctx);
Veera Sundaram Sankaranf13fb322017-05-11 15:42:28 -07001182
1183 /* Offline rotation setting */
1184 if (!ctx->sbuf_mode) {
1185 /* QOS LUT RD setting */
1186 if (test_bit(SDE_QOS_LUT, mdata->sde_qos_map)) {
1187 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_0,
1188 mdata->lut_cfg[SDE_ROT_RD].creq_lut_0);
1189 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_1,
1190 mdata->lut_cfg[SDE_ROT_RD].creq_lut_1);
1191 }
1192
1193 /* Danger LUT RD setting */
1194 if (test_bit(SDE_QOS_DANGER_LUT, mdata->sde_qos_map))
1195 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_DANGER_LUT,
1196 mdata->lut_cfg[SDE_ROT_RD].danger_lut);
1197
1198 /* Safe LUT RD setting */
1199 if (test_bit(SDE_QOS_SAFE_LUT, mdata->sde_qos_map))
1200 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SAFE_LUT,
1201 mdata->lut_cfg[SDE_ROT_RD].safe_lut);
1202
1203 /* inline rotation setting */
1204 } else {
1205 /* QOS LUT RD setting */
1206 if (test_bit(SDE_INLINE_QOS_LUT, mdata->sde_inline_qos_map)) {
1207 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_0,
1208 mdata->inline_lut_cfg[SDE_ROT_RD].creq_lut_0);
1209 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_1,
1210 mdata->inline_lut_cfg[SDE_ROT_RD].creq_lut_1);
1211 }
1212
1213 /* Danger LUT RD setting */
1214 if (test_bit(SDE_INLINE_QOS_DANGER_LUT,
1215 mdata->sde_inline_qos_map))
1216 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_DANGER_LUT,
1217 mdata->inline_lut_cfg[SDE_ROT_RD].danger_lut);
1218
1219 /* Safe LUT RD setting */
1220 if (test_bit(SDE_INLINE_QOS_SAFE_LUT,
1221 mdata->sde_inline_qos_map))
1222 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SAFE_LUT,
1223 mdata->inline_lut_cfg[SDE_ROT_RD].safe_lut);
1224 }
1225
1226 /* Update command queue write ptr */
1227 sde_hw_rotator_put_regdma_segment(ctx, wrptr);
1228}
1229
1230/*
Alan Kwong9487de22016-01-16 22:06:36 -05001231 * sde_hw_rotator_setup_fetchengine - setup fetch engine
1232 * @ctx: Pointer to rotator context
1233 * @queue_id: Priority queue identifier
1234 * @cfg: Fetch configuration
1235 * @danger_lut: real-time QoS LUT for danger setting (not used)
1236 * @safe_lut: real-time QoS LUT for safe setting (not used)
Benjamin Chanfb6faa32016-08-16 17:21:01 -04001237 * @dnsc_factor_w: downscale factor for width
1238 * @dnsc_factor_h: downscale factor for height
Alan Kwong9487de22016-01-16 22:06:36 -05001239 * @flags: Control flag
1240 */
1241static void sde_hw_rotator_setup_fetchengine(struct sde_hw_rotator_context *ctx,
1242 enum sde_rot_queue_prio queue_id,
1243 struct sde_hw_rot_sspp_cfg *cfg, u32 danger_lut, u32 safe_lut,
Benjamin Chanfb6faa32016-08-16 17:21:01 -04001244 u32 dnsc_factor_w, u32 dnsc_factor_h, u32 flags)
Alan Kwong9487de22016-01-16 22:06:36 -05001245{
1246 struct sde_hw_rotator *rot = ctx->rot;
1247 struct sde_mdp_format_params *fmt;
1248 struct sde_mdp_data *data;
Veera Sundaram Sankaran3f0141e2017-05-10 18:19:29 -07001249 struct sde_rot_cdp_params cdp_params = {0};
Benjamin Chanfb6faa32016-08-16 17:21:01 -04001250 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001251 char __iomem *wrptr;
Alan Kwong9487de22016-01-16 22:06:36 -05001252 u32 opmode = 0;
1253 u32 chroma_samp = 0;
1254 u32 src_format = 0;
1255 u32 unpack = 0;
1256 u32 width = cfg->img_width;
1257 u32 height = cfg->img_height;
1258 u32 fetch_blocksize = 0;
1259 int i;
1260
1261 if (ctx->rot->mode == ROT_REGDMA_ON) {
Alan Kwong708eacd82017-08-24 14:26:57 -04001262 if (rot->irq_num >= 0)
1263 SDE_ROTREG_WRITE(rot->mdss_base,
1264 REGDMA_CSR_REGDMA_INT_EN,
1265 REGDMA_INT_MASK);
Alan Kwong9487de22016-01-16 22:06:36 -05001266 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_OP_MODE,
1267 REGDMA_EN);
1268 }
1269
1270 wrptr = sde_hw_rotator_get_regdma_segment(ctx);
1271
Alan Kwong5b4d71b2017-02-10 20:52:59 -08001272 /*
1273 * initialize start control trigger selection first
1274 */
1275 if (test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map)) {
1276 if (ctx->sbuf_mode)
1277 SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL,
1278 ctx->start_ctrl);
1279 else
1280 SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, 0);
1281 }
1282
Alan Kwong9487de22016-01-16 22:06:36 -05001283 /* source image setup */
1284 if ((flags & SDE_ROT_FLAG_DEINTERLACE)
1285 && !(flags & SDE_ROT_FLAG_SOURCE_ROTATED_90)) {
1286 for (i = 0; i < cfg->src_plane.num_planes; i++)
1287 cfg->src_plane.ystride[i] *= 2;
1288 width *= 2;
1289 height /= 2;
1290 }
1291
1292 /*
1293 * REGDMA BLK write from SRC_SIZE to OP_MODE, total 15 registers
1294 */
1295 SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_SIZE, 15);
1296
1297 /* SRC_SIZE, SRC_IMG_SIZE, SRC_XY, OUT_SIZE, OUT_XY */
1298 SDE_REGDMA_BLKWRITE_DATA(wrptr,
1299 cfg->src_rect->w | (cfg->src_rect->h << 16));
1300 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0); /* SRC_IMG_SIZE unused */
1301 SDE_REGDMA_BLKWRITE_DATA(wrptr,
1302 cfg->src_rect->x | (cfg->src_rect->y << 16));
1303 SDE_REGDMA_BLKWRITE_DATA(wrptr,
1304 cfg->src_rect->w | (cfg->src_rect->h << 16));
1305 SDE_REGDMA_BLKWRITE_DATA(wrptr,
1306 cfg->src_rect->x | (cfg->src_rect->y << 16));
1307
1308 /* SRC_ADDR [0-3], SRC_YSTRIDE [0-1] */
1309 data = cfg->data;
1310 for (i = 0; i < SDE_ROT_MAX_PLANES; i++)
1311 SDE_REGDMA_BLKWRITE_DATA(wrptr, data->p[i].addr);
1312 SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->src_plane.ystride[0] |
1313 (cfg->src_plane.ystride[1] << 16));
1314 SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->src_plane.ystride[2] |
1315 (cfg->src_plane.ystride[3] << 16));
1316
1317 /* UNUSED, write 0 */
1318 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
1319
1320 /* setup source format */
1321 fmt = cfg->fmt;
1322
1323 chroma_samp = fmt->chroma_sample;
1324 if (flags & SDE_ROT_FLAG_SOURCE_ROTATED_90) {
1325 if (chroma_samp == SDE_MDP_CHROMA_H2V1)
1326 chroma_samp = SDE_MDP_CHROMA_H1V2;
1327 else if (chroma_samp == SDE_MDP_CHROMA_H1V2)
1328 chroma_samp = SDE_MDP_CHROMA_H2V1;
1329 }
1330
1331 src_format = (chroma_samp << 23) |
1332 (fmt->fetch_planes << 19) |
1333 (fmt->bits[C3_ALPHA] << 6) |
1334 (fmt->bits[C2_R_Cr] << 4) |
1335 (fmt->bits[C1_B_Cb] << 2) |
1336 (fmt->bits[C0_G_Y] << 0);
1337
1338 if (fmt->alpha_enable &&
1339 (fmt->fetch_planes == SDE_MDP_PLANE_INTERLEAVED))
1340 src_format |= BIT(8); /* SRCC3_EN */
1341
1342 src_format |= ((fmt->unpack_count - 1) << 12) |
1343 (fmt->unpack_tight << 17) |
1344 (fmt->unpack_align_msb << 18) |
1345 ((fmt->bpp - 1) << 9) |
1346 ((fmt->frame_format & 3) << 30);
1347
1348 if (flags & SDE_ROT_FLAG_ROT_90)
1349 src_format |= BIT(11); /* ROT90 */
1350
1351 if (sde_mdp_is_ubwc_format(fmt))
1352 opmode |= BIT(0); /* BWC_DEC_EN */
1353
1354 /* if this is YUV pixel format, enable CSC */
1355 if (sde_mdp_is_yuv_format(fmt))
1356 src_format |= BIT(15); /* SRC_COLOR_SPACE */
1357
1358 if (fmt->pixel_mode == SDE_MDP_PIXEL_10BIT)
1359 src_format |= BIT(14); /* UNPACK_DX_FORMAT */
1360
Alan Kwong3bef26f2017-02-26 15:38:09 -08001361 if (rot->solid_fill)
1362 src_format |= BIT(22); /* SOLID_FILL */
1363
Alan Kwong9487de22016-01-16 22:06:36 -05001364 /* SRC_FORMAT */
1365 SDE_REGDMA_BLKWRITE_DATA(wrptr, src_format);
1366
1367 /* setup source unpack pattern */
1368 unpack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
1369 (fmt->element[1] << 8) | (fmt->element[0] << 0);
1370
1371 /* SRC_UNPACK_PATTERN */
1372 SDE_REGDMA_BLKWRITE_DATA(wrptr, unpack);
1373
1374 /* setup source op mode */
1375 if (flags & SDE_ROT_FLAG_FLIP_LR)
1376 opmode |= BIT(13); /* FLIP_MODE L/R horizontal flip */
1377 if (flags & SDE_ROT_FLAG_FLIP_UD)
1378 opmode |= BIT(14); /* FLIP_MODE U/D vertical flip */
1379 opmode |= BIT(31); /* MDSS_MDP_OP_PE_OVERRIDE */
1380
1381 /* SRC_OP_MODE */
1382 SDE_REGDMA_BLKWRITE_DATA(wrptr, opmode);
1383
1384 /* setup source fetch config, TP10 uses different block size */
Benjamin Chanfb6faa32016-08-16 17:21:01 -04001385 if (test_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map) &&
1386 (dnsc_factor_w == 1) && (dnsc_factor_h == 1)) {
1387 if (sde_mdp_is_tp10_format(fmt))
1388 fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_144_EXT;
1389 else
1390 fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_192_EXT;
1391 } else {
1392 if (sde_mdp_is_tp10_format(fmt))
1393 fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_96;
1394 else
1395 fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_128;
1396 }
1397
Alan Kwong3bef26f2017-02-26 15:38:09 -08001398 if (rot->solid_fill)
1399 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_CONSTANT_COLOR,
1400 rot->constant_color);
1401
Alan Kwong9487de22016-01-16 22:06:36 -05001402 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_FETCH_CONFIG,
1403 fetch_blocksize |
1404 SDE_ROT_SSPP_FETCH_CONFIG_RESET_VALUE |
1405 ((rot->highest_bank & 0x3) << 18));
1406
Alan Kwongfb8eeb22017-02-06 15:00:03 -08001407 if (test_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map))
1408 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_UBWC_STATIC_CTRL, BIT(31) |
1409 ((ctx->rot->ubwc_malsize & 0x3) << 8) |
1410 ((ctx->rot->highest_bank & 0x3) << 4) |
1411 ((ctx->rot->ubwc_swizzle & 0x1) << 0));
1412
Alan Kwong9487de22016-01-16 22:06:36 -05001413 /* setup source buffer plane security status */
Abhijit Kulkarni298c8232016-09-26 22:32:10 -07001414 if (flags & (SDE_ROT_FLAG_SECURE_OVERLAY_SESSION |
1415 SDE_ROT_FLAG_SECURE_CAMERA_SESSION)) {
Alan Kwong9487de22016-01-16 22:06:36 -05001416 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_ADDR_SW_STATUS, 0xF);
1417 ctx->is_secure = true;
Benjamin Chan15c93d82016-08-29 10:04:22 -04001418 } else {
1419 SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_ADDR_SW_STATUS, 0);
1420 ctx->is_secure = false;
Alan Kwong9487de22016-01-16 22:06:36 -05001421 }
1422
Veera Sundaram Sankaranf13fb322017-05-11 15:42:28 -07001423 /* Update command queue write ptr */
Veera Sundaram Sankaran3f0141e2017-05-10 18:19:29 -07001424 sde_hw_rotator_put_regdma_segment(ctx, wrptr);
1425
1426 /* CDP register RD setting */
1427 cdp_params.enable = test_bit(SDE_QOS_CDP, mdata->sde_qos_map) ?
1428 mdata->enable_cdp[SDE_ROT_RD] : false;
1429 cdp_params.fmt = fmt;
1430 cdp_params.offset = ROT_SSPP_CDP_CNTL;
1431 sde_hw_rotator_cdp_configs(ctx, &cdp_params);
1432
Veera Sundaram Sankaranf13fb322017-05-11 15:42:28 -07001433 /* QOS LUT/ Danger LUT/ Safe Lut WR setting */
1434 sde_hw_rotator_setup_qos_lut_rd(ctx);
1435
Veera Sundaram Sankaran3f0141e2017-05-10 18:19:29 -07001436 wrptr = sde_hw_rotator_get_regdma_segment(ctx);
1437
Benjamin Chan99eb63b2016-12-21 15:45:26 -05001438 /*
1439 * Determine if traffic shaping is required. Only enable traffic
1440 * shaping when content is 4k@30fps. The actual traffic shaping
1441 * bandwidth calculation is done in output setup.
1442 */
Veera Sundaram Sankaranf13fb322017-05-11 15:42:28 -07001443 if (((!ctx->sbuf_mode)
1444 && (cfg->src_rect->w * cfg->src_rect->h) >= RES_UHD)
1445 && (cfg->fps <= 30)) {
Benjamin Chan99eb63b2016-12-21 15:45:26 -05001446 SDEROT_DBG("Enable Traffic Shaper\n");
1447 ctx->is_traffic_shaping = true;
1448 } else {
1449 SDEROT_DBG("Disable Traffic Shaper\n");
1450 ctx->is_traffic_shaping = false;
1451 }
1452
Alan Kwong9487de22016-01-16 22:06:36 -05001453 /* Update command queue write ptr */
1454 sde_hw_rotator_put_regdma_segment(ctx, wrptr);
1455}
1456
1457/*
1458 * sde_hw_rotator_setup_wbengine - setup writeback engine
1459 * @ctx: Pointer to rotator context
1460 * @queue_id: Priority queue identifier
1461 * @cfg: Writeback configuration
1462 * @flags: Control flag
1463 */
1464static void sde_hw_rotator_setup_wbengine(struct sde_hw_rotator_context *ctx,
1465 enum sde_rot_queue_prio queue_id,
1466 struct sde_hw_rot_wb_cfg *cfg,
1467 u32 flags)
1468{
Alan Kwong6bc64622017-02-04 17:36:03 -08001469 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
Alan Kwong9487de22016-01-16 22:06:36 -05001470 struct sde_mdp_format_params *fmt;
Veera Sundaram Sankaran3f0141e2017-05-10 18:19:29 -07001471 struct sde_rot_cdp_params cdp_params = {0};
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001472 char __iomem *wrptr;
Alan Kwong9487de22016-01-16 22:06:36 -05001473 u32 pack = 0;
1474 u32 dst_format = 0;
Clarence Ip77cadd12017-06-19 17:51:46 -04001475 u32 no_partial_writes = 0;
Alan Kwong9487de22016-01-16 22:06:36 -05001476 int i;
1477
1478 wrptr = sde_hw_rotator_get_regdma_segment(ctx);
1479
1480 fmt = cfg->fmt;
1481
1482 /* setup WB DST format */
1483 dst_format |= (fmt->chroma_sample << 23) |
1484 (fmt->fetch_planes << 19) |
1485 (fmt->bits[C3_ALPHA] << 6) |
1486 (fmt->bits[C2_R_Cr] << 4) |
1487 (fmt->bits[C1_B_Cb] << 2) |
1488 (fmt->bits[C0_G_Y] << 0);
1489
1490 /* alpha control */
1491 if (fmt->bits[C3_ALPHA] || fmt->alpha_enable) {
1492 dst_format |= BIT(8);
1493 if (!fmt->alpha_enable) {
1494 dst_format |= BIT(14);
1495 SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ALPHA_X_VALUE, 0);
1496 }
1497 }
1498
1499 dst_format |= ((fmt->unpack_count - 1) << 12) |
1500 (fmt->unpack_tight << 17) |
1501 (fmt->unpack_align_msb << 18) |
1502 ((fmt->bpp - 1) << 9) |
1503 ((fmt->frame_format & 3) << 30);
1504
1505 if (sde_mdp_is_yuv_format(fmt))
1506 dst_format |= BIT(15);
1507
1508 if (fmt->pixel_mode == SDE_MDP_PIXEL_10BIT)
1509 dst_format |= BIT(21); /* PACK_DX_FORMAT */
1510
1511 /*
1512 * REGDMA BLK write, from DST_FORMAT to DST_YSTRIDE 1, total 9 regs
1513 */
1514 SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_WB_DST_FORMAT, 9);
1515
1516 /* DST_FORMAT */
1517 SDE_REGDMA_BLKWRITE_DATA(wrptr, dst_format);
1518
1519 /* DST_OP_MODE */
1520 if (sde_mdp_is_ubwc_format(fmt))
1521 SDE_REGDMA_BLKWRITE_DATA(wrptr, BIT(0));
1522 else
1523 SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
1524
1525 /* DST_PACK_PATTERN */
1526 pack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
1527 (fmt->element[1] << 8) | (fmt->element[0] << 0);
1528 SDE_REGDMA_BLKWRITE_DATA(wrptr, pack);
1529
1530 /* DST_ADDR [0-3], DST_YSTRIDE [0-1] */
1531 for (i = 0; i < SDE_ROT_MAX_PLANES; i++)
1532 SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->data->p[i].addr);
1533 SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->dst_plane.ystride[0] |
1534 (cfg->dst_plane.ystride[1] << 16));
1535 SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->dst_plane.ystride[2] |
1536 (cfg->dst_plane.ystride[3] << 16));
1537
1538 /* setup WB out image size and ROI */
1539 SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_IMG_SIZE,
1540 cfg->img_width | (cfg->img_height << 16));
1541 SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_SIZE,
1542 cfg->dst_rect->w | (cfg->dst_rect->h << 16));
1543 SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_XY,
1544 cfg->dst_rect->x | (cfg->dst_rect->y << 16));
1545
Abhijit Kulkarni298c8232016-09-26 22:32:10 -07001546 if (flags & (SDE_ROT_FLAG_SECURE_OVERLAY_SESSION |
1547 SDE_ROT_FLAG_SECURE_CAMERA_SESSION))
Benjamin Chan15c93d82016-08-29 10:04:22 -04001548 SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ADDR_SW_STATUS, 0x1);
1549 else
1550 SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ADDR_SW_STATUS, 0);
1551
Alan Kwong9487de22016-01-16 22:06:36 -05001552 /*
1553 * setup Downscale factor
1554 */
1555 SDE_REGDMA_WRITE(wrptr, ROTTOP_DNSC,
1556 cfg->v_downscale_factor |
1557 (cfg->h_downscale_factor << 16));
1558
Clarence Ip22fed4c2017-05-16 15:30:51 -04001559 /* partial write check */
Clarence Ip77cadd12017-06-19 17:51:46 -04001560 if (test_bit(SDE_CAPS_PARTIALWR, mdata->sde_caps_map)) {
1561 no_partial_writes = BIT(10);
1562
1563 /*
1564		 * For simplicity, only disable partial writes if
1565		 * the ROI spans the entire width of the
1566		 * output image, and additionally require the total
1567		 * stride to be properly aligned.
1568 *
1569 * This avoids having to determine the memory access
1570 * alignment of the actual horizontal ROI on a per
1571 * color format basis.
1572 */
1573 if (sde_mdp_is_ubwc_format(fmt)) {
1574 no_partial_writes = 0x0;
1575 } else if (cfg->dst_rect->x ||
1576 cfg->dst_rect->w != cfg->img_width) {
1577 no_partial_writes = 0x0;
1578 } else {
1579 for (i = 0; i < SDE_ROT_MAX_PLANES; i++)
1580 if (cfg->dst_plane.ystride[i] &
1581 PARTIAL_WRITE_ALIGNMENT)
1582 no_partial_writes = 0x0;
1583 }
1584 }
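	/*
	 * In short, after the checks above: partial writes end up disabled
	 * (BIT(10) set) only for non-UBWC formats writing a full-width ROI
	 * at x=0 with every plane stride aligned to PARTIAL_WRITE_ALIGNMENT.
	 */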
Clarence Ip22fed4c2017-05-16 15:30:51 -04001585
Alan Kwong6bc64622017-02-04 17:36:03 -08001586 /* write config setup for bank configuration */
Clarence Ip77cadd12017-06-19 17:51:46 -04001587 SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_WRITE_CONFIG, no_partial_writes |
Alan Kwong9487de22016-01-16 22:06:36 -05001588 (ctx->rot->highest_bank & 0x3) << 8);
1589
Alan Kwongfb8eeb22017-02-06 15:00:03 -08001590 if (test_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map))
1591 SDE_REGDMA_WRITE(wrptr, ROT_WB_UBWC_STATIC_CTRL,
1592 ((ctx->rot->ubwc_malsize & 0x3) << 8) |
1593 ((ctx->rot->highest_bank & 0x3) << 4) |
1594 ((ctx->rot->ubwc_swizzle & 0x1) << 0));
1595
Alan Kwong6bc64622017-02-04 17:36:03 -08001596 if (test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map))
1597 SDE_REGDMA_WRITE(wrptr, ROT_WB_SYS_CACHE_MODE,
1598 ctx->sys_cache_mode);
1599
1600 SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, ctx->op_mode |
1601 (flags & SDE_ROT_FLAG_ROT_90 ? BIT(1) : 0) | BIT(0));
Alan Kwong9487de22016-01-16 22:06:36 -05001602
Veera Sundaram Sankaran3f0141e2017-05-10 18:19:29 -07001603 sde_hw_rotator_put_regdma_segment(ctx, wrptr);
1604
1605 /* CDP register WR setting */
1606 cdp_params.enable = test_bit(SDE_QOS_CDP, mdata->sde_qos_map) ?
1607 mdata->enable_cdp[SDE_ROT_WR] : false;
1608 cdp_params.fmt = fmt;
1609 cdp_params.offset = ROT_WB_CDP_CNTL;
1610 sde_hw_rotator_cdp_configs(ctx, &cdp_params);
1611
Veera Sundaram Sankaranf13fb322017-05-11 15:42:28 -07001612 /* QOS LUT/ Danger LUT/ Safe LUT WR setting */
1613 sde_hw_rotator_setup_qos_lut_wr(ctx);
1614
Veera Sundaram Sankaran3f0141e2017-05-10 18:19:29 -07001615 wrptr = sde_hw_rotator_get_regdma_segment(ctx);
1616
Alan Kwong498d59f2017-02-11 18:56:34 -08001617 /* setup traffic shaper for 4k 30fps content or if prefill_bw is set */
Alan Kwong1513d3d2017-08-27 21:20:01 -04001618 if (ctx->is_traffic_shaping || cfg->prefill_bw) {
Benjamin Chan99eb63b2016-12-21 15:45:26 -05001619 u32 bw;
1620
1621 /*
1622 * Target to finish in 12ms, and we need to set number of bytes
1623 * per clock tick for traffic shaping.
1624		 * Each clock tick runs @ 19.2MHz, so we need to know the total
1625		 * number of clock ticks in 12ms, i.e. 12ms/(1/19.2MHz) ==> 230400
1626		 * Finally, calculate the byte count per clock tick based on
1627 * resolution, bpp and compression ratio.
1628 */
1629 bw = cfg->dst_rect->w * cfg->dst_rect->h;
1630
1631 if (fmt->chroma_sample == SDE_MDP_CHROMA_420)
1632 bw = (bw * 3) / 2;
1633 else
1634 bw *= fmt->bpp;
1635
1636 bw /= TRAFFIC_SHAPE_CLKTICK_12MS;
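		/*
		 * Example: a 3840x2160 YUV420 destination is
		 * 3840 * 2160 * 3 / 2 = 12441600 bytes, so over the
		 * 230400 ticks available in 12ms this works out to
		 * 54 bytes per tick.
		 */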
Alan Kwong498d59f2017-02-11 18:56:34 -08001637
1638 /* use prefill bandwidth instead if specified */
1639 if (cfg->prefill_bw)
Maria Yuf4195c72017-09-26 16:24:59 +08001640 bw = DIV_ROUND_UP_SECTOR_T(cfg->prefill_bw,
Alan Kwong498d59f2017-02-11 18:56:34 -08001641 TRAFFIC_SHAPE_VSYNC_CLK);
1642
Benjamin Chan99eb63b2016-12-21 15:45:26 -05001643 if (bw > 0xFF)
1644 bw = 0xFF;
Benjamin Chanf2f3e632017-07-12 10:21:39 -04001645 else if (bw == 0)
1646 bw = 1;
1647
Benjamin Chan99eb63b2016-12-21 15:45:26 -05001648 SDE_REGDMA_WRITE(wrptr, ROT_WB_TRAFFIC_SHAPER_WR_CLIENT,
Alan Kwong498d59f2017-02-11 18:56:34 -08001649 BIT(31) | (cfg->prefill_bw ? BIT(27) : 0) | bw);
Benjamin Chan99eb63b2016-12-21 15:45:26 -05001650 SDEROT_DBG("Enable ROT_WB Traffic Shaper:%d\n", bw);
1651 } else {
1652 SDE_REGDMA_WRITE(wrptr, ROT_WB_TRAFFIC_SHAPER_WR_CLIENT, 0);
1653 SDEROT_DBG("Disable ROT_WB Traffic Shaper\n");
1654 }
1655
Alan Kwong9487de22016-01-16 22:06:36 -05001656 /* Update command queue write ptr */
1657 sde_hw_rotator_put_regdma_segment(ctx, wrptr);
1658}
1659
1660/*
1661 * sde_hw_rotator_start_no_regdma - start non-regdma operation
1662 * @ctx: Pointer to rotator context
1663 * @queue_id: Priority queue identifier
1664 */
1665static u32 sde_hw_rotator_start_no_regdma(struct sde_hw_rotator_context *ctx,
1666 enum sde_rot_queue_prio queue_id)
1667{
1668 struct sde_hw_rotator *rot = ctx->rot;
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001669 char __iomem *wrptr;
1670 char __iomem *mem_rdptr;
1671 char __iomem *addr;
Alan Kwong9487de22016-01-16 22:06:36 -05001672 u32 mask;
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001673 u32 cmd0, cmd1, cmd2;
Alan Kwong9487de22016-01-16 22:06:36 -05001674 u32 blksize;
1675
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001676 /*
1677	 * when regdma is not in use, the regdma segment is just normal
1678	 * DRAM, not an iomem region.
1679 */
1680 mem_rdptr = sde_hw_rotator_get_regdma_segment_base(ctx);
Alan Kwong9487de22016-01-16 22:06:36 -05001681 wrptr = sde_hw_rotator_get_regdma_segment(ctx);
1682
1683 if (rot->irq_num >= 0) {
1684 SDE_REGDMA_WRITE(wrptr, ROTTOP_INTR_EN, 1);
1685 SDE_REGDMA_WRITE(wrptr, ROTTOP_INTR_CLEAR, 1);
1686 reinit_completion(&ctx->rot_comp);
Alan Kwong818b7fc2016-07-24 22:07:41 -04001687 sde_hw_rotator_enable_irq(rot);
Alan Kwong9487de22016-01-16 22:06:36 -05001688 }
1689
Alan Kwong6bc64622017-02-04 17:36:03 -08001690 SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, ctx->start_ctrl);
Alan Kwong9487de22016-01-16 22:06:36 -05001691
1692 /* Update command queue write ptr */
1693 sde_hw_rotator_put_regdma_segment(ctx, wrptr);
1694
1695 SDEROT_DBG("BEGIN %d\n", ctx->timestamp);
1696	/* Write the whole command stream to the rotator blocks */
1697	/* Rotator starts right away once the command stream is fully written */
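	/*
	 * The segment is decoded in software below: each command word
	 * carries its opcode in the top bits and a register offset under
	 * REGDMA_ADDR_OFFSET_MASK; block writes are followed by a word
	 * count and that many payload words, handled per opcode.
	 */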
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001698 while (mem_rdptr < wrptr) {
1699 u32 op = REGDMA_OP_MASK & readl_relaxed_no_log(mem_rdptr);
Alan Kwong9487de22016-01-16 22:06:36 -05001700
1701 switch (op) {
1702 case REGDMA_OP_NOP:
1703 SDEROT_DBG("NOP\n");
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001704 mem_rdptr += sizeof(u32);
Alan Kwong9487de22016-01-16 22:06:36 -05001705 break;
1706 case REGDMA_OP_REGWRITE:
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001707 SDE_REGDMA_READ(mem_rdptr, cmd0);
1708 SDE_REGDMA_READ(mem_rdptr, cmd1);
Alan Kwong9487de22016-01-16 22:06:36 -05001709 SDEROT_DBG("REGW %6.6x %8.8x\n",
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001710 cmd0 & REGDMA_ADDR_OFFSET_MASK,
1711 cmd1);
Alan Kwong9487de22016-01-16 22:06:36 -05001712 addr = rot->mdss_base +
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001713 (cmd0 & REGDMA_ADDR_OFFSET_MASK);
1714 writel_relaxed(cmd1, addr);
Alan Kwong9487de22016-01-16 22:06:36 -05001715 break;
1716 case REGDMA_OP_REGMODIFY:
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001717 SDE_REGDMA_READ(mem_rdptr, cmd0);
1718 SDE_REGDMA_READ(mem_rdptr, cmd1);
1719 SDE_REGDMA_READ(mem_rdptr, cmd2);
Alan Kwong9487de22016-01-16 22:06:36 -05001720 SDEROT_DBG("REGM %6.6x %8.8x %8.8x\n",
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001721 cmd0 & REGDMA_ADDR_OFFSET_MASK,
1722 cmd1, cmd2);
Alan Kwong9487de22016-01-16 22:06:36 -05001723 addr = rot->mdss_base +
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001724 (cmd0 & REGDMA_ADDR_OFFSET_MASK);
1725 mask = cmd1;
1726 writel_relaxed((readl_relaxed(addr) & mask) | cmd2,
Alan Kwong9487de22016-01-16 22:06:36 -05001727 addr);
1728 break;
1729 case REGDMA_OP_BLKWRITE_SINGLE:
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001730 SDE_REGDMA_READ(mem_rdptr, cmd0);
1731 SDE_REGDMA_READ(mem_rdptr, cmd1);
Alan Kwong9487de22016-01-16 22:06:36 -05001732 SDEROT_DBG("BLKWS %6.6x %6.6x\n",
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001733 cmd0 & REGDMA_ADDR_OFFSET_MASK,
1734 cmd1);
Alan Kwong9487de22016-01-16 22:06:36 -05001735 addr = rot->mdss_base +
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001736 (cmd0 & REGDMA_ADDR_OFFSET_MASK);
1737 blksize = cmd1;
Alan Kwong9487de22016-01-16 22:06:36 -05001738 while (blksize--) {
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001739 SDE_REGDMA_READ(mem_rdptr, cmd0);
1740 SDEROT_DBG("DATA %8.8x\n", cmd0);
1741 writel_relaxed(cmd0, addr);
Alan Kwong9487de22016-01-16 22:06:36 -05001742 }
1743 break;
1744 case REGDMA_OP_BLKWRITE_INC:
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001745 SDE_REGDMA_READ(mem_rdptr, cmd0);
1746 SDE_REGDMA_READ(mem_rdptr, cmd1);
Alan Kwong9487de22016-01-16 22:06:36 -05001747 SDEROT_DBG("BLKWI %6.6x %6.6x\n",
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001748 cmd0 & REGDMA_ADDR_OFFSET_MASK,
1749 cmd1);
Alan Kwong9487de22016-01-16 22:06:36 -05001750 addr = rot->mdss_base +
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001751 (cmd0 & REGDMA_ADDR_OFFSET_MASK);
1752 blksize = cmd1;
Alan Kwong9487de22016-01-16 22:06:36 -05001753 while (blksize--) {
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001754 SDE_REGDMA_READ(mem_rdptr, cmd0);
1755 SDEROT_DBG("DATA %8.8x\n", cmd0);
1756 writel_relaxed(cmd0, addr);
Alan Kwong9487de22016-01-16 22:06:36 -05001757 addr += 4;
1758 }
1759 break;
1760 default:
1761			/* Unsupported OP mode:
1762			 * skip data for now for unrecognized OP mode
1763 */
1764 SDEROT_DBG("UNDEFINED\n");
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001765 mem_rdptr += sizeof(u32);
Alan Kwong9487de22016-01-16 22:06:36 -05001766 break;
1767 }
1768 }
1769 SDEROT_DBG("END %d\n", ctx->timestamp);
1770
1771 return ctx->timestamp;
1772}
1773
1774/*
1775 * sde_hw_rotator_start_regdma - start regdma operation
1776 * @ctx: Pointer to rotator context
1777 * @queue_id: Priority queue identifier
1778 */
1779static u32 sde_hw_rotator_start_regdma(struct sde_hw_rotator_context *ctx,
1780 enum sde_rot_queue_prio queue_id)
1781{
1782 struct sde_hw_rotator *rot = ctx->rot;
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001783 char __iomem *wrptr;
Alan Kwong9487de22016-01-16 22:06:36 -05001784 u32 regdmaSlot;
1785 u32 offset;
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001786 u32 length;
1787 u32 ts_length;
Alan Kwong9487de22016-01-16 22:06:36 -05001788 u32 enableInt;
1789 u32 swts = 0;
1790 u32 mask = 0;
Alan Kwong6bc64622017-02-04 17:36:03 -08001791 u32 trig_sel;
Alan Kwong9487de22016-01-16 22:06:36 -05001792
1793 wrptr = sde_hw_rotator_get_regdma_segment(ctx);
1794
Alan Kwong9487de22016-01-16 22:06:36 -05001795 /*
1796 * Last ROT command must be ROT_START before REGDMA start
1797 */
Alan Kwong6bc64622017-02-04 17:36:03 -08001798 SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, ctx->start_ctrl);
1799
Alan Kwong9487de22016-01-16 22:06:36 -05001800 sde_hw_rotator_put_regdma_segment(ctx, wrptr);
1801
1802 /*
1803 * Start REGDMA with command offset and size
1804 */
1805 regdmaSlot = sde_hw_rotator_get_regdma_ctxidx(ctx);
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001806 length = (wrptr - ctx->regdma_base) / 4;
1807 offset = (ctx->regdma_base - (rot->mdss_base +
1808 REGDMA_RAM_REGDMA_CMD_RAM)) / sizeof(u32);
Alan Kwong9487de22016-01-16 22:06:36 -05001809 enableInt = ((ctx->timestamp & 1) + 1) << 30;
Alan Kwong6bc64622017-02-04 17:36:03 -08001810 trig_sel = ctx->sbuf_mode ? REGDMA_CMD_TRIG_SEL_MDP_FLUSH :
1811 REGDMA_CMD_TRIG_SEL_SW_START;
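	/*
	 * Submit word layout, as encoded below: command offset in the low
	 * bits, command length (in words) in bits [23:14] and the trigger
	 * select. enableInt picks one of the two REGDMA done interrupts
	 * from the timestamp parity; it is applied here only in sbuf mode,
	 * otherwise on the trailing timestamp submission further down.
	 */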
Alan Kwong9487de22016-01-16 22:06:36 -05001812
1813 SDEROT_DBG(
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001814 "regdma(%d)[%d] <== INT:0x%X|length:%d|offset:0x%X, ts:%X\n",
Alan Kwong9487de22016-01-16 22:06:36 -05001815 queue_id, regdmaSlot, enableInt, length, offset,
1816 ctx->timestamp);
1817
1818 /* ensure the command packet is issued before the submit command */
1819 wmb();
1820
1821 /* REGDMA submission for current context */
1822 if (queue_id == ROT_QUEUE_HIGH_PRIORITY) {
1823 SDE_ROTREG_WRITE(rot->mdss_base,
1824 REGDMA_CSR_REGDMA_QUEUE_0_SUBMIT,
Alan Kwong6bc64622017-02-04 17:36:03 -08001825 (ctx->sbuf_mode ? enableInt : 0) | trig_sel |
1826 ((length & 0x3ff) << 14) | offset);
Alan Kwong9487de22016-01-16 22:06:36 -05001827 swts = ctx->timestamp;
1828 mask = ~SDE_REGDMA_SWTS_MASK;
1829 } else {
1830 SDE_ROTREG_WRITE(rot->mdss_base,
1831 REGDMA_CSR_REGDMA_QUEUE_1_SUBMIT,
Alan Kwong6bc64622017-02-04 17:36:03 -08001832 (ctx->sbuf_mode ? enableInt : 0) | trig_sel |
1833 ((length & 0x3ff) << 14) | offset);
Alan Kwong9487de22016-01-16 22:06:36 -05001834 swts = ctx->timestamp << SDE_REGDMA_SWTS_SHIFT;
1835 mask = ~(SDE_REGDMA_SWTS_MASK << SDE_REGDMA_SWTS_SHIFT);
1836 }
1837
Clarence Ip19339b32017-10-14 20:59:00 -04001838 SDEROT_EVTLOG(ctx->timestamp, queue_id, length, offset, ctx->sbuf_mode);
1839
Alan Kwong6bc64622017-02-04 17:36:03 -08001840 /* timestamp update can only be used in offline multi-context mode */
1841 if (!ctx->sbuf_mode) {
1842 /* Write timestamp after previous rotator job finished */
1843 sde_hw_rotator_setup_timestamp_packet(ctx, mask, swts);
1844 offset += length;
1845 ts_length = sde_hw_rotator_get_regdma_segment(ctx) - wrptr;
Benjamin Chanda32f8b2017-09-20 17:11:27 -04001846 ts_length /= sizeof(u32);
Alan Kwong6bc64622017-02-04 17:36:03 -08001847 WARN_ON((length + ts_length) > SDE_HW_ROT_REGDMA_SEG_SIZE);
Alan Kwong9487de22016-01-16 22:06:36 -05001848
Alan Kwong6bc64622017-02-04 17:36:03 -08001849		/* ensure command packet is issued before the submit command */
1850 wmb();
Alan Kwong9487de22016-01-16 22:06:36 -05001851
Clarence Ip19339b32017-10-14 20:59:00 -04001852 SDEROT_EVTLOG(queue_id, enableInt, ts_length, offset);
1853
Alan Kwong6bc64622017-02-04 17:36:03 -08001854 if (queue_id == ROT_QUEUE_HIGH_PRIORITY) {
1855 SDE_ROTREG_WRITE(rot->mdss_base,
1856 REGDMA_CSR_REGDMA_QUEUE_0_SUBMIT,
1857 enableInt | (ts_length << 14) | offset);
1858 } else {
1859 SDE_ROTREG_WRITE(rot->mdss_base,
1860 REGDMA_CSR_REGDMA_QUEUE_1_SUBMIT,
1861 enableInt | (ts_length << 14) | offset);
1862 }
Alan Kwong9487de22016-01-16 22:06:36 -05001863 }
1864
Alan Kwong9487de22016-01-16 22:06:36 -05001865 /* Update command queue write ptr */
1866 sde_hw_rotator_put_regdma_segment(ctx, wrptr);
1867
1868 return ctx->timestamp;
1869}
1870
1871/*
1872 * sde_hw_rotator_wait_done_no_regdma - wait for non-regdma completion
1873 * @ctx: Pointer to rotator context
1874 * @queue_id: Priority queue identifier
1875 * @flags: Option flag
1876 */
1877static u32 sde_hw_rotator_wait_done_no_regdma(
1878 struct sde_hw_rotator_context *ctx,
1879 enum sde_rot_queue_prio queue_id, u32 flag)
1880{
1881 struct sde_hw_rotator *rot = ctx->rot;
1882 int rc = 0;
1883 u32 sts = 0;
1884 u32 status;
1885 unsigned long flags;
1886
1887 if (rot->irq_num >= 0) {
1888 SDEROT_DBG("Wait for Rotator completion\n");
1889 rc = wait_for_completion_timeout(&ctx->rot_comp,
Clarence Ip19339b32017-10-14 20:59:00 -04001890 ctx->sbuf_mode ?
1891 msecs_to_jiffies(KOFF_TIMEOUT_SBUF) :
Alan Kwong6bc64622017-02-04 17:36:03 -08001892 msecs_to_jiffies(rot->koff_timeout));
Alan Kwong9487de22016-01-16 22:06:36 -05001893
1894 spin_lock_irqsave(&rot->rotisr_lock, flags);
1895 status = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
1896 if (rc == 0) {
1897 /*
1898 * Timeout, there might be error,
1899 * or rotator still busy
1900 */
1901 if (status & ROT_BUSY_BIT)
1902 SDEROT_ERR(
1903 "Timeout waiting for rotator done\n");
1904 else if (status & ROT_ERROR_BIT)
1905 SDEROT_ERR(
1906 "Rotator report error status\n");
1907				"Rotator reported error status\n");
1908 SDEROT_WARN(
1909 "Timeout waiting, but rotator job is done!!\n");
1910
Alan Kwong818b7fc2016-07-24 22:07:41 -04001911 sde_hw_rotator_disable_irq(rot);
Alan Kwong9487de22016-01-16 22:06:36 -05001912 }
1913 spin_unlock_irqrestore(&rot->rotisr_lock, flags);
1914 } else {
1915 int cnt = 200;
1916
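		/* no irq available: poll the HW status, up to 200 * 500us = 100ms */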
1917 do {
1918 udelay(500);
1919 status = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
1920 cnt--;
1921 } while ((cnt > 0) && (status & ROT_BUSY_BIT)
1922 && ((status & ROT_ERROR_BIT) == 0));
1923
1924 if (status & ROT_ERROR_BIT)
1925 SDEROT_ERR("Rotator error\n");
1926 else if (status & ROT_BUSY_BIT)
1927 SDEROT_ERR("Rotator busy\n");
1928
1929 SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
1930 ROT_DONE_CLEAR);
1931 }
1932
1933 sts = (status & ROT_ERROR_BIT) ? -ENODEV : 0;
1934
1935 return sts;
1936}
1937
1938/*
1939 * sde_hw_rotator_wait_done_regdma - wait for regdma completion
1940 * @ctx: Pointer to rotator context
1941 * @queue_id: Priority queue identifier
1942 * @flags: Option flag
1943 */
1944static u32 sde_hw_rotator_wait_done_regdma(
1945 struct sde_hw_rotator_context *ctx,
1946 enum sde_rot_queue_prio queue_id, u32 flag)
1947{
1948 struct sde_hw_rotator *rot = ctx->rot;
1949 int rc = 0;
Clarence Ip19339b32017-10-14 20:59:00 -04001950 bool abort;
Alan Kwong9487de22016-01-16 22:06:36 -05001951 u32 status;
1952 u32 last_isr;
1953 u32 last_ts;
1954 u32 int_id;
Alan Kwong818b7fc2016-07-24 22:07:41 -04001955 u32 swts;
Alan Kwong9487de22016-01-16 22:06:36 -05001956 u32 sts = 0;
Benjamin Chan2f6fc402017-09-27 16:15:33 -04001957 u32 ubwcerr = 0;
Alan Kwong9487de22016-01-16 22:06:36 -05001958 unsigned long flags;
1959
1960 if (rot->irq_num >= 0) {
1961 SDEROT_DBG("Wait for REGDMA completion, ctx:%p, ts:%X\n",
1962 ctx, ctx->timestamp);
Alan Kwong818b7fc2016-07-24 22:07:41 -04001963 rc = wait_event_timeout(ctx->regdma_waitq,
1964 !sde_hw_rotator_pending_swts(rot, ctx, &swts),
Clarence Ip19339b32017-10-14 20:59:00 -04001965 ctx->sbuf_mode ?
1966 msecs_to_jiffies(KOFF_TIMEOUT_SBUF) :
Alan Kwong6bc64622017-02-04 17:36:03 -08001967 msecs_to_jiffies(rot->koff_timeout));
Alan Kwong9487de22016-01-16 22:06:36 -05001968
Benjamin Chane7ca72e2016-12-22 18:42:34 -05001969 ATRACE_INT("sde_rot_done", 0);
Alan Kwong9487de22016-01-16 22:06:36 -05001970 spin_lock_irqsave(&rot->rotisr_lock, flags);
1971
1972 last_isr = ctx->last_regdma_isr_status;
1973 last_ts = ctx->last_regdma_timestamp;
Clarence Ip19339b32017-10-14 20:59:00 -04001974 abort = ctx->abort;
Alan Kwong9487de22016-01-16 22:06:36 -05001975 status = last_isr & REGDMA_INT_MASK;
1976 int_id = last_ts & 1;
1977 SDEROT_DBG("INT status:0x%X, INT id:%d, timestamp:0x%X\n",
1978 status, int_id, last_ts);
1979
Clarence Ip19339b32017-10-14 20:59:00 -04001980 if (rc == 0 || (status & REGDMA_INT_ERR_MASK) || abort) {
Alan Kwong818b7fc2016-07-24 22:07:41 -04001981 bool pending;
1982
1983 pending = sde_hw_rotator_pending_swts(rot, ctx, &swts);
Alan Kwong9487de22016-01-16 22:06:36 -05001984 SDEROT_ERR(
Clarence Ip19339b32017-10-14 20:59:00 -04001985 "Timeout wait for regdma interrupt status, ts:0x%X/0x%X, pending:%d, abort:%d\n",
1986 ctx->timestamp, swts, pending, abort);
Alan Kwong9487de22016-01-16 22:06:36 -05001987
1988 if (status & REGDMA_WATCHDOG_INT)
1989 SDEROT_ERR("REGDMA watchdog interrupt\n");
1990 else if (status & REGDMA_INVALID_DESCRIPTOR)
1991 SDEROT_ERR("REGDMA invalid descriptor\n");
1992 else if (status & REGDMA_INCOMPLETE_CMD)
1993 SDEROT_ERR("REGDMA incomplete command\n");
1994 else if (status & REGDMA_INVALID_CMD)
1995 SDEROT_ERR("REGDMA invalid command\n");
1996
Clarence Ipcd140292017-09-22 16:24:08 -04001997 _sde_hw_rotator_dump_status(rot, &ubwcerr);
Benjamin Chan2f6fc402017-09-27 16:15:33 -04001998
Clarence Ip19339b32017-10-14 20:59:00 -04001999 if (ubwcerr || abort) {
Benjamin Chan2f6fc402017-09-27 16:15:33 -04002000 /*
2001 * Perform recovery for ROT SSPP UBWC decode
2002 * error.
2003 * - SW reset rotator hw block
2004				 * - reset TS logic so all pending rotations
2005				 * in the hw queue get done-signalled
2006 */
2007 spin_unlock_irqrestore(&rot->rotisr_lock,
2008 flags);
2009 if (!sde_hw_rotator_reset(rot, ctx))
2010 status = REGDMA_INCOMPLETE_CMD;
2011 else
2012 status = ROT_ERROR_BIT;
2013 spin_lock_irqsave(&rot->rotisr_lock, flags);
2014 } else {
2015 status = ROT_ERROR_BIT;
2016 }
Alan Kwong818b7fc2016-07-24 22:07:41 -04002017 } else {
2018 if (rc == 1)
2019 SDEROT_WARN(
2020 "REGDMA done but no irq, ts:0x%X/0x%X\n",
2021 ctx->timestamp, swts);
Alan Kwong9487de22016-01-16 22:06:36 -05002022 status = 0;
2023 }
2024
Alan Kwong9487de22016-01-16 22:06:36 -05002025 spin_unlock_irqrestore(&rot->rotisr_lock, flags);
2026 } else {
2027 int cnt = 200;
Alan Kwongb0679602016-11-27 17:04:13 -08002028 bool pending;
Alan Kwong9487de22016-01-16 22:06:36 -05002029
2030 do {
2031 udelay(500);
Alan Kwongb0679602016-11-27 17:04:13 -08002032 last_isr = SDE_ROTREG_READ(rot->mdss_base,
2033 REGDMA_CSR_REGDMA_INT_STATUS);
2034 pending = sde_hw_rotator_pending_swts(rot, ctx, &swts);
Alan Kwong9487de22016-01-16 22:06:36 -05002035 cnt--;
Alan Kwongb0679602016-11-27 17:04:13 -08002036 } while ((cnt > 0) && pending &&
2037 ((last_isr & REGDMA_INT_ERR_MASK) == 0));
Alan Kwong9487de22016-01-16 22:06:36 -05002038
Alan Kwongb0679602016-11-27 17:04:13 -08002039 if (last_isr & REGDMA_INT_ERR_MASK) {
2040 SDEROT_ERR("Rotator error, ts:0x%X/0x%X status:%x\n",
2041 ctx->timestamp, swts, last_isr);
Clarence Ipcd140292017-09-22 16:24:08 -04002042 _sde_hw_rotator_dump_status(rot, NULL);
Alan Kwongb0679602016-11-27 17:04:13 -08002043 status = ROT_ERROR_BIT;
2044 } else if (pending) {
2045 SDEROT_ERR("Rotator timeout, ts:0x%X/0x%X status:%x\n",
2046 ctx->timestamp, swts, last_isr);
Clarence Ipcd140292017-09-22 16:24:08 -04002047 _sde_hw_rotator_dump_status(rot, NULL);
Alan Kwongb0679602016-11-27 17:04:13 -08002048 status = ROT_ERROR_BIT;
2049 } else {
2050 status = 0;
2051 }
Alan Kwong9487de22016-01-16 22:06:36 -05002052
2053 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_INT_CLEAR,
Alan Kwongb0679602016-11-27 17:04:13 -08002054 last_isr);
Alan Kwong9487de22016-01-16 22:06:36 -05002055 }
2056
Benjamin Chan2f6fc402017-09-27 16:15:33 -04002057 sts = (status & (ROT_ERROR_BIT | REGDMA_INCOMPLETE_CMD)) ? -ENODEV : 0;
Alan Kwong9487de22016-01-16 22:06:36 -05002058
Benjamin Chan4ec1f1d2016-09-15 22:49:49 -04002059 if (status & ROT_ERROR_BIT)
Benjamin Chan2d6411a2017-03-28 18:01:53 -04002060 SDEROT_EVTLOG_TOUT_HANDLER("rot", "rot_dbg_bus",
2061 "vbif_dbg_bus", "panic");
Benjamin Chan4ec1f1d2016-09-15 22:49:49 -04002062
Alan Kwong9487de22016-01-16 22:06:36 -05002063 return sts;
2064}
2065
2066/*
2067 * setup_rotator_ops - setup callback functions for the low-level HAL
2068 * @ops: Pointer to low-level ops callback
2069 * @mode: Operation mode (non-regdma or regdma)
2070 */
2071static void setup_rotator_ops(struct sde_hw_rotator_ops *ops,
2072 enum sde_rotator_regdma_mode mode)
2073{
2074 ops->setup_rotator_fetchengine = sde_hw_rotator_setup_fetchengine;
2075 ops->setup_rotator_wbengine = sde_hw_rotator_setup_wbengine;
2076 if (mode == ROT_REGDMA_ON) {
2077 ops->start_rotator = sde_hw_rotator_start_regdma;
2078 ops->wait_rotator_done = sde_hw_rotator_wait_done_regdma;
2079 } else {
2080 ops->start_rotator = sde_hw_rotator_start_no_regdma;
2081 ops->wait_rotator_done = sde_hw_rotator_wait_done_no_regdma;
2082 }
2083}
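/*
 * A minimal sketch of how these ops are driven by the rest of this file
 * (see sde_hw_rotator_config, sde_hw_rotator_kickoff and
 * sde_hw_rotator_wait4done below):
 *
 *	rot->ops.setup_rotator_fetchengine(ctx, ctx->q_id, &sspp_cfg,
 *			danger_lut, safe_lut,
 *			entry->dnsc_factor_w, entry->dnsc_factor_h, flags);
 *	rot->ops.setup_rotator_wbengine(ctx, ctx->q_id, &wb_cfg, flags);
 *	rot->ops.start_rotator(ctx, ctx->q_id);
 *	rot->ops.wait_rotator_done(ctx, ctx->q_id, 0);
 */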
2084
2085/*
2086 * sde_hw_rotator_swts_create - create software timestamp buffer
2087 * @rot: Pointer to rotator hw
2088 *
2089 * This buffer is used by regdma to keep track of last completed command.
2090 */
2091static int sde_hw_rotator_swts_create(struct sde_hw_rotator *rot)
2092{
2093 int rc = 0;
2094 struct ion_handle *handle;
2095 struct sde_mdp_img_data *data;
Abhijit Kulkarni298c8232016-09-26 22:32:10 -07002096 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
Alan Kwong9487de22016-01-16 22:06:36 -05002097 u32 bufsize = sizeof(int) * SDE_HW_ROT_REGDMA_TOTAL_CTX * 2;
2098
Abhijit Kulkarni298c8232016-09-26 22:32:10 -07002099 rot->iclient = mdata->iclient;
Alan Kwong9487de22016-01-16 22:06:36 -05002100
2101 handle = ion_alloc(rot->iclient, bufsize, SZ_4K,
2102 ION_HEAP(ION_SYSTEM_HEAP_ID), 0);
2103 if (IS_ERR_OR_NULL(handle)) {
2104 SDEROT_ERR("ion memory allocation failed\n");
2105 return -ENOMEM;
2106 }
2107
2108 data = &rot->swts_buf;
2109 data->len = bufsize;
2110 data->srcp_dma_buf = ion_share_dma_buf(rot->iclient, handle);
2111 if (IS_ERR(data->srcp_dma_buf)) {
2112 SDEROT_ERR("ion_dma_buf setup failed\n");
2113 rc = -ENOMEM;
2114 goto imap_err;
2115 }
2116
2117 sde_smmu_ctrl(1);
2118
2119 data->srcp_attachment = sde_smmu_dma_buf_attach(data->srcp_dma_buf,
2120 &rot->pdev->dev, SDE_IOMMU_DOMAIN_ROT_UNSECURE);
2121 if (IS_ERR_OR_NULL(data->srcp_attachment)) {
2122 SDEROT_ERR("sde_smmu_dma_buf_attach error\n");
2123 rc = -ENOMEM;
2124 goto err_put;
2125 }
2126
2127 data->srcp_table = dma_buf_map_attachment(data->srcp_attachment,
2128 DMA_BIDIRECTIONAL);
2129 if (IS_ERR_OR_NULL(data->srcp_table)) {
2130 SDEROT_ERR("dma_buf_map_attachment error\n");
2131 rc = -ENOMEM;
2132 goto err_detach;
2133 }
2134
2135 rc = sde_smmu_map_dma_buf(data->srcp_dma_buf, data->srcp_table,
2136 SDE_IOMMU_DOMAIN_ROT_UNSECURE, &data->addr,
2137 &data->len, DMA_BIDIRECTIONAL);
Alan Kwong6ce448d2016-11-24 18:45:20 -08002138 if (rc < 0) {
Alan Kwong9487de22016-01-16 22:06:36 -05002139 SDEROT_ERR("smmu_map_dma_buf failed: (%d)\n", rc);
2140 goto err_unmap;
2141 }
2142
Alan Kwong6ce448d2016-11-24 18:45:20 -08002143 dma_buf_begin_cpu_access(data->srcp_dma_buf, DMA_FROM_DEVICE);
Alan Kwong9487de22016-01-16 22:06:36 -05002144 rot->swts_buffer = dma_buf_kmap(data->srcp_dma_buf, 0);
2145 if (IS_ERR_OR_NULL(rot->swts_buffer)) {
2146 SDEROT_ERR("ion kernel memory mapping failed\n");
2147 rc = IS_ERR(rot->swts_buffer);
2148 goto kmap_err;
2149 }
2150
2151 data->mapped = true;
2152 SDEROT_DBG("swts buffer mapped: %pad/%lx va:%p\n", &data->addr,
2153 data->len, rot->swts_buffer);
2154
2155 ion_free(rot->iclient, handle);
2156
2157 sde_smmu_ctrl(0);
2158
2159 return rc;
2160kmap_err:
2161 sde_smmu_unmap_dma_buf(data->srcp_table, SDE_IOMMU_DOMAIN_ROT_UNSECURE,
2162 DMA_FROM_DEVICE, data->srcp_dma_buf);
2163err_unmap:
2164 dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table,
2165 DMA_FROM_DEVICE);
2166err_detach:
2167 dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment);
2168err_put:
2169 dma_buf_put(data->srcp_dma_buf);
2170 data->srcp_dma_buf = NULL;
2171imap_err:
2172 ion_free(rot->iclient, handle);
2173
2174 return rc;
2175}
2176
2177/*
2178 * sde_hw_rotator_swtc_destroy - destroy software timestamp buffer
2179 * @rot: Pointer to rotator hw
2180 */
2181static void sde_hw_rotator_swtc_destroy(struct sde_hw_rotator *rot)
2182{
2183 struct sde_mdp_img_data *data;
2184
2185 data = &rot->swts_buf;
2186
Alan Kwong6ce448d2016-11-24 18:45:20 -08002187 dma_buf_end_cpu_access(data->srcp_dma_buf, DMA_FROM_DEVICE);
Alan Kwong9487de22016-01-16 22:06:36 -05002188 dma_buf_kunmap(data->srcp_dma_buf, 0, rot->swts_buffer);
2189
2190 sde_smmu_unmap_dma_buf(data->srcp_table, SDE_IOMMU_DOMAIN_ROT_UNSECURE,
2191 DMA_FROM_DEVICE, data->srcp_dma_buf);
2192 dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table,
2193 DMA_FROM_DEVICE);
2194 dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment);
2195 dma_buf_put(data->srcp_dma_buf);
2196 data->srcp_dma_buf = NULL;
2197}
2198
2199/*
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04002200 * sde_hw_rotator_pre_pmevent - SDE rotator core will call this before a
2201 * PM event occurs
2202 * @mgr: Pointer to rotator manager
2203 * @pmon: Boolean indicate an on/off power event
2204 */
2205void sde_hw_rotator_pre_pmevent(struct sde_rot_mgr *mgr, bool pmon)
2206{
2207 struct sde_hw_rotator *rot;
2208 u32 l_ts, h_ts, swts, hwts;
Alan Kwong94e0f602017-11-07 23:01:44 -05002209 u32 rotsts, regdmasts, rotopmode;
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04002210
2211 /*
2212 * Check last HW timestamp with SW timestamp before power off event.
2213	 * If there is a mismatch, it is quite possible that the rotator HW
2214	 * is either hung or has not finished the last submitted job. In that
2215	 * case, it is best to do a timeout eventlog to capture some good
2216	 * event log data for analysis.
2217 */
2218 if (!pmon && mgr && mgr->hw_data) {
2219 rot = mgr->hw_data;
2220 h_ts = atomic_read(&rot->timestamp[ROT_QUEUE_HIGH_PRIORITY]);
2221 l_ts = atomic_read(&rot->timestamp[ROT_QUEUE_LOW_PRIORITY]);
2222
2223		/* construct the combined timestamp */
2224 swts = (h_ts & SDE_REGDMA_SWTS_MASK) |
2225 ((l_ts & SDE_REGDMA_SWTS_MASK) <<
2226 SDE_REGDMA_SWTS_SHIFT);
2227
2228 /* Need to turn on clock to access rotator register */
2229 sde_rotator_clk_ctrl(mgr, true);
2230 hwts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);
2231 regdmasts = SDE_ROTREG_READ(rot->mdss_base,
2232 REGDMA_CSR_REGDMA_BLOCK_STATUS);
2233 rotsts = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
Alan Kwong94e0f602017-11-07 23:01:44 -05002234 rotopmode = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_OP_MODE);
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04002235
2236 SDEROT_DBG(
Alan Kwong94e0f602017-11-07 23:01:44 -05002237 "swts:0x%x, hwts:0x%x, regdma-sts:0x%x, rottop-sts:0x%x, rottop-opmode:0x%x\n",
2238 swts, hwts, regdmasts, rotsts, rotopmode);
2239 SDEROT_EVTLOG(swts, hwts, regdmasts, rotsts, rotopmode);
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04002240
2241 if ((swts != hwts) && ((regdmasts & REGDMA_BUSY) ||
2242 (rotsts & ROT_STATUS_MASK))) {
2243 SDEROT_ERR(
2244 "Mismatch SWTS with HWTS: swts:0x%x, hwts:0x%x, regdma-sts:0x%x, rottop-sts:0x%x\n",
2245 swts, hwts, regdmasts, rotsts);
Alan Kwong94e0f602017-11-07 23:01:44 -05002246 _sde_hw_rotator_dump_status(rot, NULL);
Benjamin Chan2d6411a2017-03-28 18:01:53 -04002247 SDEROT_EVTLOG_TOUT_HANDLER("rot", "rot_dbg_bus",
2248 "vbif_dbg_bus", "panic");
Alan Kwong94e0f602017-11-07 23:01:44 -05002249 } else if (!SDE_ROTTOP_IN_OFFLINE_MODE(rotopmode) &&
2250 ((regdmasts & REGDMA_BUSY) ||
2251 (rotsts & ROT_BUSY_BIT))) {
2252 /*
2253			 * rotator can get stuck in inline mode while mdp is detached
2254 */
2255 SDEROT_WARN(
2256 "Inline Rot busy: regdma-sts:0x%x, rottop-sts:0x%x, rottop-opmode:0x%x\n",
2257 regdmasts, rotsts, rotopmode);
2258 sde_hw_rotator_reset(rot, NULL);
2259 } else if ((regdmasts & REGDMA_BUSY) ||
2260 (rotsts & ROT_BUSY_BIT)) {
2261 _sde_hw_rotator_dump_status(rot, NULL);
2262 SDEROT_EVTLOG_TOUT_HANDLER("rot", "rot_dbg_bus",
2263 "vbif_dbg_bus", "panic");
2264 sde_hw_rotator_reset(rot, NULL);
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04002265 }
2266
2267 /* Turn off rotator clock after checking rotator registers */
2268 sde_rotator_clk_ctrl(mgr, false);
2269 }
2270}
2271
2272/*
2273 * sde_hw_rotator_post_pmevent - SDE rotator core will call this after a
2274 * PM event occurs
2275 * @mgr: Pointer to rotator manager
2276 * @pmon: Boolean indicate an on/off power event
2277 */
2278void sde_hw_rotator_post_pmevent(struct sde_rot_mgr *mgr, bool pmon)
2279{
2280 struct sde_hw_rotator *rot;
2281 u32 l_ts, h_ts, swts;
2282
2283 /*
2284 * After a power on event, the rotator HW is reset to default setting.
2285 * It is necessary to synchronize the SW timestamp with the HW.
2286 */
2287 if (pmon && mgr && mgr->hw_data) {
2288 rot = mgr->hw_data;
2289 h_ts = atomic_read(&rot->timestamp[ROT_QUEUE_HIGH_PRIORITY]);
2290 l_ts = atomic_read(&rot->timestamp[ROT_QUEUE_LOW_PRIORITY]);
2291
2292		/* construct the combined timestamp */
2293 swts = (h_ts & SDE_REGDMA_SWTS_MASK) |
2294 ((l_ts & SDE_REGDMA_SWTS_MASK) <<
2295 SDE_REGDMA_SWTS_SHIFT);
2296
2297		SDEROT_DBG("swts:0x%x, h_ts:0x%x, l_ts:0x%x\n",
2298 swts, h_ts, l_ts);
2299 SDEROT_EVTLOG(swts, h_ts, l_ts);
2300 rot->reset_hw_ts = true;
2301 rot->last_hw_ts = swts;
2302 }
2303}
2304
2305/*
Alan Kwong9487de22016-01-16 22:06:36 -05002306 * sde_hw_rotator_destroy - Destroy hw rotator and free allocated resources
2307 * @mgr: Pointer to rotator manager
2308 */
2309static void sde_hw_rotator_destroy(struct sde_rot_mgr *mgr)
2310{
2311 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
2312 struct sde_hw_rotator *rot;
2313
2314 if (!mgr || !mgr->pdev || !mgr->hw_data) {
2315 SDEROT_ERR("null parameters\n");
2316 return;
2317 }
2318
2319 rot = mgr->hw_data;
2320 if (rot->irq_num >= 0)
2321 devm_free_irq(&mgr->pdev->dev, rot->irq_num, mdata);
2322
2323 if (rot->mode == ROT_REGDMA_ON)
2324 sde_hw_rotator_swtc_destroy(rot);
2325
2326 devm_kfree(&mgr->pdev->dev, mgr->hw_data);
2327 mgr->hw_data = NULL;
2328}
2329
2330/*
2331 * sde_hw_rotator_alloc_ext - allocate rotator resource from rotator hw
2332 * @mgr: Pointer to rotator manager
2333 * @pipe_id: pipe identifier (not used)
2334 * @wb_id: writeback identifier/priority queue identifier
2335 *
2336 * This function allocates a new hw rotator resource for the given priority.
2337 */
2338static struct sde_rot_hw_resource *sde_hw_rotator_alloc_ext(
2339 struct sde_rot_mgr *mgr, u32 pipe_id, u32 wb_id)
2340{
2341 struct sde_hw_rotator_resource_info *resinfo;
2342
2343 if (!mgr || !mgr->hw_data) {
2344 SDEROT_ERR("null parameters\n");
2345 return NULL;
2346 }
2347
2348 /*
2349 * Allocate rotator resource info. Each allocation is per
2350 * HW priority queue
2351 */
2352 resinfo = devm_kzalloc(&mgr->pdev->dev, sizeof(*resinfo), GFP_KERNEL);
2353 if (!resinfo) {
2354		SDEROT_ERR("Failed to allocate HW rotator resource info\n");
2355 return NULL;
2356 }
2357
2358 resinfo->rot = mgr->hw_data;
2359 resinfo->hw.wb_id = wb_id;
2360 atomic_set(&resinfo->hw.num_active, 0);
2361 init_waitqueue_head(&resinfo->hw.wait_queue);
2362
2363 /* For non-regdma, only support one active session */
2364 if (resinfo->rot->mode == ROT_REGDMA_OFF)
2365 resinfo->hw.max_active = 1;
2366 else {
2367 resinfo->hw.max_active = SDE_HW_ROT_REGDMA_TOTAL_CTX - 1;
2368
2369 if (resinfo->rot->iclient == NULL)
2370 sde_hw_rotator_swts_create(resinfo->rot);
2371 }
2372
Alan Kwongf987ea32016-07-06 12:11:44 -04002373 if (resinfo->rot->irq_num >= 0)
Alan Kwong818b7fc2016-07-24 22:07:41 -04002374 sde_hw_rotator_enable_irq(resinfo->rot);
Alan Kwongf987ea32016-07-06 12:11:44 -04002375
Alan Kwong9487de22016-01-16 22:06:36 -05002376 SDEROT_DBG("New rotator resource:%p, priority:%d\n",
2377 resinfo, wb_id);
2378
2379 return &resinfo->hw;
2380}
2381
2382/*
2383 * sde_hw_rotator_free_ext - free the given rotator resource
2384 * @mgr: Pointer to rotator manager
2385 * @hw: Pointer to rotator resource
2386 */
2387static void sde_hw_rotator_free_ext(struct sde_rot_mgr *mgr,
2388 struct sde_rot_hw_resource *hw)
2389{
2390 struct sde_hw_rotator_resource_info *resinfo;
2391
2392 if (!mgr || !mgr->hw_data)
2393 return;
2394
2395 resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
2396
2397 SDEROT_DBG(
2398 "Free rotator resource:%p, priority:%d, active:%d, pending:%d\n",
2399 resinfo, hw->wb_id, atomic_read(&hw->num_active),
2400 hw->pending_count);
2401
Alan Kwongf987ea32016-07-06 12:11:44 -04002402 if (resinfo->rot->irq_num >= 0)
Alan Kwong818b7fc2016-07-24 22:07:41 -04002403 sde_hw_rotator_disable_irq(resinfo->rot);
Alan Kwongf987ea32016-07-06 12:11:44 -04002404
Alan Kwong9487de22016-01-16 22:06:36 -05002405 devm_kfree(&mgr->pdev->dev, resinfo);
2406}
2407
2408/*
2409 * sde_hw_rotator_alloc_rotctx - allocate rotator context
2410 * @rot: Pointer to rotator hw
2411 * @hw: Pointer to rotator resource
2412 * @session_id: Session identifier of this context
Clarence Ip9e6c3302017-06-02 11:02:57 -04002413 * @sequence_id: Sequence identifier of this request
Alan Kwong6bc64622017-02-04 17:36:03 -08002414 * @sbuf_mode: true if stream buffer is requested
Alan Kwong9487de22016-01-16 22:06:36 -05002415 *
2416 * This function allocates a new rotator context for the given session id.
2417 */
2418static struct sde_hw_rotator_context *sde_hw_rotator_alloc_rotctx(
2419 struct sde_hw_rotator *rot,
2420 struct sde_rot_hw_resource *hw,
Alan Kwong6bc64622017-02-04 17:36:03 -08002421 u32 session_id,
Clarence Ip9e6c3302017-06-02 11:02:57 -04002422 u32 sequence_id,
Alan Kwong6bc64622017-02-04 17:36:03 -08002423 bool sbuf_mode)
Alan Kwong9487de22016-01-16 22:06:36 -05002424{
2425 struct sde_hw_rotator_context *ctx;
2426
2427 /* Allocate rotator context */
2428 ctx = devm_kzalloc(&rot->pdev->dev, sizeof(*ctx), GFP_KERNEL);
2429 if (!ctx) {
2430		SDEROT_ERR("Failed to allocate HW rotator context\n");
2431 return NULL;
2432 }
2433
2434 ctx->rot = rot;
2435 ctx->q_id = hw->wb_id;
2436 ctx->session_id = session_id;
Clarence Ip9e6c3302017-06-02 11:02:57 -04002437 ctx->sequence_id = sequence_id;
Alan Kwong9487de22016-01-16 22:06:36 -05002438 ctx->hwres = hw;
2439 ctx->timestamp = atomic_add_return(1, &rot->timestamp[ctx->q_id]);
2440 ctx->timestamp &= SDE_REGDMA_SWTS_MASK;
2441 ctx->is_secure = false;
Alan Kwong6bc64622017-02-04 17:36:03 -08002442 ctx->sbuf_mode = sbuf_mode;
2443 INIT_LIST_HEAD(&ctx->list);
Alan Kwong9487de22016-01-16 22:06:36 -05002444
2445 ctx->regdma_base = rot->cmd_wr_ptr[ctx->q_id]
2446 [sde_hw_rotator_get_regdma_ctxidx(ctx)];
2447 ctx->regdma_wrptr = ctx->regdma_base;
2448 ctx->ts_addr = (dma_addr_t)((u32 *)rot->swts_buf.addr +
2449 ctx->q_id * SDE_HW_ROT_REGDMA_TOTAL_CTX +
2450 sde_hw_rotator_get_regdma_ctxidx(ctx));
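	/*
	 * The shared software timestamp buffer holds one u32 slot per
	 * regdma context, grouped per priority queue; ts_addr above points
	 * at this context's slot.
	 */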
2451
Alan Kwong818b7fc2016-07-24 22:07:41 -04002452 ctx->last_regdma_timestamp = SDE_REGDMA_SWTS_INVALID;
2453
Alan Kwong9487de22016-01-16 22:06:36 -05002454 init_completion(&ctx->rot_comp);
Alan Kwong818b7fc2016-07-24 22:07:41 -04002455 init_waitqueue_head(&ctx->regdma_waitq);
Alan Kwong9487de22016-01-16 22:06:36 -05002456
2457 /* Store rotator context for lookup purpose */
2458 sde_hw_rotator_put_ctx(ctx);
2459
2460 SDEROT_DBG(
Alan Kwong6bc64622017-02-04 17:36:03 -08002461 "New rot CTX:%p, ctxidx:%d, session-id:%d, prio:%d, timestamp:%X, active:%d sbuf:%d\n",
Alan Kwong9487de22016-01-16 22:06:36 -05002462 ctx, sde_hw_rotator_get_regdma_ctxidx(ctx), ctx->session_id,
2463 ctx->q_id, ctx->timestamp,
Alan Kwong6bc64622017-02-04 17:36:03 -08002464 atomic_read(&ctx->hwres->num_active),
2465 ctx->sbuf_mode);
Alan Kwong9487de22016-01-16 22:06:36 -05002466
2467 return ctx;
2468}
2469
2470/*
2471 * sde_hw_rotator_free_rotctx - free the given rotator context
2472 * @rot: Pointer to rotator hw
2473 * @ctx: Pointer to rotator context
2474 */
2475static void sde_hw_rotator_free_rotctx(struct sde_hw_rotator *rot,
2476 struct sde_hw_rotator_context *ctx)
2477{
2478 if (!rot || !ctx)
2479 return;
2480
2481 SDEROT_DBG(
Alan Kwong6bc64622017-02-04 17:36:03 -08002482 "Free rot CTX:%p, ctxidx:%d, session-id:%d, prio:%d, timestamp:%X, active:%d sbuf:%d\n",
Alan Kwong9487de22016-01-16 22:06:36 -05002483 ctx, sde_hw_rotator_get_regdma_ctxidx(ctx), ctx->session_id,
2484 ctx->q_id, ctx->timestamp,
Alan Kwong6bc64622017-02-04 17:36:03 -08002485 atomic_read(&ctx->hwres->num_active),
2486 ctx->sbuf_mode);
Alan Kwong9487de22016-01-16 22:06:36 -05002487
Benjamin Chanc3e185f2016-11-08 21:48:21 -05002488	/* Clear rotator context from the lookup table */
2489 sde_hw_rotator_clr_ctx(ctx);
Alan Kwong9487de22016-01-16 22:06:36 -05002490
2491 devm_kfree(&rot->pdev->dev, ctx);
2492}
2493
2494/*
2495 * sde_hw_rotator_config - configure hw for the given rotation entry
2496 * @hw: Pointer to rotator resource
2497 * @entry: Pointer to rotation entry
2498 *
2499 * This function setup the fetch/writeback/rotator blocks, as well as VBIF
2500 * based on the given rotation entry.
2501 */
2502static int sde_hw_rotator_config(struct sde_rot_hw_resource *hw,
2503 struct sde_rot_entry *entry)
2504{
2505 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
2506 struct sde_hw_rotator *rot;
2507 struct sde_hw_rotator_resource_info *resinfo;
2508 struct sde_hw_rotator_context *ctx;
2509 struct sde_hw_rot_sspp_cfg sspp_cfg;
2510 struct sde_hw_rot_wb_cfg wb_cfg;
2511 u32 danger_lut = 0; /* applicable for realtime client only */
2512 u32 safe_lut = 0; /* applicable for realtime client only */
2513 u32 flags = 0;
Benjamin Chana9dd3052017-02-14 17:39:32 -05002514 u32 rststs = 0;
Alan Kwong9487de22016-01-16 22:06:36 -05002515 struct sde_rotation_item *item;
Alan Kwong6bc64622017-02-04 17:36:03 -08002516 int ret;
Alan Kwong9487de22016-01-16 22:06:36 -05002517
2518 if (!hw || !entry) {
2519 SDEROT_ERR("null hw resource/entry\n");
2520 return -EINVAL;
2521 }
2522
2523 resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
2524 rot = resinfo->rot;
2525 item = &entry->item;
2526
Alan Kwong6bc64622017-02-04 17:36:03 -08002527 ctx = sde_hw_rotator_alloc_rotctx(rot, hw, item->session_id,
Clarence Ip9e6c3302017-06-02 11:02:57 -04002528 item->sequence_id, item->output.sbuf);
Alan Kwong9487de22016-01-16 22:06:36 -05002529 if (!ctx) {
2530 SDEROT_ERR("Failed allocating rotator context!!\n");
2531 return -EINVAL;
2532 }
2533
Alan Kwong6bc64622017-02-04 17:36:03 -08002534 /* save entry for debugging purposes */
2535 ctx->last_entry = entry;
2536
2537 if (test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map)) {
2538 if (entry->dst_buf.sbuf) {
2539 u32 op_mode;
2540
2541 if (entry->item.trigger ==
2542 SDE_ROTATOR_TRIGGER_COMMAND)
2543 ctx->start_ctrl = (rot->cmd_trigger << 4);
2544 else if (entry->item.trigger ==
2545 SDE_ROTATOR_TRIGGER_VIDEO)
2546 ctx->start_ctrl = (rot->vid_trigger << 4);
2547 else
2548 ctx->start_ctrl = 0;
2549
2550 ctx->sys_cache_mode = BIT(15) |
2551 ((item->output.scid & 0x1f) << 8) |
2552 (item->output.writeback ? 0x5 : 0);
2553
2554 ctx->op_mode = BIT(4) |
2555 ((ctx->rot->sbuf_headroom & 0xff) << 8);
2556
2557 /* detect transition to inline mode */
2558 op_mode = (SDE_ROTREG_READ(rot->mdss_base,
2559 ROTTOP_OP_MODE) >> 4) & 0x3;
2560 if (!op_mode) {
2561 u32 status;
2562
2563 status = SDE_ROTREG_READ(rot->mdss_base,
2564 ROTTOP_STATUS);
2565 if (status & BIT(0)) {
2566 SDEROT_ERR("rotator busy 0x%x\n",
2567 status);
Clarence Ipcd140292017-09-22 16:24:08 -04002568 _sde_hw_rotator_dump_status(rot, NULL);
Alan Kwong6bc64622017-02-04 17:36:03 -08002569 SDEROT_EVTLOG_TOUT_HANDLER("rot",
2570 "vbif_dbg_bus",
2571 "panic");
2572 }
2573 }
2574
2575 } else {
2576 ctx->start_ctrl = BIT(0);
2577 ctx->sys_cache_mode = 0;
2578 ctx->op_mode = 0;
2579 }
2580 } else {
2581 ctx->start_ctrl = BIT(0);
2582 }
2583
2584 SDEROT_EVTLOG(ctx->start_ctrl, ctx->sys_cache_mode, ctx->op_mode);
2585
Benjamin Chana9dd3052017-02-14 17:39:32 -05002586 /*
2587	 * If the rotator HW was reset but the PM event notification was
2588	 * missed, we need to init the SW timestamp automatically.
2589 */
2590 rststs = SDE_ROTREG_READ(rot->mdss_base, REGDMA_RESET_STATUS_REG);
2591 if (!rot->reset_hw_ts && rststs) {
2592 u32 l_ts, h_ts, swts;
2593
2594 swts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);
2595 h_ts = atomic_read(&rot->timestamp[ROT_QUEUE_HIGH_PRIORITY]);
2596 l_ts = atomic_read(&rot->timestamp[ROT_QUEUE_LOW_PRIORITY]);
2597 SDEROT_EVTLOG(0xbad0, rststs, swts, h_ts, l_ts);
2598
2599 if (ctx->q_id == ROT_QUEUE_HIGH_PRIORITY)
2600 h_ts = (h_ts - 1) & SDE_REGDMA_SWTS_MASK;
2601 else
2602 l_ts = (l_ts - 1) & SDE_REGDMA_SWTS_MASK;
2603
2604		/* construct the combined timestamp */
2605 swts = (h_ts & SDE_REGDMA_SWTS_MASK) |
2606 ((l_ts & SDE_REGDMA_SWTS_MASK) <<
2607 SDE_REGDMA_SWTS_SHIFT);
2608
2609		SDEROT_DBG("swts:0x%x, h_ts:0x%x, l_ts:0x%x\n",
2610 swts, h_ts, l_ts);
2611 SDEROT_EVTLOG(0x900d, swts, h_ts, l_ts);
2612 rot->last_hw_ts = swts;
2613
2614 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG,
2615 rot->last_hw_ts);
2616 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_RESET_STATUS_REG, 0);
2617 /* ensure write is issued to the rotator HW */
2618 wmb();
2619 }
2620
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04002621 if (rot->reset_hw_ts) {
2622 SDEROT_EVTLOG(rot->last_hw_ts);
2623 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG,
2624 rot->last_hw_ts);
Benjamin Chana9dd3052017-02-14 17:39:32 -05002625 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_RESET_STATUS_REG, 0);
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04002626 /* ensure write is issued to the rotator HW */
2627 wmb();
2628 rot->reset_hw_ts = false;
2629 }
2630
Alan Kwong9487de22016-01-16 22:06:36 -05002631 flags = (item->flags & SDE_ROTATION_FLIP_LR) ?
2632 SDE_ROT_FLAG_FLIP_LR : 0;
2633 flags |= (item->flags & SDE_ROTATION_FLIP_UD) ?
2634 SDE_ROT_FLAG_FLIP_UD : 0;
2635 flags |= (item->flags & SDE_ROTATION_90) ?
2636 SDE_ROT_FLAG_ROT_90 : 0;
2637 flags |= (item->flags & SDE_ROTATION_DEINTERLACE) ?
2638 SDE_ROT_FLAG_DEINTERLACE : 0;
2639 flags |= (item->flags & SDE_ROTATION_SECURE) ?
2640 SDE_ROT_FLAG_SECURE_OVERLAY_SESSION : 0;
Abhijit Kulkarni298c8232016-09-26 22:32:10 -07002641 flags |= (item->flags & SDE_ROTATION_SECURE_CAMERA) ?
2642 SDE_ROT_FLAG_SECURE_CAMERA_SESSION : 0;
2643
Alan Kwong9487de22016-01-16 22:06:36 -05002644
2645 sspp_cfg.img_width = item->input.width;
2646 sspp_cfg.img_height = item->input.height;
Benjamin Chan99eb63b2016-12-21 15:45:26 -05002647 sspp_cfg.fps = entry->perf->config.frame_rate;
2648 sspp_cfg.bw = entry->perf->bw;
Alan Kwong9487de22016-01-16 22:06:36 -05002649 sspp_cfg.fmt = sde_get_format_params(item->input.format);
2650 if (!sspp_cfg.fmt) {
2651 SDEROT_ERR("null format\n");
Alan Kwong6bc64622017-02-04 17:36:03 -08002652 ret = -EINVAL;
2653 goto error;
Alan Kwong9487de22016-01-16 22:06:36 -05002654 }
2655 sspp_cfg.src_rect = &item->src_rect;
2656 sspp_cfg.data = &entry->src_buf;
2657 sde_mdp_get_plane_sizes(sspp_cfg.fmt, item->input.width,
2658 item->input.height, &sspp_cfg.src_plane,
2659 0, /* No bwc_mode */
2660 (flags & SDE_ROT_FLAG_SOURCE_ROTATED_90) ?
2661 true : false);
2662
2663 rot->ops.setup_rotator_fetchengine(ctx, ctx->q_id,
Benjamin Chanfb6faa32016-08-16 17:21:01 -04002664 &sspp_cfg, danger_lut, safe_lut,
2665 entry->dnsc_factor_w, entry->dnsc_factor_h, flags);
Alan Kwong9487de22016-01-16 22:06:36 -05002666
2667 wb_cfg.img_width = item->output.width;
2668 wb_cfg.img_height = item->output.height;
Benjamin Chan99eb63b2016-12-21 15:45:26 -05002669 wb_cfg.fps = entry->perf->config.frame_rate;
2670 wb_cfg.bw = entry->perf->bw;
Alan Kwong9487de22016-01-16 22:06:36 -05002671 wb_cfg.fmt = sde_get_format_params(item->output.format);
Samantha Tranf4259fd2017-09-28 16:42:12 -07002672 if (!wb_cfg.fmt) {
2673 SDEROT_ERR("null format\n");
2674 ret = -EINVAL;
2675 goto error;
2676 }
2677
Alan Kwong9487de22016-01-16 22:06:36 -05002678 wb_cfg.dst_rect = &item->dst_rect;
2679 wb_cfg.data = &entry->dst_buf;
2680 sde_mdp_get_plane_sizes(wb_cfg.fmt, item->output.width,
2681 item->output.height, &wb_cfg.dst_plane,
2682 0, /* No bwc_mode */
2683 (flags & SDE_ROT_FLAG_ROT_90) ? true : false);
2684
2685 wb_cfg.v_downscale_factor = entry->dnsc_factor_h;
2686 wb_cfg.h_downscale_factor = entry->dnsc_factor_w;
Alan Kwong498d59f2017-02-11 18:56:34 -08002687 wb_cfg.prefill_bw = item->prefill_bw;
Alan Kwong9487de22016-01-16 22:06:36 -05002688
2689 rot->ops.setup_rotator_wbengine(ctx, ctx->q_id, &wb_cfg, flags);
2690
2691 /* setup VA mapping for debugfs */
2692 if (rot->dbgmem) {
2693 sde_hw_rotator_map_vaddr(&ctx->src_dbgbuf,
2694 &item->input,
2695 &entry->src_buf);
2696
2697 sde_hw_rotator_map_vaddr(&ctx->dst_dbgbuf,
2698 &item->output,
2699 &entry->dst_buf);
2700 }
2701
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04002702 SDEROT_EVTLOG(ctx->timestamp, flags,
2703 item->input.width, item->input.height,
Benjamin Chan53e3bce2016-08-31 14:43:29 -04002704 item->output.width, item->output.height,
Benjamin Chan59a06052017-01-12 18:06:03 -05002705 entry->src_buf.p[0].addr, entry->dst_buf.p[0].addr,
Benjamin Chan1b94f952017-01-23 17:42:30 -05002706 item->input.format, item->output.format,
2707 entry->perf->config.frame_rate);
Benjamin Chan53e3bce2016-08-31 14:43:29 -04002708
Veera Sundaram Sankaran3e539fe2017-05-10 17:03:32 -07002709 if (!ctx->sbuf_mode && mdata->default_ot_rd_limit) {
Alan Kwong9487de22016-01-16 22:06:36 -05002710 struct sde_mdp_set_ot_params ot_params;
2711
2712 memset(&ot_params, 0, sizeof(struct sde_mdp_set_ot_params));
2713 ot_params.xin_id = XIN_SSPP;
2714 ot_params.num = 0; /* not used */
Alan Kwongeffb5ee2016-03-12 19:47:45 -05002715 ot_params.width = entry->perf->config.input.width;
2716 ot_params.height = entry->perf->config.input.height;
2717 ot_params.fps = entry->perf->config.frame_rate;
Alan Kwong9487de22016-01-16 22:06:36 -05002718 ot_params.reg_off_vbif_lim_conf = MMSS_VBIF_RD_LIM_CONF;
2719 ot_params.reg_off_mdp_clk_ctrl =
2720 MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0;
2721 ot_params.bit_off_mdp_clk_ctrl =
2722 MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN0;
Benjamin Chan99eb63b2016-12-21 15:45:26 -05002723 ot_params.fmt = ctx->is_traffic_shaping ?
2724 SDE_PIX_FMT_ABGR_8888 :
2725 entry->perf->config.input.format;
Benjamin Chan1b94f952017-01-23 17:42:30 -05002726 ot_params.rotsts_base = rot->mdss_base + ROTTOP_STATUS;
2727 ot_params.rotsts_busy_mask = ROT_BUSY_BIT;
Alan Kwong9487de22016-01-16 22:06:36 -05002728 sde_mdp_set_ot_limit(&ot_params);
2729 }
2730
Veera Sundaram Sankaran3e539fe2017-05-10 17:03:32 -07002731 if (!ctx->sbuf_mode && mdata->default_ot_wr_limit) {
Alan Kwong9487de22016-01-16 22:06:36 -05002732 struct sde_mdp_set_ot_params ot_params;
2733
2734 memset(&ot_params, 0, sizeof(struct sde_mdp_set_ot_params));
2735 ot_params.xin_id = XIN_WRITEBACK;
2736 ot_params.num = 0; /* not used */
Alan Kwongeffb5ee2016-03-12 19:47:45 -05002737 ot_params.width = entry->perf->config.input.width;
2738 ot_params.height = entry->perf->config.input.height;
2739 ot_params.fps = entry->perf->config.frame_rate;
Alan Kwong9487de22016-01-16 22:06:36 -05002740 ot_params.reg_off_vbif_lim_conf = MMSS_VBIF_WR_LIM_CONF;
2741 ot_params.reg_off_mdp_clk_ctrl =
2742 MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0;
2743 ot_params.bit_off_mdp_clk_ctrl =
2744 MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN1;
Benjamin Chan99eb63b2016-12-21 15:45:26 -05002745 ot_params.fmt = ctx->is_traffic_shaping ?
2746 SDE_PIX_FMT_ABGR_8888 :
2747 entry->perf->config.input.format;
Benjamin Chan1b94f952017-01-23 17:42:30 -05002748 ot_params.rotsts_base = rot->mdss_base + ROTTOP_STATUS;
2749 ot_params.rotsts_busy_mask = ROT_BUSY_BIT;
Alan Kwong9487de22016-01-16 22:06:36 -05002750 sde_mdp_set_ot_limit(&ot_params);
2751 }
2752
2753 if (test_bit(SDE_QOS_PER_PIPE_LUT, mdata->sde_qos_map)) {
2754 u32 qos_lut = 0; /* low priority for nrt read client */
2755
2756 trace_rot_perf_set_qos_luts(XIN_SSPP, sspp_cfg.fmt->format,
2757 qos_lut, sde_mdp_is_linear_format(sspp_cfg.fmt));
2758
2759 SDE_ROTREG_WRITE(rot->mdss_base, ROT_SSPP_CREQ_LUT, qos_lut);
2760 }
2761
Veera Sundaram Sankarane15dd222017-04-20 08:13:08 -07002762 /* VBIF QoS and other settings */
Veera Sundaram Sankaran3e539fe2017-05-10 17:03:32 -07002763 if (!ctx->sbuf_mode)
2764 sde_hw_rotator_vbif_setting(rot);
Benjamin Chan2d6411a2017-03-28 18:01:53 -04002765
Alan Kwong9487de22016-01-16 22:06:36 -05002766 return 0;
Alan Kwong6bc64622017-02-04 17:36:03 -08002767
2768error:
2769 sde_hw_rotator_free_rotctx(rot, ctx);
2770 return ret;
Alan Kwong9487de22016-01-16 22:06:36 -05002771}
2772
2773/*
Clarence Ip3ce07c02017-08-11 16:21:45 -04002774 * sde_hw_rotator_cancel - cancel hw configuration for the given rotation entry
2775 * @hw: Pointer to rotator resource
2776 * @entry: Pointer to rotation entry
2777 *
2778 * This function cancels a previously configured rotation entry.
2779 */
2780static int sde_hw_rotator_cancel(struct sde_rot_hw_resource *hw,
2781 struct sde_rot_entry *entry)
2782{
2783 struct sde_hw_rotator *rot;
2784 struct sde_hw_rotator_resource_info *resinfo;
2785 struct sde_hw_rotator_context *ctx;
2786 unsigned long flags;
2787
2788 if (!hw || !entry) {
2789 SDEROT_ERR("null hw resource/entry\n");
2790 return -EINVAL;
2791 }
2792
2793 resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
2794 rot = resinfo->rot;
2795
2796 /* Lookup rotator context from session-id */
2797 ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id,
2798 entry->item.sequence_id, hw->wb_id);
2799 if (!ctx) {
2800		SDEROT_ERR("Cannot locate rotator ctx from session id:%d\n",
2801 entry->item.session_id);
2802 return -EINVAL;
2803 }
2804
2805 spin_lock_irqsave(&rot->rotisr_lock, flags);
2806 sde_hw_rotator_update_swts(rot, ctx, ctx->timestamp);
2807 spin_unlock_irqrestore(&rot->rotisr_lock, flags);
2808
2809 SDEROT_EVTLOG(entry->item.session_id, ctx->timestamp);
2810
2811 if (rot->dbgmem) {
2812 sde_hw_rotator_unmap_vaddr(&ctx->src_dbgbuf);
2813 sde_hw_rotator_unmap_vaddr(&ctx->dst_dbgbuf);
2814 }
2815
2816 /* Current rotator context job is finished, time to free up */
2817 sde_hw_rotator_free_rotctx(rot, ctx);
2818
2819 return 0;
2820}
2821
2822/*
Alan Kwong9487de22016-01-16 22:06:36 -05002823 * sde_hw_rotator_kickoff - kickoff processing on the given entry
2824 * @hw: Pointer to rotator resource
2825 * @entry: Pointer to rotation entry
2826 */
2827static int sde_hw_rotator_kickoff(struct sde_rot_hw_resource *hw,
2828 struct sde_rot_entry *entry)
2829{
2830 struct sde_hw_rotator *rot;
2831 struct sde_hw_rotator_resource_info *resinfo;
2832 struct sde_hw_rotator_context *ctx;
Alan Kwong9487de22016-01-16 22:06:36 -05002833
2834 if (!hw || !entry) {
2835 SDEROT_ERR("null hw resource/entry\n");
2836 return -EINVAL;
2837 }
2838
2839 resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
2840 rot = resinfo->rot;
2841
2842 /* Lookup rotator context from session-id */
Clarence Ip9e6c3302017-06-02 11:02:57 -04002843 ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id,
2844 entry->item.sequence_id, hw->wb_id);
Alan Kwong9487de22016-01-16 22:06:36 -05002845 if (!ctx) {
2846		SDEROT_ERR("Cannot locate rotator ctx from session id:%d\n",
2847 entry->item.session_id);
Benjamin Chan62b94ed2016-08-18 23:55:21 -04002848 return -EINVAL;
Alan Kwong9487de22016-01-16 22:06:36 -05002849 }
Alan Kwong9487de22016-01-16 22:06:36 -05002850
Alan Kwong9487de22016-01-16 22:06:36 -05002851 rot->ops.start_rotator(ctx, ctx->q_id);
2852
2853 return 0;
2854}
2855
Clarence Ip19339b32017-10-14 20:59:00 -04002856static int sde_hw_rotator_abort_kickoff(struct sde_rot_hw_resource *hw,
2857 struct sde_rot_entry *entry)
2858{
2859 struct sde_hw_rotator *rot;
2860 struct sde_hw_rotator_resource_info *resinfo;
2861 struct sde_hw_rotator_context *ctx;
2862 unsigned long flags;
2863
2864 if (!hw || !entry) {
2865 SDEROT_ERR("null hw resource/entry\n");
2866 return -EINVAL;
2867 }
2868
2869 resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
2870 rot = resinfo->rot;
2871
2872 /* Lookup rotator context from session-id */
2873 ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id,
2874 entry->item.sequence_id, hw->wb_id);
2875 if (!ctx) {
2876		SDEROT_ERR("Cannot locate rotator ctx from session id:%d\n",
2877 entry->item.session_id);
2878 return -EINVAL;
2879 }
2880
2881 spin_lock_irqsave(&rot->rotisr_lock, flags);
2882 sde_hw_rotator_update_swts(rot, ctx, ctx->timestamp);
2883 ctx->abort = true;
2884 wake_up_all(&ctx->regdma_waitq);
2885 spin_unlock_irqrestore(&rot->rotisr_lock, flags);
2886
2887 SDEROT_EVTLOG(entry->item.session_id, ctx->timestamp);
2888
2889 return 0;
2890}
2891
Alan Kwong9487de22016-01-16 22:06:36 -05002892/*
2893 * sde_hw_rotator_wait4done - wait for completion notification
2894 * @hw: Pointer to rotator resource
2895 * @entry: Pointer to rotation entry
2896 *
2897 * This function blocks until the given entry is complete, error
2898 * is detected, or timeout.
2899 */
2900static int sde_hw_rotator_wait4done(struct sde_rot_hw_resource *hw,
2901 struct sde_rot_entry *entry)
2902{
2903 struct sde_hw_rotator *rot;
2904 struct sde_hw_rotator_resource_info *resinfo;
2905 struct sde_hw_rotator_context *ctx;
2906 int ret;
2907
2908 if (!hw || !entry) {
2909 SDEROT_ERR("null hw resource/entry\n");
2910 return -EINVAL;
2911 }
2912
2913 resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
2914 rot = resinfo->rot;
2915
2916 /* Lookup rotator context from session-id */
Clarence Ip9e6c3302017-06-02 11:02:57 -04002917 ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id,
2918 entry->item.sequence_id, hw->wb_id);
Alan Kwong9487de22016-01-16 22:06:36 -05002919 if (!ctx) {
2920		SDEROT_ERR("Cannot locate rotator ctx from session id:%d\n",
2921 entry->item.session_id);
Benjamin Chan62b94ed2016-08-18 23:55:21 -04002922 return -EINVAL;
Alan Kwong9487de22016-01-16 22:06:36 -05002923 }
Alan Kwong9487de22016-01-16 22:06:36 -05002924
2925 ret = rot->ops.wait_rotator_done(ctx, ctx->q_id, 0);
2926
Alan Kwong9487de22016-01-16 22:06:36 -05002927 if (rot->dbgmem) {
2928 sde_hw_rotator_unmap_vaddr(&ctx->src_dbgbuf);
2929 sde_hw_rotator_unmap_vaddr(&ctx->dst_dbgbuf);
2930 }
2931
2932	/* Current rotator context job is finished, time to free up */
2933 sde_hw_rotator_free_rotctx(rot, ctx);
2934
2935 return ret;
2936}
2937
2938/*
2939 * sde_rotator_hw_rev_init - setup feature and/or capability bitmask
2940 * @rot: Pointer to hw rotator
2941 *
2942 * This function initializes feature and/or capability bitmask based on
2943 * h/w version read from the device.
2944 */
2945static int sde_rotator_hw_rev_init(struct sde_hw_rotator *rot)
2946{
2947 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
2948 u32 hw_version;
2949
2950 if (!mdata) {
2951 SDEROT_ERR("null rotator data\n");
2952 return -EINVAL;
2953 }
2954
2955 hw_version = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_HW_VERSION);
2956 SDEROT_DBG("hw version %8.8x\n", hw_version);
2957
2958 clear_bit(SDE_QOS_PER_PIPE_IB, mdata->sde_qos_map);
2959 set_bit(SDE_QOS_OVERHEAD_FACTOR, mdata->sde_qos_map);
Alan Kwong9487de22016-01-16 22:06:36 -05002960 set_bit(SDE_QOS_OTLIM, mdata->sde_qos_map);
2961 set_bit(SDE_QOS_PER_PIPE_LUT, mdata->sde_qos_map);
2962 clear_bit(SDE_QOS_SIMPLIFIED_PREFILL, mdata->sde_qos_map);
2963
2964 set_bit(SDE_CAPS_R3_WB, mdata->sde_caps_map);
2965
Alan Kwong6bc64622017-02-04 17:36:03 -08002966 /* features exposed via rotator top h/w version */
Benjamin Chanfb6faa32016-08-16 17:21:01 -04002967 if (hw_version != SDE_ROT_TYPE_V1_0) {
2968 SDEROT_DBG("Supporting 1.5 downscale for SDE Rotator\n");
2969 set_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map);
2970 }
2971
Abhijit Kulkarni298c8232016-09-26 22:32:10 -07002972 set_bit(SDE_CAPS_SEC_ATTACH_DETACH_SMMU, mdata->sde_caps_map);
2973
Benjamin Chan53e3bce2016-08-31 14:43:29 -04002974 mdata->nrt_vbif_dbg_bus = nrt_vbif_dbg_bus_r3;
2975 mdata->nrt_vbif_dbg_bus_size =
2976 ARRAY_SIZE(nrt_vbif_dbg_bus_r3);
2977
Benjamin Chan2d6411a2017-03-28 18:01:53 -04002978 mdata->rot_dbg_bus = rot_dbgbus_r3;
2979 mdata->rot_dbg_bus_size = ARRAY_SIZE(rot_dbgbus_r3);
2980
Benjamin Chan53e3bce2016-08-31 14:43:29 -04002981 mdata->regdump = sde_rot_r3_regdump;
2982 mdata->regdump_size = ARRAY_SIZE(sde_rot_r3_regdump);
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04002983 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG, 0);
Alan Kwong6bc64622017-02-04 17:36:03 -08002984
2985 /* features exposed via mdss h/w version */
Jayant Shekhar292e0a22017-09-12 15:23:24 +05302986 if (IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version, SDE_MDP_HW_REV_400) ||
2987 IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version,
2988 SDE_MDP_HW_REV_410)) {
Alan Kwong6bc64622017-02-04 17:36:03 -08002989 SDEROT_DBG("Supporting sys cache inline rotation\n");
2990 set_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map);
Alan Kwongfb8eeb22017-02-06 15:00:03 -08002991 set_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map);
Clarence Ip22fed4c2017-05-16 15:30:51 -04002992 set_bit(SDE_CAPS_PARTIALWR, mdata->sde_caps_map);
Alan Kwong4b416162017-08-11 21:03:10 -04002993 rot->inpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
2994 sde_hw_rotator_v4_inpixfmts;
2995 rot->num_inpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
2996 ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts);
2997 rot->outpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
2998 sde_hw_rotator_v4_outpixfmts;
2999 rot->num_outpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
3000 ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts);
3001 rot->inpixfmts[SDE_ROTATOR_MODE_SBUF] =
3002 sde_hw_rotator_v4_inpixfmts_sbuf;
3003 rot->num_inpixfmt[SDE_ROTATOR_MODE_SBUF] =
3004 ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts_sbuf);
3005 rot->outpixfmts[SDE_ROTATOR_MODE_SBUF] =
3006 sde_hw_rotator_v4_outpixfmts_sbuf;
3007 rot->num_outpixfmt[SDE_ROTATOR_MODE_SBUF] =
3008 ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts_sbuf);
Alan Kwong6bc64622017-02-04 17:36:03 -08003009 rot->downscale_caps =
3010 "LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2";
3011 } else {
Alan Kwong4b416162017-08-11 21:03:10 -04003012 rot->inpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
3013 sde_hw_rotator_v3_inpixfmts;
3014 rot->num_inpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
3015 ARRAY_SIZE(sde_hw_rotator_v3_inpixfmts);
3016 rot->outpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
3017 sde_hw_rotator_v3_outpixfmts;
3018 rot->num_outpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
3019 ARRAY_SIZE(sde_hw_rotator_v3_outpixfmts);
Alan Kwong6bc64622017-02-04 17:36:03 -08003020 rot->downscale_caps = (hw_version == SDE_ROT_TYPE_V1_0) ?
3021 "LINEAR/2/4/8/16/32/64 TILE/2/4 TP10/2" :
3022 "LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2";
3023 }
3024
Alan Kwong9487de22016-01-16 22:06:36 -05003025 return 0;
3026}
3027
3028/*
3029 * sde_hw_rotator_rotirq_handler - non-regdma interrupt handler
3030 * @irq: Interrupt number
3031 * @ptr: Pointer to private handle provided during registration
3032 *
3033 * This function services rotator interrupt and wakes up waiting client
3034 * with pending rotation requests already submitted to h/w.
3035 */
3036static irqreturn_t sde_hw_rotator_rotirq_handler(int irq, void *ptr)
3037{
3038 struct sde_hw_rotator *rot = ptr;
3039 struct sde_hw_rotator_context *ctx;
3040 irqreturn_t ret = IRQ_NONE;
3041 u32 isr;
3042
3043 isr = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_INTR_STATUS);
3044
3045 SDEROT_DBG("intr_status = %8.8x\n", isr);
3046
3047 if (isr & ROT_DONE_MASK) {
3048 if (rot->irq_num >= 0)
Alan Kwong818b7fc2016-07-24 22:07:41 -04003049 sde_hw_rotator_disable_irq(rot);
Alan Kwong9487de22016-01-16 22:06:36 -05003050 SDEROT_DBG("Notify rotator complete\n");
3051
3052 /* Normal rotator only 1 session, no need to lookup */
3053 ctx = rot->rotCtx[0][0];
3054 WARN_ON(ctx == NULL);
3055 complete_all(&ctx->rot_comp);
3056
3057 spin_lock(&rot->rotisr_lock);
3058 SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
3059 ROT_DONE_CLEAR);
3060 spin_unlock(&rot->rotisr_lock);
3061 ret = IRQ_HANDLED;
3062 }
3063
3064 return ret;
3065}
3066
3067/*
3068 * sde_hw_rotator_regdmairq_handler - regdma interrupt handler
3069 * @irq: Interrupt number
3070 * @ptr: Pointer to private handle provided during registration
3071 *
3072 * This function services rotator interrupt, decoding the source of
3073 * events (high/low priority queue), and wakes up all waiting clients
3074 * with pending rotation requests already submitted to h/w.
3075 */
3076static irqreturn_t sde_hw_rotator_regdmairq_handler(int irq, void *ptr)
3077{
3078 struct sde_hw_rotator *rot = ptr;
Clarence Ip96854c2db12017-06-12 14:32:26 -04003079 struct sde_hw_rotator_context *ctx, *tmp;
Alan Kwong9487de22016-01-16 22:06:36 -05003080 irqreturn_t ret = IRQ_NONE;
Clarence Ip96854c2db12017-06-12 14:32:26 -04003081 u32 isr, isr_tmp;
Alan Kwong9487de22016-01-16 22:06:36 -05003082 u32 ts;
3083 u32 q_id;
3084
3085 isr = SDE_ROTREG_READ(rot->mdss_base, REGDMA_CSR_REGDMA_INT_STATUS);
Alan Kwong818b7fc2016-07-24 22:07:41 -04003086 /* acknowledge interrupt before reading latest timestamp */
3087 SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_INT_CLEAR, isr);
Alan Kwong9487de22016-01-16 22:06:36 -05003088 ts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);
3089
3090 SDEROT_DBG("intr_status = %8.8x, sw_TS:%X\n", isr, ts);
3091
3092 /* Any REGDMA status, including error and watchdog timer, should
3093 * trigger and wake up waiting thread
3094 */
3095 if (isr & (REGDMA_INT_HIGH_MASK | REGDMA_INT_LOW_MASK)) {
3096 spin_lock(&rot->rotisr_lock);
3097
3098 /*
3099 * Obtain rotator context based on timestamp from regdma
3100 * and low/high interrupt status
3101 */
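/*
 * The 32-bit SW timestamp register packs both queues: the high
 * priority timestamp sits in the lower SDE_REGDMA_SWTS_MASK bits
 * and the low priority timestamp in the bits above
 * SDE_REGDMA_SWTS_SHIFT.
 */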
3102 if (isr & REGDMA_INT_HIGH_MASK) {
3103 q_id = ROT_QUEUE_HIGH_PRIORITY;
3104 ts = ts & SDE_REGDMA_SWTS_MASK;
3105 } else if (isr & REGDMA_INT_LOW_MASK) {
3106 q_id = ROT_QUEUE_LOW_PRIORITY;
3107 ts = (ts >> SDE_REGDMA_SWTS_SHIFT) &
3108 SDE_REGDMA_SWTS_MASK;
Benjamin Chan62b94ed2016-08-18 23:55:21 -04003109 } else {
3110 SDEROT_ERR("unknown ISR status: isr=0x%X\n", isr);
3111 goto done_isr_handle;
Alan Kwong9487de22016-01-16 22:06:36 -05003112 }
Alan Kwong6bc64622017-02-04 17:36:03 -08003113
3114 /*
3115 * Timestamp packet is not available in sbuf mode.
3116 * Simulate timestamp update in the handler instead.
3117 */
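/*
 * Each inline (sbuf) context is matched against the REGDMA_INT_0/1
 * status bits using the parity of its timestamp, and the SW
 * timestamp is advanced here on its behalf.
 */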
Clarence Ip96854c2db12017-06-12 14:32:26 -04003118 if (list_empty(&rot->sbuf_ctx[q_id]))
3119 goto skip_sbuf;
3120
3121 ctx = NULL;
3122 isr_tmp = isr;
3123 list_for_each_entry(tmp, &rot->sbuf_ctx[q_id], list) {
3124 u32 mask;
3125
3126 mask = tmp->timestamp & 0x1 ? REGDMA_INT_1_MASK :
3127 REGDMA_INT_0_MASK;
3128 if (isr_tmp & mask) {
3129 isr_tmp &= ~mask;
3130 ctx = tmp;
Alan Kwong6bc64622017-02-04 17:36:03 -08003131 ts = ctx->timestamp;
3132 sde_hw_rotator_update_swts(rot, ctx, ts);
3133 SDEROT_DBG("update swts:0x%X\n", ts);
Alan Kwong6bc64622017-02-04 17:36:03 -08003134 }
Clarence Ip96854c2db12017-06-12 14:32:26 -04003135 SDEROT_EVTLOG(isr, tmp->timestamp);
Alan Kwong6bc64622017-02-04 17:36:03 -08003136 }
Clarence Ip96854c2db12017-06-12 14:32:26 -04003137 if (ctx == NULL)
3138 SDEROT_ERR("invalid swts ctx\n");
3139skip_sbuf:
Alan Kwong9487de22016-01-16 22:06:36 -05003140 ctx = rot->rotCtx[q_id][ts & SDE_HW_ROT_REGDMA_SEG_MASK];
Alan Kwong9487de22016-01-16 22:06:36 -05003141
3142 /*
3143 * Wake up all waiting contexts from the current and previous
3144 * SW timestamps.
3145 */
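/*
 * Walk backwards through earlier timestamps (modulo the SWTS mask)
 * and release every context whose timestamp has already elapsed,
 * so a single interrupt can retire multiple completed contexts.
 */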
Alan Kwong818b7fc2016-07-24 22:07:41 -04003146 while (ctx &&
3147 sde_hw_rotator_elapsed_swts(ctx->timestamp, ts) >= 0) {
Alan Kwong9487de22016-01-16 22:06:36 -05003148 ctx->last_regdma_isr_status = isr;
3149 ctx->last_regdma_timestamp = ts;
3150 SDEROT_DBG(
Alan Kwongf987ea32016-07-06 12:11:44 -04003151 "regdma complete: ctx:%p, ts:%X\n", ctx, ts);
Alan Kwong818b7fc2016-07-24 22:07:41 -04003152 wake_up_all(&ctx->regdma_waitq);
Alan Kwong9487de22016-01-16 22:06:36 -05003153
3154 ts = (ts - 1) & SDE_REGDMA_SWTS_MASK;
3155 ctx = rot->rotCtx[q_id]
3156 [ts & SDE_HW_ROT_REGDMA_SEG_MASK];
Alan Kwong818b7fc2016-07-24 22:07:41 -04003157 }
Alan Kwong9487de22016-01-16 22:06:36 -05003158
Benjamin Chan62b94ed2016-08-18 23:55:21 -04003159done_isr_handle:
Alan Kwong9487de22016-01-16 22:06:36 -05003160 spin_unlock(&rot->rotisr_lock);
3161 ret = IRQ_HANDLED;
3162 } else if (isr & REGDMA_INT_ERR_MASK) {
3163 /*
3164 * For REGDMA Err, we save the isr info and wake up
3165 * all waiting contexts
3166 */
3167 int i, j;
3168
3169 SDEROT_ERR(
3170 "regdma err isr:%X, wake up all waiting contexts\n",
3171 isr);
3172
3173 spin_lock(&rot->rotisr_lock);
3174
3175 for (i = 0; i < ROT_QUEUE_MAX; i++) {
3176 for (j = 0; j < SDE_HW_ROT_REGDMA_TOTAL_CTX; j++) {
3177 ctx = rot->rotCtx[i][j];
3178 if (ctx && ctx->last_regdma_isr_status == 0) {
3179 ctx->last_regdma_isr_status = isr;
3180 ctx->last_regdma_timestamp = ts;
Alan Kwong818b7fc2016-07-24 22:07:41 -04003181 wake_up_all(&ctx->regdma_waitq);
Alan Kwong9487de22016-01-16 22:06:36 -05003182 SDEROT_DBG("Wakeup rotctx[%d][%d]:%p\n",
3183 i, j, ctx);
3184 }
3185 }
3186 }
3187
Alan Kwong9487de22016-01-16 22:06:36 -05003188 spin_unlock(&rot->rotisr_lock);
3189 ret = IRQ_HANDLED;
3190 }
3191
3192 return ret;
3193}
3194
3195/*
3196 * sde_hw_rotator_validate_entry - validate rotation entry
3197 * @mgr: Pointer to rotator manager
3198 * @entry: Pointer to rotation entry
3199 *
3200 * This function validates the given rotation entry and provides possible
3201 * fixup (future improvement) if available. It returns 0 if the entry
3202 * is valid, and an error code otherwise.
3203 */
3204static int sde_hw_rotator_validate_entry(struct sde_rot_mgr *mgr,
3205 struct sde_rot_entry *entry)
3206{
Benjamin Chanfb6faa32016-08-16 17:21:01 -04003207 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
Alan Kwongb6c049c2017-03-31 12:50:27 -07003208 struct sde_hw_rotator *hw_data;
Alan Kwong9487de22016-01-16 22:06:36 -05003209 int ret = 0;
3210 u16 src_w, src_h, dst_w, dst_h;
3211 struct sde_rotation_item *item = &entry->item;
3212 struct sde_mdp_format_params *fmt;
3213
Alan Kwongb6c049c2017-03-31 12:50:27 -07003214 if (!mgr || !entry || !mgr->hw_data) {
3215 SDEROT_ERR("invalid parameters\n");
3216 return -EINVAL;
3217 }
3218
3219 hw_data = mgr->hw_data;
3220
3221 if (hw_data->maxlinewidth < item->src_rect.w) {
3222 SDEROT_ERR("invalid src width %u\n", item->src_rect.w);
3223 return -EINVAL;
3224 }
3225
Alan Kwong9487de22016-01-16 22:06:36 -05003226 src_w = item->src_rect.w;
3227 src_h = item->src_rect.h;
3228
3229 if (item->flags & SDE_ROTATION_90) {
3230 dst_w = item->dst_rect.h;
3231 dst_h = item->dst_rect.w;
3232 } else {
3233 dst_w = item->dst_rect.w;
3234 dst_h = item->dst_rect.h;
3235 }
3236
3237 entry->dnsc_factor_w = 0;
3238 entry->dnsc_factor_h = 0;
3239
Alan Kwong6bc64622017-02-04 17:36:03 -08003240 if (item->output.sbuf &&
3241 !test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map)) {
3242 SDEROT_ERR("stream buffer not supported\n");
3243 return -EINVAL;
3244 }
3245
Alan Kwong9487de22016-01-16 22:06:36 -05003246 if ((src_w != dst_w) || (src_h != dst_h)) {
Clarence Ip4db1ea82017-05-01 12:18:55 -07003247 if (!dst_w || !dst_h) {
3248 SDEROT_DBG("zero output width/height not supported\n");
3249 ret = -EINVAL;
3250 goto dnsc_err;
3251 }
Alan Kwong9487de22016-01-16 22:06:36 -05003252 if ((src_w % dst_w) || (src_h % dst_h)) {
3253 SDEROT_DBG("non-integral scale not supported\n");
3254 ret = -EINVAL;
Benjamin Chanfb6faa32016-08-16 17:21:01 -04003255 goto dnsc_1p5_check;
Alan Kwong9487de22016-01-16 22:06:36 -05003256 }
3257 entry->dnsc_factor_w = src_w / dst_w;
3258 if ((entry->dnsc_factor_w & (entry->dnsc_factor_w - 1)) ||
3259 (entry->dnsc_factor_w > 64)) {
3260 SDEROT_DBG("non power-of-2 w_scale not supported\n");
3261 ret = -EINVAL;
3262 goto dnsc_err;
3263 }
3264 entry->dnsc_factor_h = src_h / dst_h;
3265 if ((entry->dnsc_factor_h & (entry->dnsc_factor_h - 1)) ||
3266 (entry->dnsc_factor_h > 64)) {
3267 SDEROT_DBG("non power-of-2 h_scale not supported\n");
3268 ret = -EINVAL;
3269 goto dnsc_err;
3270 }
3271 }
3272
Benjamin Chan0e96afd2017-01-17 16:49:12 -05003273 fmt = sde_get_format_params(item->output.format);
Benjamin Chan886ff672016-11-07 15:23:17 -05003274 /*
3275 * Rotator downscale supports at most 4x for UBWC formats and
3276 * at most 2x for TP10/TP10_UBWC formats
3277 */
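/*
 * e.g. with a UBWC destination, a 3840x2160 source can be downscaled
 * at most 4x per axis, i.e. to no less than 960x540.
 */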
3278 if (sde_mdp_is_ubwc_format(fmt) && (entry->dnsc_factor_h > 4)) {
3279 SDEROT_DBG("max downscale for UBWC format is 4\n");
Alan Kwong9487de22016-01-16 22:06:36 -05003280 ret = -EINVAL;
3281 goto dnsc_err;
3282 }
Benjamin Chan886ff672016-11-07 15:23:17 -05003283 if (sde_mdp_is_tp10_format(fmt) && (entry->dnsc_factor_h > 2)) {
3284 SDEROT_DBG("downscale with TP10 cannot be more than 2\n");
Alan Kwong9487de22016-01-16 22:06:36 -05003285 ret = -EINVAL;
3286 }
Benjamin Chanfb6faa32016-08-16 17:21:01 -04003287 goto dnsc_err;
3288
3289dnsc_1p5_check:
3290 /* Check for 1.5 downscale that only applies to V2 HW */
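/*
 * The ratio test below, (dst * 3) == (src * 2), accepts only an exact
 * 3:2 source-to-destination ratio, i.e. a 1.5x downscale (for example
 * 1920 -> 1280).
 */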
3291 if (test_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map)) {
3292 entry->dnsc_factor_w = src_w / dst_w;
3293 if ((entry->dnsc_factor_w != 1) ||
3294 ((dst_w * 3) != (src_w * 2))) {
3295 SDEROT_DBG(
3296 "Not supporting non 1.5 downscale width ratio, src_w:%d, dst_w:%d\n",
3297 src_w, dst_w);
3298 ret = -EINVAL;
3299 goto dnsc_err;
3300 }
3301
3302 entry->dnsc_factor_h = src_h / dst_h;
3303 if ((entry->dnsc_factor_h != 1) ||
3304 ((dst_h * 3) != (src_h * 2))) {
3305 SDEROT_DBG(
3306 "Not supporting non 1.5 downscale height ratio, src_h:%d, dst_h:%d\n",
3307 src_h, dst_h);
3308 ret = -EINVAL;
3309 goto dnsc_err;
3310 }
3311 ret = 0;
3312 }
Alan Kwong9487de22016-01-16 22:06:36 -05003313
3314dnsc_err:
3315 /* Downscaler does not support asymmetrical dnsc */
3316 if (entry->dnsc_factor_w != entry->dnsc_factor_h) {
3317 SDEROT_DBG("asymmetric downscale not supported\n");
3318 ret = -EINVAL;
3319 }
3320
3321 if (ret) {
3322 entry->dnsc_factor_w = 0;
3323 entry->dnsc_factor_h = 0;
3324 }
3325 return ret;
3326}
3327
3328/*
3329 * sde_hw_rotator_show_caps - output capability info to sysfs 'caps' file
3330 * @mgr: Pointer to rotator manager
3331 * @attr: Pointer to device attribute interface
3332 * @buf: Pointer to output buffer
3333 * @len: Length of output buffer
3334 */
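/*
 * Example 'caps' output (illustrative, actual values depend on h/w):
 *	min_downscale=1.5
 *	downscale_compression=1
 *	downscale_ratios=LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2
 */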
3335static ssize_t sde_hw_rotator_show_caps(struct sde_rot_mgr *mgr,
3336 struct device_attribute *attr, char *buf, ssize_t len)
3337{
3338 struct sde_hw_rotator *hw_data;
Benjamin Chan886ff672016-11-07 15:23:17 -05003339 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
Alan Kwong9487de22016-01-16 22:06:36 -05003340 int cnt = 0;
3341
3342 if (!mgr || !buf)
3343 return 0;
3344
3345 hw_data = mgr->hw_data;
3346
3347#define SPRINT(fmt, ...) \
3348 (cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
3349
3350 /* insert capabilities here */
Benjamin Chan886ff672016-11-07 15:23:17 -05003351 if (test_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map))
3352 SPRINT("min_downscale=1.5\n");
3353 else
3354 SPRINT("min_downscale=2.0\n");
Alan Kwong9487de22016-01-16 22:06:36 -05003355
Benjamin Chan42db2c92016-11-22 22:50:01 -05003356 SPRINT("downscale_compression=1\n");
3357
Alan Kwong6bc64622017-02-04 17:36:03 -08003358 if (hw_data->downscale_caps)
3359 SPRINT("downscale_ratios=%s\n", hw_data->downscale_caps);
3360
Alan Kwong9487de22016-01-16 22:06:36 -05003361#undef SPRINT
3362 return cnt;
3363}
3364
3365/*
3366 * sde_hw_rotator_show_state - output state info to sysfs 'state' file
3367 * @mgr: Pointer to rotator manager
3368 * @attr: Pointer to device attribute interface
3369 * @buf: Pointer to output buffer
3370 * @len: Length of output buffer
3371 */
3372static ssize_t sde_hw_rotator_show_state(struct sde_rot_mgr *mgr,
3373 struct device_attribute *attr, char *buf, ssize_t len)
3374{
3375 struct sde_hw_rotator *rot;
3376 struct sde_hw_rotator_context *ctx;
3377 int cnt = 0;
3378 int num_active = 0;
3379 int i, j;
3380
3381 if (!mgr || !buf) {
3382 SDEROT_ERR("null parameters\n");
3383 return 0;
3384 }
3385
3386 rot = mgr->hw_data;
3387
3388#define SPRINT(fmt, ...) \
3389 (cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
3390
3391 if (rot) {
3392 SPRINT("rot_mode=%d\n", rot->mode);
3393 SPRINT("irq_num=%d\n", rot->irq_num);
3394
3395 if (rot->mode == ROT_REGDMA_OFF) {
3396 SPRINT("max_active=1\n");
3397 SPRINT("num_active=%d\n", rot->rotCtx[0][0] ? 1 : 0);
3398 } else {
3399 for (i = 0; i < ROT_QUEUE_MAX; i++) {
3400 for (j = 0; j < SDE_HW_ROT_REGDMA_TOTAL_CTX;
3401 j++) {
3402 ctx = rot->rotCtx[i][j];
3403
3404 if (ctx) {
3405 SPRINT(
3406 "rotCtx[%d][%d]:%p\n",
3407 i, j, ctx);
3408 ++num_active;
3409 }
3410 }
3411 }
3412
3413 SPRINT("max_active=%d\n", SDE_HW_ROT_REGDMA_TOTAL_CTX);
3414 SPRINT("num_active=%d\n", num_active);
3415 }
3416 }
3417
3418#undef SPRINT
3419 return cnt;
3420}
3421
3422/*
Alan Kwongda16e442016-08-14 20:47:18 -04003423 * sde_hw_rotator_get_pixfmt - get the indexed pixel format
3424 * @mgr: Pointer to rotator manager
3425 * @index: index of pixel format
3426 * @input: true for input port; false for output port
Alan Kwong4b416162017-08-11 21:03:10 -04003427 * @mode: operating mode
Alan Kwongda16e442016-08-14 20:47:18 -04003428 */
3429static u32 sde_hw_rotator_get_pixfmt(struct sde_rot_mgr *mgr,
Alan Kwong4b416162017-08-11 21:03:10 -04003430 int index, bool input, u32 mode)
Alan Kwongda16e442016-08-14 20:47:18 -04003431{
Alan Kwong6bc64622017-02-04 17:36:03 -08003432 struct sde_hw_rotator *rot;
3433
3434 if (!mgr || !mgr->hw_data) {
3435 SDEROT_ERR("null parameters\n");
3436 return 0;
3437 }
3438
3439 rot = mgr->hw_data;
3440
Alan Kwong4b416162017-08-11 21:03:10 -04003441 if (mode >= SDE_ROTATOR_MODE_MAX) {
3442 SDEROT_ERR("invalid rotator mode %d\n", mode);
3443 return 0;
3444 }
3445
Alan Kwongda16e442016-08-14 20:47:18 -04003446 if (input) {
Alan Kwong4b416162017-08-11 21:03:10 -04003447 if ((index < rot->num_inpixfmt[mode]) && rot->inpixfmts[mode])
3448 return rot->inpixfmts[mode][index];
Alan Kwongda16e442016-08-14 20:47:18 -04003449 else
3450 return 0;
3451 } else {
Alan Kwong4b416162017-08-11 21:03:10 -04003452 if ((index < rot->num_outpixfmt[mode]) && rot->outpixfmts[mode])
3453 return rot->outpixfmts[mode][index];
Alan Kwongda16e442016-08-14 20:47:18 -04003454 else
3455 return 0;
3456 }
3457}
3458
3459/*
3460 * sde_hw_rotator_is_valid_pixfmt - verify if the given pixel format is valid
3461 * @mgr: Pointer to rotator manager
3462 * @pixfmt: pixel format to be verified
3463 * @input: true for input port; false for output port
Alan Kwong4b416162017-08-11 21:03:10 -04003464 * @mode: operating mode
Alan Kwongda16e442016-08-14 20:47:18 -04003465 */
3466static int sde_hw_rotator_is_valid_pixfmt(struct sde_rot_mgr *mgr, u32 pixfmt,
Alan Kwong4b416162017-08-11 21:03:10 -04003467 bool input, u32 mode)
Alan Kwongda16e442016-08-14 20:47:18 -04003468{
Alan Kwong6bc64622017-02-04 17:36:03 -08003469 struct sde_hw_rotator *rot;
Alan Kwong4b416162017-08-11 21:03:10 -04003470 const u32 *pixfmts;
Alan Kwong6bc64622017-02-04 17:36:03 -08003471 u32 num_pixfmt;
Alan Kwongda16e442016-08-14 20:47:18 -04003472 int i;
3473
Alan Kwong6bc64622017-02-04 17:36:03 -08003474 if (!mgr || !mgr->hw_data) {
3475 SDEROT_ERR("null parameters\n");
3476 return false;
Alan Kwongda16e442016-08-14 20:47:18 -04003477 }
3478
Alan Kwong6bc64622017-02-04 17:36:03 -08003479 rot = mgr->hw_data;
3480
Alan Kwong4b416162017-08-11 21:03:10 -04003481 if (mode >= SDE_ROTATOR_MODE_MAX) {
3482 SDEROT_ERR("invalid rotator mode %d\n", mode);
3483 return false;
3484 }
3485
Alan Kwong6bc64622017-02-04 17:36:03 -08003486 if (input) {
Alan Kwong4b416162017-08-11 21:03:10 -04003487 pixfmts = rot->inpixfmts[mode];
3488 num_pixfmt = rot->num_inpixfmt[mode];
Alan Kwong6bc64622017-02-04 17:36:03 -08003489 } else {
Alan Kwong4b416162017-08-11 21:03:10 -04003490 pixfmts = rot->outpixfmts[mode];
3491 num_pixfmt = rot->num_outpixfmt[mode];
Alan Kwong6bc64622017-02-04 17:36:03 -08003492 }
3493
3494 if (!pixfmts || !num_pixfmt) {
3495 SDEROT_ERR("invalid pixel format tables\n");
3496 return false;
3497 }
3498
3499 for (i = 0; i < num_pixfmt; i++)
3500 if (pixfmts[i] == pixfmt)
3501 return true;
3502
Alan Kwongda16e442016-08-14 20:47:18 -04003503 return false;
3504}
3505
3506/*
Alan Kwong6bc64622017-02-04 17:36:03 -08003507 * sde_hw_rotator_get_downscale_caps - get scaling capability string
3508 * @mgr: Pointer to rotator manager
3509 * @caps: Pointer to capability string buffer; NULL to return maximum length
3510 * @len: length of capability string buffer
3511 * return: length of capability string
3512 */
3513static int sde_hw_rotator_get_downscale_caps(struct sde_rot_mgr *mgr,
3514 char *caps, int len)
3515{
3516 struct sde_hw_rotator *rot;
3517 int rc = 0;
3518
3519 if (!mgr || !mgr->hw_data) {
3520 SDEROT_ERR("null parameters\n");
3521 return -EINVAL;
3522 }
3523
3524 rot = mgr->hw_data;
3525
3526 if (rot->downscale_caps) {
3527 if (caps)
3528 rc = snprintf(caps, len, "%s", rot->downscale_caps);
3529 else
3530 rc = strlen(rot->downscale_caps);
3531 }
3532
3533 return rc;
3534}
3535
3536/*
Alan Kwongb6c049c2017-03-31 12:50:27 -07003537 * sde_hw_rotator_get_maxlinewidth - get maximum line width supported
3538 * @mgr: Pointer to rotator manager
3539 * return: maximum line width supported by hardware
3540 */
3541static int sde_hw_rotator_get_maxlinewidth(struct sde_rot_mgr *mgr)
3542{
3543 struct sde_hw_rotator *rot;
3544
3545 if (!mgr || !mgr->hw_data) {
3546 SDEROT_ERR("null parameters\n");
3547 return -EINVAL;
3548 }
3549
3550 rot = mgr->hw_data;
3551
3552 return rot->maxlinewidth;
3553}
3554
3555/*
Clarence Ipcd140292017-09-22 16:24:08 -04003556 * sde_hw_rotator_dump_status - dump status to debug output
3557 * @mgr: Pointer to rotator manager
3558 * return: none
3559 */
3560static void sde_hw_rotator_dump_status(struct sde_rot_mgr *mgr)
3561{
3562 if (!mgr || !mgr->hw_data) {
3563 SDEROT_ERR("null parameters\n");
3564 return;
3565 }
3566
3567 _sde_hw_rotator_dump_status(mgr->hw_data, NULL);
3568}
3569
3570/*
Alan Kwong9487de22016-01-16 22:06:36 -05003571 * sde_hw_rotator_parse_dt - parse r3 specific device tree settings
3572 * @hw_data: Pointer to rotator hw
3573 * @dev: Pointer to platform device
3574 */
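/*
 * Illustrative device tree fragment (node name and values are examples
 * only; all properties are optional and fall back to defaults):
 *
 *	&mdss_rotator {
 *		qcom,mdss-rot-mode = <1>;
 *		qcom,mdss-highest-bank-bit = <2>;
 *		qcom,sde-ubwc-malsize = <0>;
 *		qcom,sde-ubwc_swizzle = <0>;
 *		qcom,mdss-sbuf-headroom = <20>;
 *		qcom,mdss-rot-linewidth = <4096>;
 *	};
 */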
3575static int sde_hw_rotator_parse_dt(struct sde_hw_rotator *hw_data,
3576 struct platform_device *dev)
3577{
3578 int ret = 0;
3579 u32 data;
3580
3581 if (!hw_data || !dev)
3582 return -EINVAL;
3583
3584 ret = of_property_read_u32(dev->dev.of_node, "qcom,mdss-rot-mode",
3585 &data);
3586 if (ret) {
3587 SDEROT_DBG("default to regdma off\n");
3588 ret = 0;
3589 hw_data->mode = ROT_REGDMA_OFF;
3590 } else if (data < ROT_REGDMA_MAX) {
3591 SDEROT_DBG("set to regdma mode %d\n", data);
3592 hw_data->mode = data;
3593 } else {
3594 SDEROT_ERR("regdma mode out of range. default to regdma off\n");
3595 hw_data->mode = ROT_REGDMA_OFF;
3596 }
3597
3598 ret = of_property_read_u32(dev->dev.of_node,
3599 "qcom,mdss-highest-bank-bit", &data);
3600 if (ret) {
3601 SDEROT_DBG("default to A5X bank\n");
3602 ret = 0;
3603 hw_data->highest_bank = 2;
3604 } else {
3605 SDEROT_DBG("set highest bank bit to %d\n", data);
3606 hw_data->highest_bank = data;
3607 }
3608
Alan Kwong6bc64622017-02-04 17:36:03 -08003609 ret = of_property_read_u32(dev->dev.of_node,
Alan Kwongfb8eeb22017-02-06 15:00:03 -08003610 "qcom,sde-ubwc-malsize", &data);
3611 if (ret) {
3612 ret = 0;
3613 hw_data->ubwc_malsize = DEFAULT_UBWC_MALSIZE;
3614 } else {
3615 SDEROT_DBG("set ubwc malsize to %d\n", data);
3616 hw_data->ubwc_malsize = data;
3617 }
3618
3619 ret = of_property_read_u32(dev->dev.of_node,
3620 "qcom,sde-ubwc_swizzle", &data);
3621 if (ret) {
3622 ret = 0;
3623 hw_data->ubwc_swizzle = DEFAULT_UBWC_SWIZZLE;
3624 } else {
3625 SDEROT_DBG("set ubwc swizzle to %d\n", data);
3626 hw_data->ubwc_swizzle = data;
3627 }
3628
3629 ret = of_property_read_u32(dev->dev.of_node,
Alan Kwong6bc64622017-02-04 17:36:03 -08003630 "qcom,mdss-sbuf-headroom", &data);
3631 if (ret) {
3632 ret = 0;
3633 hw_data->sbuf_headroom = DEFAULT_SBUF_HEADROOM;
3634 } else {
3635 SDEROT_DBG("set sbuf headroom to %d\n", data);
3636 hw_data->sbuf_headroom = data;
3637 }
3638
Alan Kwongb6c049c2017-03-31 12:50:27 -07003639 ret = of_property_read_u32(dev->dev.of_node,
3640 "qcom,mdss-rot-linewidth", &data);
3641 if (ret) {
3642 ret = 0;
3643 hw_data->maxlinewidth = DEFAULT_MAXLINEWIDTH;
3644 } else {
3645 SDEROT_DBG("set mdss-rot-linewidth to %d\n", data);
3646 hw_data->maxlinewidth = data;
3647 }
3648
Alan Kwong9487de22016-01-16 22:06:36 -05003649 return ret;
3650}
3651
3652/*
3653 * sde_rotator_r3_init - initialize the r3 module
3654 * @mgr: Pointer to rotator manager
3655 *
3656 * This function setup r3 callback functions, parses r3 specific
3657 * device tree settings, installs r3 specific interrupt handler,
3658 * as well as initializes r3 internal data structure.
3659 */
3660int sde_rotator_r3_init(struct sde_rot_mgr *mgr)
3661{
3662 struct sde_hw_rotator *rot;
3663 struct sde_rot_data_type *mdata = sde_rot_get_mdata();
3664 int i;
3665 int ret;
3666
3667 rot = devm_kzalloc(&mgr->pdev->dev, sizeof(*rot), GFP_KERNEL);
3668 if (!rot)
3669 return -ENOMEM;
3670
3671 mgr->hw_data = rot;
3672 mgr->queue_count = ROT_QUEUE_MAX;
3673
3674 rot->mdss_base = mdata->sde_io.base;
3675 rot->pdev = mgr->pdev;
Alan Kwong6bc64622017-02-04 17:36:03 -08003676 rot->koff_timeout = KOFF_TIMEOUT;
3677 rot->vid_trigger = ROTTOP_START_CTRL_TRIG_SEL_MDP;
3678 rot->cmd_trigger = ROTTOP_START_CTRL_TRIG_SEL_MDP;
Alan Kwong9487de22016-01-16 22:06:36 -05003679
3680 /* Assign ops */
3681 mgr->ops_hw_destroy = sde_hw_rotator_destroy;
3682 mgr->ops_hw_alloc = sde_hw_rotator_alloc_ext;
3683 mgr->ops_hw_free = sde_hw_rotator_free_ext;
3684 mgr->ops_config_hw = sde_hw_rotator_config;
Clarence Ip3ce07c02017-08-11 16:21:45 -04003685 mgr->ops_cancel_hw = sde_hw_rotator_cancel;
Clarence Ip19339b32017-10-14 20:59:00 -04003686 mgr->ops_abort_hw = sde_hw_rotator_abort_kickoff;
Alan Kwong9487de22016-01-16 22:06:36 -05003687 mgr->ops_kickoff_entry = sde_hw_rotator_kickoff;
3688 mgr->ops_wait_for_entry = sde_hw_rotator_wait4done;
3689 mgr->ops_hw_validate_entry = sde_hw_rotator_validate_entry;
3690 mgr->ops_hw_show_caps = sde_hw_rotator_show_caps;
3691 mgr->ops_hw_show_state = sde_hw_rotator_show_state;
3692 mgr->ops_hw_create_debugfs = sde_rotator_r3_create_debugfs;
Alan Kwongda16e442016-08-14 20:47:18 -04003693 mgr->ops_hw_get_pixfmt = sde_hw_rotator_get_pixfmt;
3694 mgr->ops_hw_is_valid_pixfmt = sde_hw_rotator_is_valid_pixfmt;
Benjamin Chan0f9e61d2016-09-16 16:01:09 -04003695 mgr->ops_hw_pre_pmevent = sde_hw_rotator_pre_pmevent;
3696 mgr->ops_hw_post_pmevent = sde_hw_rotator_post_pmevent;
Alan Kwong6bc64622017-02-04 17:36:03 -08003697 mgr->ops_hw_get_downscale_caps = sde_hw_rotator_get_downscale_caps;
Alan Kwongb6c049c2017-03-31 12:50:27 -07003698 mgr->ops_hw_get_maxlinewidth = sde_hw_rotator_get_maxlinewidth;
Clarence Ipcd140292017-09-22 16:24:08 -04003699 mgr->ops_hw_dump_status = sde_hw_rotator_dump_status;
Alan Kwong9487de22016-01-16 22:06:36 -05003700
3701 ret = sde_hw_rotator_parse_dt(mgr->hw_data, mgr->pdev);
3702 if (ret)
3703 goto error_parse_dt;
3704
3705 rot->irq_num = platform_get_irq(mgr->pdev, 0);
Lloyd Atkinson073635b2017-08-17 17:03:59 -04003706 if (rot->irq_num == -EPROBE_DEFER) {
3707 SDEROT_INFO("irq master not ready, deferring probe\n");
3708 return -EPROBE_DEFER;
3709 } else if (rot->irq_num < 0) {
3710 SDEROT_ERR("fail to get rotator irq, fallback to polling\n");
Alan Kwong9487de22016-01-16 22:06:36 -05003711 } else {
3712 if (rot->mode == ROT_REGDMA_OFF)
3713 ret = devm_request_threaded_irq(&mgr->pdev->dev,
3714 rot->irq_num,
3715 sde_hw_rotator_rotirq_handler,
3716 NULL, 0, "sde_rotator_r3", rot);
3717 else
3718 ret = devm_request_threaded_irq(&mgr->pdev->dev,
3719 rot->irq_num,
3720 sde_hw_rotator_regdmairq_handler,
3721 NULL, 0, "sde_rotator_r3", rot);
3722 if (ret) {
3723 SDEROT_ERR("fail to request irq r:%d\n", ret);
3724 rot->irq_num = -1;
3725 } else {
3726 disable_irq(rot->irq_num);
3727 }
3728 }
Alan Kwong818b7fc2016-07-24 22:07:41 -04003729 atomic_set(&rot->irq_enabled, 0);
Alan Kwong9487de22016-01-16 22:06:36 -05003730
3731 setup_rotator_ops(&rot->ops, rot->mode);
3732
3733 spin_lock_init(&rot->rotctx_lock);
3734 spin_lock_init(&rot->rotisr_lock);
3735
3736 /* REGDMA initialization */
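/*
 * In ROT_REGDMA_OFF mode the command write pointers reference the
 * driver's local cmd_queue; otherwise each context gets its own
 * segment of the REGDMA command RAM, with the high priority queue
 * contexts laid out first and the low priority queue contexts after
 * them.
 */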
3737 if (rot->mode == ROT_REGDMA_OFF) {
3738 for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
Benjamin Chanda32f8b2017-09-20 17:11:27 -04003739 rot->cmd_wr_ptr[0][i] = (char __iomem *)(
3740 &rot->cmd_queue[
3741 SDE_HW_ROT_REGDMA_SEG_SIZE * i]);
Alan Kwong9487de22016-01-16 22:06:36 -05003742 } else {
3743 for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
3744 rot->cmd_wr_ptr[ROT_QUEUE_HIGH_PRIORITY][i] =
Benjamin Chanda32f8b2017-09-20 17:11:27 -04003745 rot->mdss_base +
Alan Kwong9487de22016-01-16 22:06:36 -05003746 REGDMA_RAM_REGDMA_CMD_RAM +
Benjamin Chanda32f8b2017-09-20 17:11:27 -04003747 SDE_HW_ROT_REGDMA_SEG_SIZE * 4 * i;
Alan Kwong9487de22016-01-16 22:06:36 -05003748
3749 for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
3750 rot->cmd_wr_ptr[ROT_QUEUE_LOW_PRIORITY][i] =
Benjamin Chanda32f8b2017-09-20 17:11:27 -04003751 rot->mdss_base +
Alan Kwong9487de22016-01-16 22:06:36 -05003752 REGDMA_RAM_REGDMA_CMD_RAM +
3753 SDE_HW_ROT_REGDMA_SEG_SIZE * 4 *
Benjamin Chanda32f8b2017-09-20 17:11:27 -04003754 (i + SDE_HW_ROT_REGDMA_TOTAL_CTX);
Alan Kwong9487de22016-01-16 22:06:36 -05003755 }
3756
Alan Kwong6bc64622017-02-04 17:36:03 -08003757 for (i = 0; i < ROT_QUEUE_MAX; i++) {
3758 atomic_set(&rot->timestamp[i], 0);
3759 INIT_LIST_HEAD(&rot->sbuf_ctx[i]);
3760 }
Alan Kwong9487de22016-01-16 22:06:36 -05003761
3762 ret = sde_rotator_hw_rev_init(rot);
3763 if (ret)
3764 goto error_hw_rev_init;
3765
Alan Kwong315cd772016-08-03 22:29:42 -04003766 /* set rotator CBCR to shutoff memory/periphery on clock off.*/
Clarence Ip77c053d2017-04-24 19:26:37 -07003767 clk_set_flags(mgr->rot_clk[SDE_ROTATOR_CLK_MDSS_ROT].clk,
Alan Kwong315cd772016-08-03 22:29:42 -04003768 CLKFLAG_NORETAIN_MEM);
Clarence Ip77c053d2017-04-24 19:26:37 -07003769 clk_set_flags(mgr->rot_clk[SDE_ROTATOR_CLK_MDSS_ROT].clk,
Alan Kwong315cd772016-08-03 22:29:42 -04003770 CLKFLAG_NORETAIN_PERIPH);
3771
Benjamin Chan53e3bce2016-08-31 14:43:29 -04003772 mdata->sde_rot_hw = rot;
Alan Kwong9487de22016-01-16 22:06:36 -05003773 return 0;
3774error_hw_rev_init:
3775 if (rot->irq_num >= 0)
3776 devm_free_irq(&mgr->pdev->dev, rot->irq_num, mdata);
3777 devm_kfree(&mgr->pdev->dev, mgr->hw_data);
3778error_parse_dt:
3779 return ret;
3780}