/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/delay.h>
#include "sde_hwio.h"
#include "sde_hw_ctl.h"
#include "sde_dbg.h"
#include "sde_kms.h"
#include "sde_reg_dma.h"

#define CTL_LAYER(lm) \
	(((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
#define CTL_LAYER_EXT(lm) \
	(0x40 + (((lm) - LM_0) * 0x004))
#define CTL_LAYER_EXT2(lm) \
	(0x70 + (((lm) - LM_0) * 0x004))
#define CTL_LAYER_EXT3(lm) \
	(0xA0 + (((lm) - LM_0) * 0x004))
#define CTL_TOP                       0x014
#define CTL_FLUSH                     0x018
#define CTL_START                     0x01C
#define CTL_PREPARE                   0x0d0
#define CTL_SW_RESET                  0x030
#define CTL_SW_RESET_OVERRIDE         0x060
#define CTL_LAYER_EXTN_OFFSET         0x40
#define CTL_ROT_TOP                   0x0C0
#define CTL_ROT_FLUSH                 0x0C4
#define CTL_ROT_START                 0x0CC

#define CTL_MIXER_BORDER_OUT          BIT(24)
#define CTL_FLUSH_MASK_ROT            BIT(27)
#define CTL_FLUSH_MASK_CTL            BIT(17)

#define SDE_REG_RESET_TIMEOUT_US      2000

#define MDP_CTL_FLUSH(n)              (0x2000 + (0x200 * (n)) + CTL_FLUSH)
#define CTL_FLUSH_LM_BIT(n)           (6 + (n))
#define CTL_TOP_LM_OFFSET(index, lm)  (0x2000 + (0x200 * (index)) + ((lm) * 0x4))

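/*
 * Example offset arithmetic with the macros above (values illustrative):
 * for ctl_id 1 and lm_id 2,
 *	MDP_CTL_FLUSH(1)         = 0x2000 + 0x200 * 1 + 0x018 = 0x2218
 *	CTL_TOP_LM_OFFSET(1, 2)  = 0x2000 + 0x200 * 1 + 2 * 0x4 = 0x2208
 *	BIT(CTL_FLUSH_LM_BIT(2)) = BIT(8), the LM_2 flush bit in CTL_FLUSH
 */
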
int sde_unstage_pipe_for_cont_splash(struct sde_splash_data *data,
		void __iomem *mmio)
{
	int i, j;
	u32 op_mode;

	if (!data) {
		pr_err("invalid splash data\n");
		return -EINVAL;
	}

	for (i = 0; i < data->ctl_top_cnt; i++) {
		struct ctl_top *top = &data->top[i];
		u8 ctl_id = data->ctl_ids[i] - CTL_0;
		u32 regval = 0;

		op_mode = readl_relaxed(mmio + MDP_CTL_FLUSH(ctl_id));

		/* set border fill */
		regval |= CTL_MIXER_BORDER_OUT;

		for (j = 0; j < top->ctl_lm_cnt; j++) {
			u8 lm_id = top->lm[j].lm_id - LM_0;

			writel_relaxed(regval,
					mmio + CTL_TOP_LM_OFFSET(ctl_id, lm_id));

			op_mode |= BIT(CTL_FLUSH_LM_BIT(lm_id));
		}
		op_mode |= CTL_FLUSH_MASK_CTL;

		writel_relaxed(op_mode, mmio + MDP_CTL_FLUSH(ctl_id));
	}
	return 0;
}

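/*
 * A minimal sketch of how the helper above might be invoked during
 * continuous-splash handoff; the sde_kms fields named here are assumptions
 * for illustration, not definitions from this file:
 *
 *	rc = sde_unstage_pipe_for_cont_splash(&sde_kms->splash_data,
 *			sde_kms->mmio);
 *	if (rc)
 *		SDE_ERROR("failed to unstage splash pipes, rc %d\n", rc);
 *
 * Each CTL inherited from the bootloader is reprogrammed to emit border
 * fill on its mixers, and the matching LM and CTL flush bits are set so
 * the hardware picks up the change.
 */
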
static struct sde_ctl_cfg *_ctl_offset(enum sde_ctl ctl,
		struct sde_mdss_cfg *m,
		void __iomem *addr,
		struct sde_hw_blk_reg_map *b)
{
	int i;

	for (i = 0; i < m->ctl_count; i++) {
		if (ctl == m->ctl[i].id) {
			b->base_off = addr;
			b->blk_off = m->ctl[i].base;
			b->length = m->ctl[i].len;
			b->hwversion = m->hwversion;
			b->log_mask = SDE_DBG_MASK_CTL;
			return &m->ctl[i];
		}
	}
	return ERR_PTR(-ENOMEM);
}

static int _mixer_stages(const struct sde_lm_cfg *mixer, int count,
		enum sde_lm lm)
{
	int i;
	int stages = -EINVAL;

	for (i = 0; i < count; i++) {
		if (lm == mixer[i].id) {
			stages = mixer[i].sblk->maxblendstages;
			break;
		}
	}

	return stages;
}

static inline void sde_hw_ctl_trigger_start(struct sde_hw_ctl *ctx)
{
	SDE_REG_WRITE(&ctx->hw, CTL_START, 0x1);
}

static inline int sde_hw_ctl_get_start_state(struct sde_hw_ctl *ctx)
{
	return SDE_REG_READ(&ctx->hw, CTL_START);
}

static inline void sde_hw_ctl_trigger_pending(struct sde_hw_ctl *ctx)
{
	SDE_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
}

static inline void sde_hw_ctl_trigger_rot_start(struct sde_hw_ctl *ctx)
{
	/* ROT flush bit is latched during ROT start, so set it first */
	if (CTL_FLUSH_MASK_ROT & ctx->pending_flush_mask) {
		ctx->pending_flush_mask &= ~CTL_FLUSH_MASK_ROT;
		SDE_REG_WRITE(&ctx->hw, CTL_FLUSH, CTL_FLUSH_MASK_ROT);
	}
	SDE_REG_WRITE(&ctx->hw, CTL_ROT_START, BIT(0));
}

static inline void sde_hw_ctl_clear_pending_flush(struct sde_hw_ctl *ctx)
{
	ctx->pending_flush_mask = 0x0;
}

static inline void sde_hw_ctl_update_pending_flush(struct sde_hw_ctl *ctx,
		u32 flushbits)
{
	ctx->pending_flush_mask |= flushbits;
}

static u32 sde_hw_ctl_get_pending_flush(struct sde_hw_ctl *ctx)
{
	if (!ctx)
		return 0x0;

	return ctx->pending_flush_mask;
}

static inline void sde_hw_ctl_trigger_flush(struct sde_hw_ctl *ctx)
{
	SDE_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}

static inline u32 sde_hw_ctl_get_flush_register(struct sde_hw_ctl *ctx)
{
	struct sde_hw_blk_reg_map *c = &ctx->hw;
	u32 rot_op_mode;

	rot_op_mode = SDE_REG_READ(c, CTL_ROT_TOP) & 0x3;

	/* the rotator flush bit is undefined in offline mode, so ignore it */
	if (rot_op_mode == SDE_CTL_ROT_OP_MODE_OFFLINE)
		return SDE_REG_READ(c, CTL_FLUSH) & ~CTL_FLUSH_MASK_ROT;

	return SDE_REG_READ(c, CTL_FLUSH);
}

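/*
 * The pending-flush helpers above are meant to be used as a batch: a
 * caller collects flush bits for each dirty block, writes them out once,
 * then kicks off the frame. A sketch of that sequence (the caller shape
 * is illustrative; the ops themselves are wired up in _setup_ctl_ops()):
 *
 *	ctl->ops.clear_pending_flush(ctl);
 *	ctl->ops.update_pending_flush(ctl,
 *			ctl->ops.get_bitmask_sspp(ctl, SSPP_VIG0));
 *	ctl->ops.update_pending_flush(ctl,
 *			ctl->ops.get_bitmask_mixer(ctl, LM_0));
 *	ctl->ops.trigger_flush(ctl);	writes CTL_FLUSH
 *	ctl->ops.trigger_start(ctl);	writes CTL_START, typically needed
 *					for command-mode kickoff
 */
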
static inline uint32_t sde_hw_ctl_get_bitmask_sspp(struct sde_hw_ctl *ctx,
	enum sde_sspp sspp)
{
	uint32_t flushbits = 0;

	switch (sspp) {
	case SSPP_VIG0:
		flushbits = BIT(0);
		break;
	case SSPP_VIG1:
		flushbits = BIT(1);
		break;
	case SSPP_VIG2:
		flushbits = BIT(2);
		break;
	case SSPP_VIG3:
		flushbits = BIT(18);
		break;
	case SSPP_RGB0:
		flushbits = BIT(3);
		break;
	case SSPP_RGB1:
		flushbits = BIT(4);
		break;
	case SSPP_RGB2:
		flushbits = BIT(5);
		break;
	case SSPP_RGB3:
		flushbits = BIT(19);
		break;
	case SSPP_DMA0:
		flushbits = BIT(11);
		break;
	case SSPP_DMA1:
		flushbits = BIT(12);
		break;
	case SSPP_DMA2:
		flushbits = BIT(24);
		break;
	case SSPP_DMA3:
		flushbits = BIT(25);
		break;
	case SSPP_CURSOR0:
		flushbits = BIT(22);
		break;
	case SSPP_CURSOR1:
		flushbits = BIT(23);
		break;
	default:
		break;
	}

	return flushbits;
}

static inline uint32_t sde_hw_ctl_get_bitmask_mixer(struct sde_hw_ctl *ctx,
	enum sde_lm lm)
{
	uint32_t flushbits = 0;

	switch (lm) {
	case LM_0:
		flushbits = BIT(6);
		break;
	case LM_1:
		flushbits = BIT(7);
		break;
	case LM_2:
		flushbits = BIT(8);
		break;
	case LM_3:
		flushbits = BIT(9);
		break;
	case LM_4:
		flushbits = BIT(10);
		break;
	case LM_5:
		flushbits = BIT(20);
		break;
	default:
		return -EINVAL;
	}

	flushbits |= CTL_FLUSH_MASK_CTL;

	return flushbits;
}

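/*
 * Worked example for the mixer bitmask above: LM_2 maps to BIT(8), and
 * CTL_FLUSH_MASK_CTL (bit 17) is OR'd in unconditionally, so the returned
 * value is 0x00020100; any mixer flush also flushes the CTL path so the
 * reprogrammed blend setup is latched together with the mixer.
 */
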
static inline int sde_hw_ctl_get_bitmask_dspp(struct sde_hw_ctl *ctx,
	u32 *flushbits, enum sde_dspp dspp)
{
	switch (dspp) {
	case DSPP_0:
		*flushbits |= BIT(13);
		break;
	case DSPP_1:
		*flushbits |= BIT(14);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static inline int sde_hw_ctl_get_bitmask_dspp_pavlut(struct sde_hw_ctl *ctx,
	u32 *flushbits, enum sde_dspp dspp)
{
	switch (dspp) {
	case DSPP_0:
		*flushbits |= BIT(3);
		break;
	case DSPP_1:
		*flushbits |= BIT(4);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static inline int sde_hw_ctl_get_bitmask_intf(struct sde_hw_ctl *ctx,
	u32 *flushbits, enum sde_intf intf)
{
	switch (intf) {
	case INTF_0:
		*flushbits |= BIT(31);
		break;
	case INTF_1:
		*flushbits |= BIT(30);
		break;
	case INTF_2:
		*flushbits |= BIT(29);
		break;
	case INTF_3:
		*flushbits |= BIT(28);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static inline int sde_hw_ctl_get_bitmask_wb(struct sde_hw_ctl *ctx,
	u32 *flushbits, enum sde_wb wb)
{
	switch (wb) {
	case WB_0:
	case WB_1:
	case WB_2:
		*flushbits |= BIT(16);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static inline int sde_hw_ctl_get_bitmask_rot(struct sde_hw_ctl *ctx,
	u32 *flushbits, enum sde_rot rot)
{
	switch (rot) {
	case ROT_0:
		*flushbits |= CTL_FLUSH_MASK_ROT;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static inline int sde_hw_ctl_get_bitmask_cdm(struct sde_hw_ctl *ctx,
	u32 *flushbits, enum sde_cdm cdm)
{
	switch (cdm) {
	case CDM_0:
		*flushbits |= BIT(26);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static u32 sde_hw_ctl_poll_reset_status(struct sde_hw_ctl *ctx, u32 timeout_us)
{
	struct sde_hw_blk_reg_map *c = &ctx->hw;
	ktime_t timeout;
	u32 status;

	timeout = ktime_add_us(ktime_get(), timeout_us);

	/*
	 * MDP takes around 30us to finish resetting its CTL path, so poll
	 * every 20-50us; the reset should normally be complete by the
	 * first poll.
	 */
	do {
		status = SDE_REG_READ(c, CTL_SW_RESET);
		status &= 0x1;
		if (status)
			usleep_range(20, 50);
	} while (status && ktime_compare_safe(ktime_get(), timeout) < 0);

	return status;
}

static int sde_hw_ctl_reset_control(struct sde_hw_ctl *ctx)
{
	struct sde_hw_blk_reg_map *c = &ctx->hw;

	pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx - CTL_0);
	SDE_REG_WRITE(c, CTL_SW_RESET, 0x1);
	if (sde_hw_ctl_poll_reset_status(ctx, SDE_REG_RESET_TIMEOUT_US))
		return -EINVAL;

	return 0;
}

static void sde_hw_ctl_hard_reset(struct sde_hw_ctl *ctx, bool enable)
{
	struct sde_hw_blk_reg_map *c = &ctx->hw;

	pr_debug("hw ctl hard reset for ctl:%d, %d\n",
			ctx->idx - CTL_0, enable);
	SDE_REG_WRITE(c, CTL_SW_RESET_OVERRIDE, enable);
}

static int sde_hw_ctl_wait_reset_status(struct sde_hw_ctl *ctx)
{
	struct sde_hw_blk_reg_map *c = &ctx->hw;
	u32 status;

	status = SDE_REG_READ(c, CTL_SW_RESET);
	status &= 0x01;
	if (!status)
		return 0;

	pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx);
	if (sde_hw_ctl_poll_reset_status(ctx, SDE_REG_RESET_TIMEOUT_US)) {
		pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx);
		return -EINVAL;
	}

	return 0;
}

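/*
 * How the three reset helpers above fit together (a sketch; the caller
 * shape and recovery policy are illustrative):
 *
 *	rc = ctl->ops.reset(ctl);		start SW reset, poll to done
 *	if (rc) {
 *		ctl->ops.hard_reset(ctl, true);	assert the reset override
 *		...
 *		ctl->ops.hard_reset(ctl, false);
 *	}
 *	rc = ctl->ops.wait_reset_status(ctl);	poll-only variant, for a
 *						reset requested elsewhere
 */
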
static void sde_hw_ctl_clear_all_blendstages(struct sde_hw_ctl *ctx)
{
	struct sde_hw_blk_reg_map *c = &ctx->hw;
	int i;

	for (i = 0; i < ctx->mixer_count; i++) {
		int mixer_id = ctx->mixer_hw_caps[i].id;

		SDE_REG_WRITE(c, CTL_LAYER(mixer_id), 0);
		SDE_REG_WRITE(c, CTL_LAYER_EXT(mixer_id), 0);
		SDE_REG_WRITE(c, CTL_LAYER_EXT2(mixer_id), 0);
		SDE_REG_WRITE(c, CTL_LAYER_EXT3(mixer_id), 0);
	}
}

static void sde_hw_ctl_setup_blendstage(struct sde_hw_ctl *ctx,
	enum sde_lm lm, struct sde_hw_stage_cfg *stage_cfg)
{
	struct sde_hw_blk_reg_map *c = &ctx->hw;
	u32 mixercfg = 0, mixercfg_ext = 0, mix, ext;
	u32 mixercfg_ext2 = 0, mixercfg_ext3 = 0;
	int i, j;
	int stages;
	int pipes_per_stage;

	stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
	if (stages < 0)
		return;

	if (test_bit(SDE_MIXER_SOURCESPLIT,
		&ctx->mixer_hw_caps->features))
		pipes_per_stage = PIPES_PER_STAGE;
	else
		pipes_per_stage = 1;

	mixercfg = CTL_MIXER_BORDER_OUT; /* always set BORDER_OUT */

	if (!stage_cfg)
		goto exit;

	for (i = 0; i <= stages; i++) {
		/* overflow to the ext register if 'i + 1 > 7' */
		mix = (i + 1) & 0x7;
		ext = i >= 7;

		for (j = 0; j < pipes_per_stage; j++) {
			enum sde_sspp_multirect_index rect_index =
				stage_cfg->multirect_index[i][j];

			switch (stage_cfg->stage[i][j]) {
			case SSPP_VIG0:
				if (rect_index == SDE_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 0;
				} else {
					mixercfg |= mix << 0;
					mixercfg_ext |= ext << 0;
				}
				break;
			case SSPP_VIG1:
				if (rect_index == SDE_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 4;
				} else {
					mixercfg |= mix << 3;
					mixercfg_ext |= ext << 2;
				}
				break;
			case SSPP_VIG2:
				if (rect_index == SDE_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 8;
				} else {
					mixercfg |= mix << 6;
					mixercfg_ext |= ext << 4;
				}
				break;
			case SSPP_VIG3:
				if (rect_index == SDE_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 12;
				} else {
					mixercfg |= mix << 26;
					mixercfg_ext |= ext << 6;
				}
				break;
			case SSPP_RGB0:
				mixercfg |= mix << 9;
				mixercfg_ext |= ext << 8;
				break;
			case SSPP_RGB1:
				mixercfg |= mix << 12;
				mixercfg_ext |= ext << 10;
				break;
			case SSPP_RGB2:
				mixercfg |= mix << 15;
				mixercfg_ext |= ext << 12;
				break;
			case SSPP_RGB3:
				mixercfg |= mix << 29;
				mixercfg_ext |= ext << 14;
				break;
			case SSPP_DMA0:
				if (rect_index == SDE_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 8;
				} else {
					mixercfg |= mix << 18;
					mixercfg_ext |= ext << 16;
				}
				break;
			case SSPP_DMA1:
				if (rect_index == SDE_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 12;
				} else {
					mixercfg |= mix << 21;
					mixercfg_ext |= ext << 18;
				}
				break;
			case SSPP_DMA2:
				if (rect_index == SDE_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 16;
				} else {
					mix |= (i + 1) & 0xF;
					mixercfg_ext2 |= mix << 0;
				}
				break;
			case SSPP_DMA3:
				if (rect_index == SDE_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 20;
				} else {
					mix |= (i + 1) & 0xF;
					mixercfg_ext2 |= mix << 4;
				}
				break;
			case SSPP_CURSOR0:
				mixercfg_ext |= ((i + 1) & 0xF) << 20;
				break;
			case SSPP_CURSOR1:
				mixercfg_ext |= ((i + 1) & 0xF) << 26;
				break;
			default:
				break;
			}
		}
	}

exit:
	SDE_REG_WRITE(c, CTL_LAYER(lm), mixercfg);
	SDE_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg_ext);
	SDE_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg_ext2);
	SDE_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg_ext3);
}

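/*
 * A minimal staging example for the function above (values illustrative):
 * placing SSPP_VIG0 at blend stage 0 of LM_0,
 *
 *	struct sde_hw_stage_cfg stage_cfg = { .stage[0][0] = SSPP_VIG0 };
 *
 *	ctl->ops.setup_blendstage(ctl, LM_0, &stage_cfg);
 *
 * programs CTL_LAYER(LM_0) = CTL_MIXER_BORDER_OUT | 0x1, since each pipe's
 * stage is written as (stage + 1) into its 3-bit field. Stage indices of 7
 * and above spill into the EXT register, and RECT_1 of a multirect pipe is
 * programmed through the 4-bit fields of EXT2/EXT3 instead.
 */
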
static void sde_hw_ctl_intf_cfg(struct sde_hw_ctl *ctx,
		struct sde_hw_intf_cfg *cfg)
{
	struct sde_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_cfg = 0;

	intf_cfg |= (cfg->intf & 0xF) << 4;

	if (cfg->wb)
		intf_cfg |= (cfg->wb & 0x3) + 2;

	if (cfg->mode_3d) {
		intf_cfg |= BIT(19);
		intf_cfg |= (cfg->mode_3d - 0x1) << 20;
	}

	switch (cfg->intf_mode_sel) {
	case SDE_CTL_MODE_SEL_VID:
		intf_cfg &= ~BIT(17);
		intf_cfg &= ~(0x3 << 15);
		break;
	case SDE_CTL_MODE_SEL_CMD:
		intf_cfg |= BIT(17);
		intf_cfg |= ((cfg->stream_sel & 0x3) << 15);
		break;
	default:
		pr_err("unknown interface type %d\n", cfg->intf_mode_sel);
		return;
	}

	SDE_REG_WRITE(c, CTL_TOP, intf_cfg);
}

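/*
 * Example CTL_TOP programming through the function above, for a video-mode
 * display on INTF_1 (field values illustrative):
 *
 *	struct sde_hw_intf_cfg cfg = {
 *		.intf = INTF_1,
 *		.intf_mode_sel = SDE_CTL_MODE_SEL_VID,
 *	};
 *
 *	ctl->ops.setup_intf_cfg(ctl, &cfg);
 *
 * places the interface select in bits [7:4] and clears BIT(17), selecting
 * video mode; SDE_CTL_MODE_SEL_CMD would instead set BIT(17) plus the
 * stream select in bits [16:15].
 */
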
static void sde_hw_ctl_setup_sbuf_cfg(struct sde_hw_ctl *ctx,
		struct sde_ctl_sbuf_cfg *cfg)
{
	struct sde_hw_blk_reg_map *c = &ctx->hw;
	u32 val;

	val = cfg->rot_op_mode & 0x3;

	SDE_REG_WRITE(c, CTL_ROT_TOP, val);
}

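/*
 * Inline-rotator (sbuf) flow built from the helpers above; these ops are
 * only wired up when the CTL advertises SDE_CTL_SBUF. A sketch (the op
 * mode value is illustrative; any non-OFFLINE rot_op_mode applies):
 *
 *	struct sde_ctl_sbuf_cfg cfg = { .rot_op_mode = rot_op_mode };
 *	u32 rot_flush = 0;
 *
 *	ctl->ops.setup_sbuf_cfg(ctl, &cfg);
 *	ctl->ops.get_bitmask_rot(ctl, &rot_flush, ROT_0);
 *	ctl->ops.update_pending_flush(ctl, rot_flush);
 *	ctl->ops.trigger_rot_start(ctl);
 *
 * trigger_rot_start() writes the ROT flush bit ahead of CTL_ROT_START
 * because that bit is latched on rotator start, not on the regular start.
 */
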
static void sde_hw_reg_dma_flush(struct sde_hw_ctl *ctx, bool blocking)
{
	struct sde_hw_reg_dma_ops *ops = sde_reg_dma_get_ops();

	if (ops && ops->last_command)
		ops->last_command(ctx, DMA_CTL_QUEUE0,
				(blocking ? REG_DMA_WAIT4_COMP : REG_DMA_NOWAIT));
}

static void _setup_ctl_ops(struct sde_hw_ctl_ops *ops,
		unsigned long cap)
{
	ops->clear_pending_flush = sde_hw_ctl_clear_pending_flush;
	ops->update_pending_flush = sde_hw_ctl_update_pending_flush;
	ops->get_pending_flush = sde_hw_ctl_get_pending_flush;
	ops->trigger_flush = sde_hw_ctl_trigger_flush;
	ops->get_flush_register = sde_hw_ctl_get_flush_register;
	ops->trigger_start = sde_hw_ctl_trigger_start;
	ops->trigger_pending = sde_hw_ctl_trigger_pending;
	ops->setup_intf_cfg = sde_hw_ctl_intf_cfg;
	ops->reset = sde_hw_ctl_reset_control;
	ops->hard_reset = sde_hw_ctl_hard_reset;
	ops->wait_reset_status = sde_hw_ctl_wait_reset_status;
	ops->clear_all_blendstages = sde_hw_ctl_clear_all_blendstages;
	ops->setup_blendstage = sde_hw_ctl_setup_blendstage;
	ops->get_bitmask_sspp = sde_hw_ctl_get_bitmask_sspp;
	ops->get_bitmask_mixer = sde_hw_ctl_get_bitmask_mixer;
	ops->get_bitmask_dspp = sde_hw_ctl_get_bitmask_dspp;
	ops->get_bitmask_dspp_pavlut = sde_hw_ctl_get_bitmask_dspp_pavlut;
	ops->get_bitmask_intf = sde_hw_ctl_get_bitmask_intf;
	ops->get_bitmask_cdm = sde_hw_ctl_get_bitmask_cdm;
	ops->get_bitmask_wb = sde_hw_ctl_get_bitmask_wb;
	ops->reg_dma_flush = sde_hw_reg_dma_flush;
	ops->get_start_state = sde_hw_ctl_get_start_state;

	if (cap & BIT(SDE_CTL_SBUF)) {
		ops->get_bitmask_rot = sde_hw_ctl_get_bitmask_rot;
		ops->setup_sbuf_cfg = sde_hw_ctl_setup_sbuf_cfg;
		ops->trigger_rot_start = sde_hw_ctl_trigger_rot_start;
	}
}

#define CTL_BASE_OFFSET 0x2000
#define CTL_TOP_OFFSET(index) (CTL_BASE_OFFSET + (0x200 * (index)) + CTL_TOP)

void sde_get_ctl_top_for_cont_splash(void __iomem *mmio,
		struct ctl_top *top, int index)
{
	if (!mmio || !top) {
		SDE_ERROR("invalid input parameters\n");
		return;
	}

	top->value = readl_relaxed(mmio + CTL_TOP_OFFSET(index));
	top->intf_sel = (top->value >> 4) & 0xf;
	top->pp_sel = (top->value >> 8) & 0x7;
	top->dspp_sel = (top->value >> 11) & 0x3;
	top->mode_sel = (top->value >> 17) & 0x1;

	SDE_DEBUG("ctl[%d]_top->0x%x,pp_sel=0x%x,dspp_sel=0x%x,intf_sel=0x%x\n",
			index, top->value, top->pp_sel, top->dspp_sel,
			top->intf_sel);
}

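/*
 * Worked decode for the function above: a CTL_TOP value of 0x00020010
 * yields intf_sel = (0x20010 >> 4) & 0xf = 1, pp_sel = 0, dspp_sel = 0,
 * and mode_sel = (0x20010 >> 17) & 0x1 = 1, i.e. interface select 1 was
 * left running in command mode by the bootloader.
 */
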
static struct sde_hw_blk_ops sde_hw_ops = {
	.start = NULL,
	.stop = NULL,
};

struct sde_hw_ctl *sde_hw_ctl_init(enum sde_ctl idx,
		void __iomem *addr,
		struct sde_mdss_cfg *m)
{
	struct sde_hw_ctl *c;
	struct sde_ctl_cfg *cfg;
	int rc;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return ERR_PTR(-ENOMEM);

	cfg = _ctl_offset(idx, m, addr, &c->hw);
	if (IS_ERR_OR_NULL(cfg)) {
		kfree(c);
		pr_err("failed to create sde_hw_ctl %d\n", idx);
		return ERR_PTR(-EINVAL);
	}

	c->caps = cfg;
	_setup_ctl_ops(&c->ops, c->caps->features);
	c->idx = idx;
	c->mixer_count = m->mixer_count;
	c->mixer_hw_caps = m->mixer;

	rc = sde_hw_blk_init(&c->base, SDE_HW_BLK_CTL, idx, &sde_hw_ops);
	if (rc) {
		SDE_ERROR("failed to init hw blk %d\n", rc);
		goto blk_init_error;
	}

	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off,
			c->hw.blk_off + c->hw.length, c->hw.xin_id);

	return c;

blk_init_error:
	kzfree(c);

	return ERR_PTR(rc);
}

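/*
 * Typical construction via sde_hw_ctl_init() above, paired with
 * sde_hw_ctl_destroy() below (the caller context is illustrative; addr
 * and m come from the SDE hw catalog setup):
 *
 *	struct sde_hw_ctl *ctl = sde_hw_ctl_init(CTL_0, mmio, catalog);
 *
 *	if (IS_ERR(ctl))
 *		return PTR_ERR(ctl);
 *	...
 *	sde_hw_ctl_destroy(ctl);
 *
 * sde_hw_ctl_init() never returns NULL, so IS_ERR() rather than
 * IS_ERR_OR_NULL() is the right check on the caller side.
 */
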
void sde_hw_ctl_destroy(struct sde_hw_ctl *ctx)
{
	if (ctx)
		sde_hw_blk_destroy(&ctx->base);
	kfree(ctx);
}