/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/delay.h>
#include "sde_hwio.h"
#include "sde_hw_ctl.h"
#include "sde_dbg.h"
#include "sde_kms.h"
#include "sde_reg_dma.h"

#define CTL_LAYER(lm) \
	(((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
#define CTL_LAYER_EXT(lm) \
	(0x40 + (((lm) - LM_0) * 0x004))
#define CTL_LAYER_EXT2(lm) \
	(0x70 + (((lm) - LM_0) * 0x004))
#define CTL_LAYER_EXT3(lm) \
	(0xA0 + (((lm) - LM_0) * 0x004))
#define CTL_TOP                       0x014
#define CTL_FLUSH                     0x018
#define CTL_START                     0x01C
#define CTL_PREPARE                   0x0d0
#define CTL_SW_RESET                  0x030
#define CTL_LAYER_EXTN_OFFSET         0x40
#define CTL_ROT_TOP                   0x0C0
#define CTL_ROT_FLUSH                 0x0C4
#define CTL_ROT_START                 0x0CC

#define CTL_MIXER_BORDER_OUT          BIT(24)
#define CTL_FLUSH_MASK_ROT            BIT(27)
#define CTL_FLUSH_MASK_CTL            BIT(17)

#define SDE_REG_RESET_TIMEOUT_US      2000

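/**
 * _ctl_offset - populate a CTL block's register map from the catalog
 * @ctl:  CTL block index to look up
 * @m:    MDSS hardware catalog
 * @addr: mapped base address of the MDSS register space
 * @b:    register map to populate on success
 *
 * Return: the catalog entry for @ctl, or an error pointer if @ctl is
 * not listed in the catalog
 */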
static struct sde_ctl_cfg *_ctl_offset(enum sde_ctl ctl,
		struct sde_mdss_cfg *m,
		void __iomem *addr,
		struct sde_hw_blk_reg_map *b)
{
	int i;

	for (i = 0; i < m->ctl_count; i++) {
		if (ctl == m->ctl[i].id) {
			b->base_off = addr;
			b->blk_off = m->ctl[i].base;
			b->length = m->ctl[i].len;
			b->hwversion = m->hwversion;
			b->log_mask = SDE_DBG_MASK_CTL;
			return &m->ctl[i];
		}
	}
	return ERR_PTR(-ENOMEM);
}

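/**
 * _mixer_stages - look up the maximum blend stages of a layer mixer
 * @mixer: array of layer mixer catalog entries
 * @count: number of entries in @mixer
 * @lm:    layer mixer index to look up
 *
 * Return: maximum blend stages for @lm, or -EINVAL if not found
 */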
static int _mixer_stages(const struct sde_lm_cfg *mixer, int count,
		enum sde_lm lm)
{
	int i;
	int stages = -EINVAL;

	for (i = 0; i < count; i++) {
		if (lm == mixer[i].id) {
			stages = mixer[i].sblk->maxblendstages;
			break;
		}
	}

	return stages;
}

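/* kick off the programmed control path by setting the start bit */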
static inline void sde_hw_ctl_trigger_start(struct sde_hw_ctl *ctx)
{
	SDE_REG_WRITE(&ctx->hw, CTL_START, 0x1);
}

static inline void sde_hw_ctl_trigger_pending(struct sde_hw_ctl *ctx)
{
	SDE_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
}

static inline void sde_hw_ctl_trigger_rot_start(struct sde_hw_ctl *ctx)
{
	SDE_REG_WRITE(&ctx->hw, CTL_ROT_START, BIT(0));
}

static inline void sde_hw_ctl_clear_pending_flush(struct sde_hw_ctl *ctx)
{
	ctx->pending_flush_mask = 0x0;
}

static inline void sde_hw_ctl_update_pending_flush(struct sde_hw_ctl *ctx,
		u32 flushbits)
{
	ctx->pending_flush_mask |= flushbits;
}

static u32 sde_hw_ctl_get_pending_flush(struct sde_hw_ctl *ctx)
{
	if (!ctx)
		return 0x0;

	return ctx->pending_flush_mask;
}

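/* commit the accumulated pending flush mask to the flush register */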
static inline void sde_hw_ctl_trigger_flush(struct sde_hw_ctl *ctx)
{
	SDE_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}

static inline u32 sde_hw_ctl_get_flush_register(struct sde_hw_ctl *ctx)
{
	struct sde_hw_blk_reg_map *c = &ctx->hw;
	u32 rot_op_mode;

	rot_op_mode = SDE_REG_READ(c, CTL_ROT_TOP) & 0x3;

	/* rotator flush bit is undefined in offline mode, so ignore it */
	if (rot_op_mode == SDE_CTL_ROT_OP_MODE_OFFLINE)
		return SDE_REG_READ(c, CTL_FLUSH) & ~CTL_FLUSH_MASK_ROT;

	return SDE_REG_READ(c, CTL_FLUSH);
}

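/**
 * sde_hw_ctl_get_bitmask_sspp - map a source pipe to its ctl flush bit
 * @ctx:  ctl path context
 * @sspp: source pipe index
 *
 * Return: flush bit for @sspp, or 0 if the pipe is not recognized
 */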
static inline uint32_t sde_hw_ctl_get_bitmask_sspp(struct sde_hw_ctl *ctx,
	enum sde_sspp sspp)
{
	uint32_t flushbits = 0;

	switch (sspp) {
	case SSPP_VIG0:
		flushbits = BIT(0);
		break;
	case SSPP_VIG1:
		flushbits = BIT(1);
		break;
	case SSPP_VIG2:
		flushbits = BIT(2);
		break;
	case SSPP_VIG3:
		flushbits = BIT(18);
		break;
	case SSPP_RGB0:
		flushbits = BIT(3);
		break;
	case SSPP_RGB1:
		flushbits = BIT(4);
		break;
	case SSPP_RGB2:
		flushbits = BIT(5);
		break;
	case SSPP_RGB3:
		flushbits = BIT(19);
		break;
	case SSPP_DMA0:
		flushbits = BIT(11);
		break;
	case SSPP_DMA1:
		flushbits = BIT(12);
		break;
	case SSPP_DMA2:
		flushbits = BIT(24);
		break;
	case SSPP_DMA3:
		flushbits = BIT(25);
		break;
	case SSPP_CURSOR0:
		flushbits = BIT(22);
		break;
	case SSPP_CURSOR1:
		flushbits = BIT(23);
		break;
	default:
		break;
	}

	return flushbits;
}

static inline uint32_t sde_hw_ctl_get_bitmask_mixer(struct sde_hw_ctl *ctx,
	enum sde_lm lm)
{
	uint32_t flushbits = 0;

	switch (lm) {
	case LM_0:
		flushbits = BIT(6);
		break;
	case LM_1:
		flushbits = BIT(7);
		break;
	case LM_2:
		flushbits = BIT(8);
		break;
	case LM_3:
		flushbits = BIT(9);
		break;
	case LM_4:
		flushbits = BIT(10);
		break;
	case LM_5:
		flushbits = BIT(20);
		break;
	default:
		return -EINVAL;
	}

	flushbits |= CTL_FLUSH_MASK_CTL;

	return flushbits;
}

static inline int sde_hw_ctl_get_bitmask_dspp(struct sde_hw_ctl *ctx,
		u32 *flushbits, enum sde_dspp dspp)
{
	switch (dspp) {
	case DSPP_0:
		*flushbits |= BIT(13);
		break;
	case DSPP_1:
		*flushbits |= BIT(14);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static inline int sde_hw_ctl_get_bitmask_intf(struct sde_hw_ctl *ctx,
		u32 *flushbits, enum sde_intf intf)
{
	switch (intf) {
	case INTF_0:
		*flushbits |= BIT(31);
		break;
	case INTF_1:
		*flushbits |= BIT(30);
		break;
	case INTF_2:
		*flushbits |= BIT(29);
		break;
	case INTF_3:
		*flushbits |= BIT(28);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static inline int sde_hw_ctl_get_bitmask_wb(struct sde_hw_ctl *ctx,
		u32 *flushbits, enum sde_wb wb)
{
	switch (wb) {
	case WB_0:
	case WB_1:
	case WB_2:
		*flushbits |= BIT(16);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static inline int sde_hw_ctl_get_bitmask_rot(struct sde_hw_ctl *ctx,
		u32 *flushbits, enum sde_rot rot)
{
	switch (rot) {
	case ROT_0:
		*flushbits |= CTL_FLUSH_MASK_ROT;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static inline int sde_hw_ctl_get_bitmask_cdm(struct sde_hw_ctl *ctx,
		u32 *flushbits, enum sde_cdm cdm)
{
	switch (cdm) {
	case CDM_0:
		*flushbits |= BIT(26);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

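/**
 * sde_hw_ctl_poll_reset_status - poll for ctl path sw reset completion
 * @ctx:        ctl path context
 * @timeout_us: maximum time to wait, in microseconds
 *
 * Return: 0 if the reset bit cleared in time, non-zero otherwise
 */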
static u32 sde_hw_ctl_poll_reset_status(struct sde_hw_ctl *ctx, u32 timeout_us)
{
	struct sde_hw_blk_reg_map *c = &ctx->hw;
	ktime_t timeout;
	u32 status;

	timeout = ktime_add_us(ktime_get(), timeout_us);

	/*
	 * it takes around 30us for the mdp to finish resetting its ctl path;
	 * poll every 20-50us so that the reset should be complete by the
	 * first or second poll
	 */
	do {
		status = SDE_REG_READ(c, CTL_SW_RESET);
		status &= 0x1;
		if (status)
			usleep_range(20, 50);
	} while (status && ktime_compare_safe(ktime_get(), timeout) < 0);

	return status;
}

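/* issue a ctl path sw reset and wait for the hardware to complete it */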
static int sde_hw_ctl_reset_control(struct sde_hw_ctl *ctx)
{
	struct sde_hw_blk_reg_map *c = &ctx->hw;

	pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx);
	SDE_REG_WRITE(c, CTL_SW_RESET, 0x1);
	if (sde_hw_ctl_poll_reset_status(ctx, SDE_REG_RESET_TIMEOUT_US))
		return -EINVAL;

	return 0;
}

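/*
 * check whether a ctl path sw reset is still pending and, if so, wait
 * for the hardware to finish it within the reset timeout
 */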
static int sde_hw_ctl_wait_reset_status(struct sde_hw_ctl *ctx)
{
	struct sde_hw_blk_reg_map *c = &ctx->hw;
	u32 status;

	status = SDE_REG_READ(c, CTL_SW_RESET);
	status &= 0x01;
	if (!status)
		return 0;

	pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx);
	if (sde_hw_ctl_poll_reset_status(ctx, SDE_REG_RESET_TIMEOUT_US)) {
		pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx);
		return -EINVAL;
	}

	return 0;
}

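/* clear the blend stage configuration of every layer mixer on this ctl */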
static void sde_hw_ctl_clear_all_blendstages(struct sde_hw_ctl *ctx)
{
	struct sde_hw_blk_reg_map *c = &ctx->hw;
	int i;

	for (i = 0; i < ctx->mixer_count; i++) {
		SDE_REG_WRITE(c, CTL_LAYER(LM_0 + i), 0);
		SDE_REG_WRITE(c, CTL_LAYER_EXT(LM_0 + i), 0);
		SDE_REG_WRITE(c, CTL_LAYER_EXT2(LM_0 + i), 0);
		SDE_REG_WRITE(c, CTL_LAYER_EXT3(LM_0 + i), 0);
	}
}

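/**
 * sde_hw_ctl_setup_blendstage - program the blend stages of a layer mixer
 * @ctx:       ctl path context
 * @lm:        layer mixer to program
 * @stage_cfg: staged pipe assignments, or NULL to set up border output only
 */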
static void sde_hw_ctl_setup_blendstage(struct sde_hw_ctl *ctx,
	enum sde_lm lm, struct sde_hw_stage_cfg *stage_cfg)
{
	struct sde_hw_blk_reg_map *c = &ctx->hw;
	u32 mixercfg = 0, mixercfg_ext = 0, mix, ext;
	u32 mixercfg_ext2 = 0, mixercfg_ext3 = 0;
	int i, j;
	int stages;
	int pipes_per_stage;

	stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
	if (stages < 0)
		return;

	if (test_bit(SDE_MIXER_SOURCESPLIT,
		&ctx->mixer_hw_caps->features))
		pipes_per_stage = PIPES_PER_STAGE;
	else
		pipes_per_stage = 1;

	mixercfg = CTL_MIXER_BORDER_OUT; /* always set BORDER_OUT */

	if (!stage_cfg)
		goto exit;

	for (i = 0; i <= stages; i++) {
		/* overflow to ext register if 'i + 1 > 7' */
		mix = (i + 1) & 0x7;
		ext = i >= 7;

		for (j = 0; j < pipes_per_stage; j++) {
			enum sde_sspp_multirect_index rect_index =
				stage_cfg->multirect_index[i][j];

			switch (stage_cfg->stage[i][j]) {
			case SSPP_VIG0:
				if (rect_index == SDE_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 0;
				} else {
					mixercfg |= mix << 0;
					mixercfg_ext |= ext << 0;
				}
				break;
			case SSPP_VIG1:
				if (rect_index == SDE_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 4;
				} else {
					mixercfg |= mix << 3;
					mixercfg_ext |= ext << 2;
				}
				break;
			case SSPP_VIG2:
				if (rect_index == SDE_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 8;
				} else {
					mixercfg |= mix << 6;
					mixercfg_ext |= ext << 4;
				}
				break;
			case SSPP_VIG3:
				if (rect_index == SDE_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 12;
				} else {
					mixercfg |= mix << 26;
					mixercfg_ext |= ext << 6;
				}
				break;
			case SSPP_RGB0:
				mixercfg |= mix << 9;
				mixercfg_ext |= ext << 8;
				break;
			case SSPP_RGB1:
				mixercfg |= mix << 12;
				mixercfg_ext |= ext << 10;
				break;
			case SSPP_RGB2:
				mixercfg |= mix << 15;
				mixercfg_ext |= ext << 12;
				break;
			case SSPP_RGB3:
				mixercfg |= mix << 29;
				mixercfg_ext |= ext << 14;
				break;
			case SSPP_DMA0:
				if (rect_index == SDE_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 8;
				} else {
					mixercfg |= mix << 18;
					mixercfg_ext |= ext << 16;
				}
				break;
			case SSPP_DMA1:
				if (rect_index == SDE_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 12;
				} else {
					mixercfg |= mix << 21;
					mixercfg_ext |= ext << 18;
				}
				break;
			case SSPP_DMA2:
				if (rect_index == SDE_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 16;
				} else {
					mix |= (i + 1) & 0xF;
					mixercfg_ext2 |= mix << 0;
				}
				break;
			case SSPP_DMA3:
				if (rect_index == SDE_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 20;
				} else {
					mix |= (i + 1) & 0xF;
					mixercfg_ext2 |= mix << 4;
				}
				break;
			case SSPP_CURSOR0:
				mixercfg_ext |= ((i + 1) & 0xF) << 20;
				break;
			case SSPP_CURSOR1:
				mixercfg_ext |= ((i + 1) & 0xF) << 26;
				break;
			default:
				break;
			}
		}
	}

exit:
	SDE_REG_WRITE(c, CTL_LAYER(lm), mixercfg);
	SDE_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg_ext);
	SDE_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg_ext2);
	SDE_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg_ext3);
}

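/* program the ctl path top: output interface, 3d mux and operating mode */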
static void sde_hw_ctl_intf_cfg(struct sde_hw_ctl *ctx,
		struct sde_hw_intf_cfg *cfg)
{
	struct sde_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_cfg = 0;

	intf_cfg |= (cfg->intf & 0xF) << 4;

	if (cfg->wb)
		intf_cfg |= (cfg->wb & 0x3) + 2;

	if (cfg->mode_3d) {
		intf_cfg |= BIT(19);
		intf_cfg |= (cfg->mode_3d - 0x1) << 20;
	}

	switch (cfg->intf_mode_sel) {
	case SDE_CTL_MODE_SEL_VID:
		intf_cfg &= ~BIT(17);
		intf_cfg &= ~(0x3 << 15);
		break;
	case SDE_CTL_MODE_SEL_CMD:
		intf_cfg |= BIT(17);
		intf_cfg |= ((cfg->stream_sel & 0x3) << 15);
		break;
	default:
		pr_err("unknown interface type %d\n", cfg->intf_mode_sel);
		return;
	}

	SDE_REG_WRITE(c, CTL_TOP, intf_cfg);
}

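/* program the inline rotator operating mode in the ctl rot top register */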
static void sde_hw_ctl_setup_sbuf_cfg(struct sde_hw_ctl *ctx,
	struct sde_ctl_sbuf_cfg *cfg)
{
	struct sde_hw_blk_reg_map *c = &ctx->hw;
	u32 val;

	val = cfg->rot_op_mode & 0x3;

	SDE_REG_WRITE(c, CTL_ROT_TOP, val);
}

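/* kick off any register writes batched on the first reg dma ctl queue */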
static void sde_hw_reg_dma_flush(struct sde_hw_ctl *ctx)
{
	struct sde_hw_reg_dma_ops *ops = sde_reg_dma_get_ops();

	if (ops && ops->last_command)
		ops->last_command(ctx, DMA_CTL_QUEUE0);
}

static void _setup_ctl_ops(struct sde_hw_ctl_ops *ops,
		unsigned long cap)
{
	ops->clear_pending_flush = sde_hw_ctl_clear_pending_flush;
	ops->update_pending_flush = sde_hw_ctl_update_pending_flush;
	ops->get_pending_flush = sde_hw_ctl_get_pending_flush;
	ops->trigger_flush = sde_hw_ctl_trigger_flush;
	ops->get_flush_register = sde_hw_ctl_get_flush_register;
	ops->trigger_start = sde_hw_ctl_trigger_start;
	ops->trigger_pending = sde_hw_ctl_trigger_pending;
	ops->setup_intf_cfg = sde_hw_ctl_intf_cfg;
	ops->reset = sde_hw_ctl_reset_control;
	ops->wait_reset_status = sde_hw_ctl_wait_reset_status;
	ops->clear_all_blendstages = sde_hw_ctl_clear_all_blendstages;
	ops->setup_blendstage = sde_hw_ctl_setup_blendstage;
	ops->get_bitmask_sspp = sde_hw_ctl_get_bitmask_sspp;
	ops->get_bitmask_mixer = sde_hw_ctl_get_bitmask_mixer;
	ops->get_bitmask_dspp = sde_hw_ctl_get_bitmask_dspp;
	ops->get_bitmask_intf = sde_hw_ctl_get_bitmask_intf;
	ops->get_bitmask_cdm = sde_hw_ctl_get_bitmask_cdm;
	ops->get_bitmask_wb = sde_hw_ctl_get_bitmask_wb;
	ops->reg_dma_flush = sde_hw_reg_dma_flush;

	if (cap & BIT(SDE_CTL_SBUF)) {
		ops->get_bitmask_rot = sde_hw_ctl_get_bitmask_rot;
		ops->setup_sbuf_cfg = sde_hw_ctl_setup_sbuf_cfg;
		ops->trigger_rot_start = sde_hw_ctl_trigger_rot_start;
	}
}

static struct sde_hw_blk_ops sde_hw_ops = {
	.start = NULL,
	.stop = NULL,
};

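/**
 * sde_hw_ctl_init - initialize the ctl path hw driver object
 * @idx:  ctl path index for which the driver object is required
 * @addr: mapped register io address of the mdss
 * @m:    pointer to the mdss catalog data
 *
 * Return: ctl path context on success, error pointer on failure
 */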
struct sde_hw_ctl *sde_hw_ctl_init(enum sde_ctl idx,
		void __iomem *addr,
		struct sde_mdss_cfg *m)
{
	struct sde_hw_ctl *c;
	struct sde_ctl_cfg *cfg;
	int rc;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return ERR_PTR(-ENOMEM);

	cfg = _ctl_offset(idx, m, addr, &c->hw);
	if (IS_ERR_OR_NULL(cfg)) {
		kfree(c);
		pr_err("failed to create sde_hw_ctl %d\n", idx);
		return ERR_PTR(-EINVAL);
	}

	c->caps = cfg;
	_setup_ctl_ops(&c->ops, c->caps->features);
	c->idx = idx;
	c->mixer_count = m->mixer_count;
	c->mixer_hw_caps = m->mixer;

	rc = sde_hw_blk_init(&c->base, SDE_HW_BLK_CTL, idx, &sde_hw_ops);
	if (rc) {
		SDE_ERROR("failed to init hw blk %d\n", rc);
		goto blk_init_error;
	}

	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off,
			c->hw.blk_off + c->hw.length, c->hw.xin_id);

	return c;

blk_init_error:
	kzfree(c);

	return ERR_PTR(rc);
}

void sde_hw_ctl_destroy(struct sde_hw_ctl *ctx)
{
	if (ctx)
		sde_hw_blk_destroy(&ctx->base);
	kfree(ctx);
}