/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/delay.h>
#include "sde_hwio.h"
#include "sde_hw_ctl.h"
#include "sde_dbg.h"
#include "sde_kms.h"
#include "sde_reg_dma.h"

#define CTL_LAYER(lm) \
	(((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
#define CTL_LAYER_EXT(lm) \
	(0x40 + (((lm) - LM_0) * 0x004))
#define CTL_LAYER_EXT2(lm) \
	(0x70 + (((lm) - LM_0) * 0x004))
#define CTL_LAYER_EXT3(lm) \
	(0xA0 + (((lm) - LM_0) * 0x004))
#define CTL_TOP 0x014
#define CTL_FLUSH 0x018
#define CTL_START 0x01C
#define CTL_PREPARE 0x0d0
#define CTL_SW_RESET 0x030
#define CTL_LAYER_EXTN_OFFSET 0x40
#define CTL_ROT_TOP 0x0C0
#define CTL_ROT_FLUSH 0x0C4
#define CTL_ROT_START 0x0CC

#define CTL_MIXER_BORDER_OUT BIT(24)
#define CTL_FLUSH_MASK_ROT BIT(27)
#define CTL_FLUSH_MASK_CTL BIT(17)

#define SDE_REG_RESET_TIMEOUT_COUNT 20

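/*
 * _ctl_offset - find a CTL block in the hardware catalog
 * Fills the register map 'b' with the block's base offset, length and
 * debug mask, and returns the matching catalog entry, or an error
 * pointer if the id is not present in the catalog.
 */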
static struct sde_ctl_cfg *_ctl_offset(enum sde_ctl ctl,
		struct sde_mdss_cfg *m,
		void __iomem *addr,
		struct sde_hw_blk_reg_map *b)
{
	int i;

	for (i = 0; i < m->ctl_count; i++) {
		if (ctl == m->ctl[i].id) {
			b->base_off = addr;
			b->blk_off = m->ctl[i].base;
			b->length = m->ctl[i].len;
			b->hwversion = m->hwversion;
			b->log_mask = SDE_DBG_MASK_CTL;
			return &m->ctl[i];
		}
	}
	return ERR_PTR(-ENOMEM);
}

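/*
 * _mixer_stages - look up the maximum blend stages of a layer mixer
 * Returns the mixer's maxblendstages capability, or -EINVAL if the
 * mixer id is not found.
 */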
static int _mixer_stages(const struct sde_lm_cfg *mixer, int count,
		enum sde_lm lm)
{
	int i;
	int stages = -EINVAL;

	for (i = 0; i < count; i++) {
		if (lm == mixer[i].id) {
			stages = mixer[i].sblk->maxblendstages;
			break;
		}
	}

	return stages;
}

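/* kick off the queued configuration on the control path */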
static inline void sde_hw_ctl_trigger_start(struct sde_hw_ctl *ctx)
{
	SDE_REG_WRITE(&ctx->hw, CTL_START, 0x1);
}

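/* write CTL_PREPARE to signal that updates are pending */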
static inline void sde_hw_ctl_trigger_pending(struct sde_hw_ctl *ctx)
{
	SDE_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
}

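/* start trigger for the inline rotator on this control path */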
static inline void sde_hw_ctl_trigger_rot_start(struct sde_hw_ctl *ctx)
{
	SDE_REG_WRITE(&ctx->hw, CTL_ROT_START, BIT(0));
}

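/*
 * Pending flush bookkeeping: flush bits are accumulated in software via
 * update_pending_flush() and only reach the hardware when trigger_flush()
 * writes the mask to CTL_FLUSH.
 */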
static inline void sde_hw_ctl_clear_pending_flush(struct sde_hw_ctl *ctx)
{
	ctx->pending_flush_mask = 0x0;
}

static inline void sde_hw_ctl_update_pending_flush(struct sde_hw_ctl *ctx,
		u32 flushbits)
{
	ctx->pending_flush_mask |= flushbits;
}

static u32 sde_hw_ctl_get_pending_flush(struct sde_hw_ctl *ctx)
{
	if (!ctx)
		return 0x0;

	return ctx->pending_flush_mask;
}

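/*
 * trigger_flush - commit the accumulated flush mask to CTL_FLUSH
 * Any queued register DMA commands are kicked first so their
 * programming is picked up by the same flush.
 */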
static inline void sde_hw_ctl_trigger_flush(struct sde_hw_ctl *ctx)
{
	struct sde_hw_reg_dma_ops *ops = sde_reg_dma_get_ops();

	if (ops && ops->last_command)
		ops->last_command(ctx, DMA_CTL_QUEUE0);

	SDE_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}

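/*
 * get_flush_register - read back CTL_FLUSH
 * The rotator flush bit is undefined while the rotator is in offline
 * mode, so it is masked out in that case.
 */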
static inline u32 sde_hw_ctl_get_flush_register(struct sde_hw_ctl *ctx)
{
	struct sde_hw_blk_reg_map *c = &ctx->hw;
	u32 rot_op_mode;

	rot_op_mode = SDE_REG_READ(c, CTL_ROT_TOP) & 0x3;

	/* rotate flush bit is undefined if offline mode, so ignore it */
	if (rot_op_mode == SDE_CTL_ROT_OP_MODE_OFFLINE)
		return SDE_REG_READ(c, CTL_FLUSH) & ~CTL_FLUSH_MASK_ROT;

	return SDE_REG_READ(c, CTL_FLUSH);
}

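/* map a source pipe id to its bit position in CTL_FLUSH */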
static inline uint32_t sde_hw_ctl_get_bitmask_sspp(struct sde_hw_ctl *ctx,
	enum sde_sspp sspp)
{
	uint32_t flushbits = 0;

	switch (sspp) {
	case SSPP_VIG0:
		flushbits = BIT(0);
		break;
	case SSPP_VIG1:
		flushbits = BIT(1);
		break;
	case SSPP_VIG2:
		flushbits = BIT(2);
		break;
	case SSPP_VIG3:
		flushbits = BIT(18);
		break;
	case SSPP_RGB0:
		flushbits = BIT(3);
		break;
	case SSPP_RGB1:
		flushbits = BIT(4);
		break;
	case SSPP_RGB2:
		flushbits = BIT(5);
		break;
	case SSPP_RGB3:
		flushbits = BIT(19);
		break;
	case SSPP_DMA0:
		flushbits = BIT(11);
		break;
	case SSPP_DMA1:
		flushbits = BIT(12);
		break;
	case SSPP_DMA2:
		flushbits = BIT(24);
		break;
	case SSPP_DMA3:
		flushbits = BIT(25);
		break;
	case SSPP_CURSOR0:
		flushbits = BIT(22);
		break;
	case SSPP_CURSOR1:
		flushbits = BIT(23);
		break;
	default:
		break;
	}

	return flushbits;
}

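/*
 * Map a layer mixer id to its CTL_FLUSH bit; a mixer flush also
 * requires the CTL path bit itself to be set.
 */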
static inline uint32_t sde_hw_ctl_get_bitmask_mixer(struct sde_hw_ctl *ctx,
	enum sde_lm lm)
{
	uint32_t flushbits = 0;

	switch (lm) {
	case LM_0:
		flushbits = BIT(6);
		break;
	case LM_1:
		flushbits = BIT(7);
		break;
	case LM_2:
		flushbits = BIT(8);
		break;
	case LM_3:
		flushbits = BIT(9);
		break;
	case LM_4:
		flushbits = BIT(10);
		break;
	case LM_5:
		flushbits = BIT(20);
		break;
	default:
		return -EINVAL;
	}

	flushbits |= CTL_FLUSH_MASK_CTL;

	return flushbits;
}

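/*
 * The remaining get_bitmask_* helpers OR the block's flush bit into the
 * caller-provided mask and return -EINVAL for unknown block ids.
 */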
static inline int sde_hw_ctl_get_bitmask_dspp(struct sde_hw_ctl *ctx,
		u32 *flushbits, enum sde_dspp dspp)
{
	switch (dspp) {
	case DSPP_0:
		*flushbits |= BIT(13);
		break;
	case DSPP_1:
		*flushbits |= BIT(14);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static inline int sde_hw_ctl_get_bitmask_intf(struct sde_hw_ctl *ctx,
		u32 *flushbits, enum sde_intf intf)
{
	switch (intf) {
	case INTF_0:
		*flushbits |= BIT(31);
		break;
	case INTF_1:
		*flushbits |= BIT(30);
		break;
	case INTF_2:
		*flushbits |= BIT(29);
		break;
	case INTF_3:
		*flushbits |= BIT(28);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static inline int sde_hw_ctl_get_bitmask_wb(struct sde_hw_ctl *ctx,
		u32 *flushbits, enum sde_wb wb)
{
	switch (wb) {
	case WB_0:
	case WB_1:
	case WB_2:
		*flushbits |= BIT(16);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static inline int sde_hw_ctl_get_bitmask_rot(struct sde_hw_ctl *ctx,
		u32 *flushbits, enum sde_rot rot)
{
	switch (rot) {
	case ROT_0:
		*flushbits |= CTL_FLUSH_MASK_ROT;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static inline int sde_hw_ctl_get_bitmask_cdm(struct sde_hw_ctl *ctx,
		u32 *flushbits, enum sde_cdm cdm)
{
	switch (cdm) {
	case CDM_0:
		*flushbits |= BIT(26);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

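/*
 * poll_reset_status - poll CTL_SW_RESET until hardware clears it
 * Returns the final reset status bit: 0 when the reset completed,
 * nonzero if it is still pending after 'count' polls.
 */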
static u32 sde_hw_ctl_poll_reset_status(struct sde_hw_ctl *ctx, u32 count)
{
	struct sde_hw_blk_reg_map *c = &ctx->hw;
	u32 status;

	/* protect to do at least one iteration */
	if (!count)
		count = 1;

	/*
	 * it takes around 30us to have mdp finish resetting its ctl path
	 * poll every 50us so that reset should be completed at 1st poll
	 */
	do {
		status = SDE_REG_READ(c, CTL_SW_RESET);
		status &= 0x01;
		if (status)
			usleep_range(20, 50);
	} while (status && --count > 0);

	return status;
}

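/* issue a software reset of the control path and wait for completion */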
static int sde_hw_ctl_reset_control(struct sde_hw_ctl *ctx)
{
	struct sde_hw_blk_reg_map *c = &ctx->hw;

	pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx);
	SDE_REG_WRITE(c, CTL_SW_RESET, 0x1);
	if (sde_hw_ctl_poll_reset_status(ctx, SDE_REG_RESET_TIMEOUT_COUNT))
		return -EINVAL;

	return 0;
}

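/*
 * wait_reset_status - wait for a previously issued ctl reset to finish
 * Returns immediately if no reset is in flight.
 */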
static int sde_hw_ctl_wait_reset_status(struct sde_hw_ctl *ctx)
{
	struct sde_hw_blk_reg_map *c = &ctx->hw;
	u32 status;

	status = SDE_REG_READ(c, CTL_SW_RESET);
	status &= 0x01;
	if (!status)
		return 0;

	pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx);
	if (sde_hw_ctl_poll_reset_status(ctx, SDE_REG_RESET_TIMEOUT_COUNT)) {
		pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx);
		return -EINVAL;
	}

	return 0;
}

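/* clear the blend stage configuration of all layer mixers */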
static void sde_hw_ctl_clear_all_blendstages(struct sde_hw_ctl *ctx)
{
	struct sde_hw_blk_reg_map *c = &ctx->hw;
	int i;

	for (i = 0; i < ctx->mixer_count; i++) {
		SDE_REG_WRITE(c, CTL_LAYER(LM_0 + i), 0);
		SDE_REG_WRITE(c, CTL_LAYER_EXT(LM_0 + i), 0);
		SDE_REG_WRITE(c, CTL_LAYER_EXT2(LM_0 + i), 0);
		SDE_REG_WRITE(c, CTL_LAYER_EXT3(LM_0 + i), 0);
	}
}

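/*
 * setup_blendstage - program the blend stage registers of one mixer
 * Each staged pipe is encoded into CTL_LAYER and its EXT registers;
 * rect 1 of a multirect pipe lands in the EXT2/EXT3 fields. BORDER_OUT
 * is always set so an empty stage configuration produces border fill.
 */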
static void sde_hw_ctl_setup_blendstage(struct sde_hw_ctl *ctx,
	enum sde_lm lm, struct sde_hw_stage_cfg *stage_cfg)
{
	struct sde_hw_blk_reg_map *c = &ctx->hw;
	u32 mixercfg = 0, mixercfg_ext = 0, mix, ext;
	u32 mixercfg_ext2 = 0, mixercfg_ext3 = 0;
	int i, j;
	int stages;
	int pipes_per_stage;

	stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
	if (stages < 0)
		return;

	if (test_bit(SDE_MIXER_SOURCESPLIT,
			&ctx->mixer_hw_caps->features))
		pipes_per_stage = PIPES_PER_STAGE;
	else
		pipes_per_stage = 1;

	mixercfg = CTL_MIXER_BORDER_OUT; /* always set BORDER_OUT */

	if (!stage_cfg)
		goto exit;

	for (i = 0; i <= stages; i++) {
		/* overflow to ext register if 'i + 1 > 7' */
		mix = (i + 1) & 0x7;
		ext = i >= 7;

		for (j = 0 ; j < pipes_per_stage; j++) {
			enum sde_sspp_multirect_index rect_index =
				stage_cfg->multirect_index[i][j];

			switch (stage_cfg->stage[i][j]) {
			case SSPP_VIG0:
				if (rect_index == SDE_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 0;
				} else {
					mixercfg |= mix << 0;
					mixercfg_ext |= ext << 0;
				}
				break;
			case SSPP_VIG1:
				if (rect_index == SDE_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 4;
				} else {
					mixercfg |= mix << 3;
					mixercfg_ext |= ext << 2;
				}
				break;
			case SSPP_VIG2:
				if (rect_index == SDE_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 8;
				} else {
					mixercfg |= mix << 6;
					mixercfg_ext |= ext << 4;
				}
				break;
			case SSPP_VIG3:
				if (rect_index == SDE_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 12;
				} else {
					mixercfg |= mix << 26;
					mixercfg_ext |= ext << 6;
				}
				break;
			case SSPP_RGB0:
				mixercfg |= mix << 9;
				mixercfg_ext |= ext << 8;
				break;
			case SSPP_RGB1:
				mixercfg |= mix << 12;
				mixercfg_ext |= ext << 10;
				break;
			case SSPP_RGB2:
				mixercfg |= mix << 15;
				mixercfg_ext |= ext << 12;
				break;
			case SSPP_RGB3:
				mixercfg |= mix << 29;
				mixercfg_ext |= ext << 14;
				break;
			case SSPP_DMA0:
				if (rect_index == SDE_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 8;
				} else {
					mixercfg |= mix << 18;
					mixercfg_ext |= ext << 16;
				}
				break;
			case SSPP_DMA1:
				if (rect_index == SDE_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 12;
				} else {
					mixercfg |= mix << 21;
					mixercfg_ext |= ext << 18;
				}
				break;
			case SSPP_DMA2:
				if (rect_index == SDE_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 16;
				} else {
					/*
					 * DMA2/DMA3 use 4-bit stage fields in
					 * EXT2; write the value directly so
					 * 'mix' is not clobbered for other
					 * pipes in this stage.
					 */
					mixercfg_ext2 |= ((i + 1) & 0xF) << 0;
				}
				break;
			case SSPP_DMA3:
				if (rect_index == SDE_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 20;
				} else {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 4;
				}
				break;
			case SSPP_CURSOR0:
				mixercfg_ext |= ((i + 1) & 0xF) << 20;
				break;
			case SSPP_CURSOR1:
				mixercfg_ext |= ((i + 1) & 0xF) << 26;
				break;
			default:
				break;
			}
		}
	}

exit:
	SDE_REG_WRITE(c, CTL_LAYER(lm), mixercfg);
	SDE_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg_ext);
	SDE_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg_ext2);
	SDE_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg_ext3);
}

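/*
 * intf_cfg - program CTL_TOP with the interface/writeback routing, the
 * 3D mux mode and the video vs. command mode selection.
 */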
static void sde_hw_ctl_intf_cfg(struct sde_hw_ctl *ctx,
		struct sde_hw_intf_cfg *cfg)
{
	struct sde_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_cfg = 0;

	intf_cfg |= (cfg->intf & 0xF) << 4;

	if (cfg->wb)
		intf_cfg |= (cfg->wb & 0x3) + 2;

	if (cfg->mode_3d) {
		intf_cfg |= BIT(19);
		intf_cfg |= (cfg->mode_3d - 0x1) << 20;
	}

	switch (cfg->intf_mode_sel) {
	case SDE_CTL_MODE_SEL_VID:
		intf_cfg &= ~BIT(17);
		intf_cfg &= ~(0x3 << 15);
		break;
	case SDE_CTL_MODE_SEL_CMD:
		intf_cfg |= BIT(17);
		intf_cfg |= ((cfg->stream_sel & 0x3) << 15);
		break;
	default:
		pr_err("unknown interface type %d\n", cfg->intf_mode_sel);
		return;
	}

	SDE_REG_WRITE(c, CTL_TOP, intf_cfg);
}

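/* select the rotator operation mode (e.g. offline vs. inline) in CTL_ROT_TOP */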
static void sde_hw_ctl_setup_sbuf_cfg(struct sde_hw_ctl *ctx,
	struct sde_ctl_sbuf_cfg *cfg)
{
	struct sde_hw_blk_reg_map *c = &ctx->hw;
	u32 val;

	val = cfg->rot_op_mode & 0x3;

	SDE_REG_WRITE(c, CTL_ROT_TOP, val);
}

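/*
 * Populate the ops table; the rotator/sbuf hooks are only exposed when
 * the CTL block advertises the SDE_CTL_SBUF capability.
 */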
static void _setup_ctl_ops(struct sde_hw_ctl_ops *ops,
		unsigned long cap)
{
	ops->clear_pending_flush = sde_hw_ctl_clear_pending_flush;
	ops->update_pending_flush = sde_hw_ctl_update_pending_flush;
	ops->get_pending_flush = sde_hw_ctl_get_pending_flush;
	ops->trigger_flush = sde_hw_ctl_trigger_flush;
	ops->get_flush_register = sde_hw_ctl_get_flush_register;
	ops->trigger_start = sde_hw_ctl_trigger_start;
	ops->trigger_pending = sde_hw_ctl_trigger_pending;
	ops->setup_intf_cfg = sde_hw_ctl_intf_cfg;
	ops->reset = sde_hw_ctl_reset_control;
	ops->wait_reset_status = sde_hw_ctl_wait_reset_status;
	ops->clear_all_blendstages = sde_hw_ctl_clear_all_blendstages;
	ops->setup_blendstage = sde_hw_ctl_setup_blendstage;
	ops->get_bitmask_sspp = sde_hw_ctl_get_bitmask_sspp;
	ops->get_bitmask_mixer = sde_hw_ctl_get_bitmask_mixer;
	ops->get_bitmask_dspp = sde_hw_ctl_get_bitmask_dspp;
	ops->get_bitmask_intf = sde_hw_ctl_get_bitmask_intf;
	ops->get_bitmask_cdm = sde_hw_ctl_get_bitmask_cdm;
	ops->get_bitmask_wb = sde_hw_ctl_get_bitmask_wb;
	if (cap & BIT(SDE_CTL_SBUF)) {
		ops->get_bitmask_rot = sde_hw_ctl_get_bitmask_rot;
		ops->setup_sbuf_cfg = sde_hw_ctl_setup_sbuf_cfg;
		ops->trigger_rot_start = sde_hw_ctl_trigger_rot_start;
	}
}

static struct sde_hw_blk_ops sde_hw_ops = {
	.start = NULL,
	.stop = NULL,
};

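/*
 * sde_hw_ctl_init - allocate and initialize a ctl hw block
 * Looks up the block in the catalog, installs the ops table and
 * registers the block's address range with the debug facility.
 */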
struct sde_hw_ctl *sde_hw_ctl_init(enum sde_ctl idx,
		void __iomem *addr,
		struct sde_mdss_cfg *m)
{
	struct sde_hw_ctl *c;
	struct sde_ctl_cfg *cfg;
	int rc;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return ERR_PTR(-ENOMEM);

	cfg = _ctl_offset(idx, m, addr, &c->hw);
	if (IS_ERR_OR_NULL(cfg)) {
		kfree(c);
		pr_err("failed to create sde_hw_ctl %d\n", idx);
		return ERR_PTR(-EINVAL);
	}

	c->caps = cfg;
	_setup_ctl_ops(&c->ops, c->caps->features);
	c->idx = idx;
	c->mixer_count = m->mixer_count;
	c->mixer_hw_caps = m->mixer;

	rc = sde_hw_blk_init(&c->base, SDE_HW_BLK_CTL, idx, &sde_hw_ops);
	if (rc) {
		SDE_ERROR("failed to init hw blk %d\n", rc);
		goto blk_init_error;
	}

	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off,
			c->hw.blk_off + c->hw.length, c->hw.xin_id);

	return c;

blk_init_error:
	kzfree(c);

	return ERR_PTR(rc);
}

void sde_hw_ctl_destroy(struct sde_hw_ctl *ctx)
{
	if (ctx)
		sde_hw_blk_destroy(&ctx->base);
	kfree(ctx);
}