/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include "sde_hw_mdss.h"
#include "sde_hw_ctl.h"
#include "sde_hw_reg_dma_v1.h"
#include "msm_drv.h"
#include "msm_mmu.h"

#define GUARD_BYTES (BIT(8) - 1)
#define ALIGNED_OFFSET (U32_MAX & ~(GUARD_BYTES))
#define ADDR_ALIGN BIT(8)
#define MAX_RELATIVE_OFF (BIT(20) - 1)

#define DECODE_SEL_OP (BIT(HW_BLK_SELECT))
#define REG_WRITE_OP ((BIT(REG_SINGLE_WRITE)) | (BIT(REG_BLK_WRITE_SINGLE)) | \
	(BIT(REG_BLK_WRITE_INC)) | (BIT(REG_BLK_WRITE_MULTIPLE)))

#define REG_DMA_OPS (DECODE_SEL_OP | REG_WRITE_OP)
#define IS_OP_ALLOWED(op, buf_op) (BIT(op) & buf_op)

#define REG_DMA_OP_MODE_OFF 0x4

#define REG_DMA_CTL0_QUEUE_0_CMD0_OFF 0x14
#define REG_DMA_CTL0_RESET_OFF 0xE4
#define REG_DMA_CTL_TRIGGER_OFF 0xD4

#define SET_UP_REG_DMA_REG(hw, reg_dma) \
	do { \
		(hw).base_off = (reg_dma)->addr; \
		(hw).blk_off = (reg_dma)->caps->base; \
		(hw).hwversion = (reg_dma)->caps->version; \
	} while (0)

#define SIZE_DWORD(x) ((x) / (sizeof(u32)))
#define NOT_WORD_ALIGNED(x) ((x) & 0x3)

#define GRP_VIG_HW_BLK_SELECT (VIG0 | VIG1 | VIG2 | VIG3)
#define GRP_DSPP_HW_BLK_SELECT (DSPP0 | DSPP1 | DSPP2 | DSPP3)
#define BUFFER_SPACE_LEFT(cfg) ((cfg)->dma_buf->buffer_size - \
		(cfg)->dma_buf->index)

#define REG_DMA_DECODE_SEL 0x180AC060
#define REG_DMA_LAST_CMD 0x180AC004
#define SINGLE_REG_WRITE_OPCODE (BIT(28))
#define REL_ADDR_OPCODE (BIT(27))
#define HW_INDEX_REG_WRITE_OPCODE (BIT(28) | BIT(29))
#define AUTO_INC_REG_WRITE_OPCODE (BIT(30))
#define BLK_REG_WRITE_OPCODE (BIT(30) | BIT(28))

#define WRAP_MIN_SIZE 2
#define WRAP_MAX_SIZE (BIT(4) - 1)
#define MAX_DWORDS_SZ (BIT(14) - 1)
#define REG_DMA_HEADERS_BUFFER_SZ (sizeof(u32) * 128)

typedef int (*reg_dma_internal_ops) (struct sde_reg_dma_setup_ops_cfg *cfg);

static struct sde_hw_reg_dma *reg_dma;
static u32 ops_mem_size[REG_DMA_SETUP_OPS_MAX] = {
	[REG_BLK_WRITE_SINGLE] = sizeof(u32) * 2,
	[REG_BLK_WRITE_INC] = sizeof(u32) * 2,
	[REG_BLK_WRITE_MULTIPLE] = sizeof(u32) * 2,
	[HW_BLK_SELECT] = sizeof(u32) * 2,
	[REG_SINGLE_WRITE] = sizeof(u32) * 2
};

static u32 queue_sel[DMA_CTL_QUEUE_MAX] = {
	[DMA_CTL_QUEUE0] = BIT(0),
	[DMA_CTL_QUEUE1] = BIT(4),
};

static u32 reg_dma_ctl_queue_off[CTL_MAX];
static u32 dspp_read_sel[DSPP_HIST_MAX] = {
	[DSPP0_HIST] = 0,
	[DSPP1_HIST] = 1,
	[DSPP2_HIST] = 2,
	[DSPP3_HIST] = 3,
};

static u32 v1_supported[REG_DMA_FEATURES_MAX] = {
	[GAMUT] = GRP_VIG_HW_BLK_SELECT | GRP_DSPP_HW_BLK_SELECT,
	[VLUT] = GRP_DSPP_HW_BLK_SELECT,
	[GC] = GRP_DSPP_HW_BLK_SELECT,
	[IGC] = DSPP_IGC | GRP_DSPP_HW_BLK_SELECT,
	[PCC] = GRP_DSPP_HW_BLK_SELECT,
};

static int validate_dma_cfg(struct sde_reg_dma_setup_ops_cfg *cfg);
static int validate_write_decode_sel(struct sde_reg_dma_setup_ops_cfg *cfg);
static int validate_write_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
static int validate_write_multi_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
static int validate_last_cmd(struct sde_reg_dma_setup_ops_cfg *cfg);
static int write_decode_sel(struct sde_reg_dma_setup_ops_cfg *cfg);
static int write_single_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
static int write_multi_reg_index(struct sde_reg_dma_setup_ops_cfg *cfg);
static int write_multi_reg_inc(struct sde_reg_dma_setup_ops_cfg *cfg);
static int write_multi_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
static int write_last_cmd(struct sde_reg_dma_setup_ops_cfg *cfg);
static int reset_reg_dma_buffer_v1(struct sde_reg_dma_buffer *lut_buf);
static int check_support_v1(enum sde_reg_dma_features feature,
		enum sde_reg_dma_blk blk, bool *is_supported);
static int setup_payload_v1(struct sde_reg_dma_setup_ops_cfg *cfg);
static int kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg);
static int reset_v1(struct sde_hw_ctl *ctl);
static int last_cmd_v1(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q);
static struct sde_reg_dma_buffer *alloc_reg_dma_buf_v1(u32 size);
static int dealloc_reg_dma_v1(struct sde_reg_dma_buffer *lut_buf);

static reg_dma_internal_ops write_dma_op_params[REG_DMA_SETUP_OPS_MAX] = {
	[HW_BLK_SELECT] = write_decode_sel,
	[REG_SINGLE_WRITE] = write_single_reg,
	[REG_BLK_WRITE_SINGLE] = write_multi_reg_inc,
	[REG_BLK_WRITE_INC] = write_multi_reg_index,
	[REG_BLK_WRITE_MULTIPLE] = write_multi_lut_reg,
};

static reg_dma_internal_ops validate_dma_op_params[REG_DMA_SETUP_OPS_MAX] = {
	[HW_BLK_SELECT] = validate_write_decode_sel,
	[REG_SINGLE_WRITE] = validate_write_reg,
	[REG_BLK_WRITE_SINGLE] = validate_write_reg,
	[REG_BLK_WRITE_INC] = validate_write_reg,
	[REG_BLK_WRITE_MULTIPLE] = validate_write_multi_lut_reg,
};

static struct sde_reg_dma_buffer *last_cmd_buf;

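/*
 * Translate the caller's HW block mask (VIGn/DSPPn/IGC bits) into the bit
 * positions expected by the REG_DMA decode-select command word.
 */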
static void get_decode_sel(unsigned long blk, u32 *decode_sel)
{
	int i = 0;

	*decode_sel = 0;
	for_each_set_bit(i, &blk, 31) {
		switch (BIT(i)) {
		case VIG0:
			*decode_sel |= BIT(0);
			break;
		case VIG1:
			*decode_sel |= BIT(1);
			break;
		case VIG2:
			*decode_sel |= BIT(2);
			break;
		case VIG3:
			*decode_sel |= BIT(3);
			break;
		case DSPP0:
			*decode_sel |= BIT(17);
			break;
		case DSPP1:
			*decode_sel |= BIT(18);
			break;
		case DSPP2:
			*decode_sel |= BIT(19);
			break;
		case DSPP3:
			*decode_sel |= BIT(20);
			break;
		case SSPP_IGC:
			*decode_sel |= BIT(4);
			break;
		case DSPP_IGC:
			*decode_sel |= BIT(21);
			break;
		default:
			DRM_ERROR("block not supported %zx\n", BIT(i));
			break;
		}
	}
}

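/*
 * Copy the raw payload into the DMA buffer at the current index and mark a
 * register-write op as completed; shared by all the block-write variants.
 */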
static int write_multi_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u8 *loc = NULL;

	loc = (u8 *)cfg->dma_buf->vaddr + cfg->dma_buf->index;
	memcpy(loc, cfg->data, cfg->data_size);
	cfg->dma_buf->index += cfg->data_size;
	cfg->dma_buf->next_op_allowed = REG_WRITE_OP | DECODE_SEL_OP;
	cfg->dma_buf->ops_completed |= REG_WRITE_OP;

	return 0;
}

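/*
 * Emit a HW-index block-write command: opcode word carrying the relative
 * block offset, then the payload length in dwords, then the payload itself.
 */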
static int write_multi_reg_index(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 *loc = NULL;

	loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
			cfg->dma_buf->index);
	loc[0] = HW_INDEX_REG_WRITE_OPCODE;
	loc[0] |= (cfg->blk_offset & MAX_RELATIVE_OFF);
	loc[1] = SIZE_DWORD(cfg->data_size);
	cfg->dma_buf->index += ops_mem_size[cfg->ops];

	return write_multi_reg(cfg);
}

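/*
 * Emit an auto-increment block-write command: same layout as the HW-index
 * variant but using the auto-increment opcode.
 */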
static int write_multi_reg_inc(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 *loc = NULL;

	loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
			cfg->dma_buf->index);
	loc[0] = AUTO_INC_REG_WRITE_OPCODE;
	loc[0] |= (cfg->blk_offset & MAX_RELATIVE_OFF);
	loc[1] = SIZE_DWORD(cfg->data_size);
	cfg->dma_buf->index += ops_mem_size[cfg->ops];

	return write_multi_reg(cfg);
}

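/*
 * Emit a wrapping block write used for LUT-style payloads: BIT(31) of the
 * second command word selects non-incrementing mode, bits 19:16 hold the
 * wrap size and the low bits the payload length in dwords.
 */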
static int write_multi_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 *loc = NULL;

	loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
			cfg->dma_buf->index);
	loc[0] = BLK_REG_WRITE_OPCODE;
	loc[0] |= (cfg->blk_offset & MAX_RELATIVE_OFF);
	loc[1] = (cfg->inc) ? 0 : BIT(31);
	loc[1] |= (cfg->wrap_size & WRAP_MAX_SIZE) << 16;
	loc[1] |= ((SIZE_DWORD(cfg->data_size)) & MAX_DWORDS_SZ);
	cfg->dma_buf->next_op_allowed = REG_WRITE_OP;
	cfg->dma_buf->index += ops_mem_size[cfg->ops];

	return write_multi_reg(cfg);
}

static int write_single_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 *loc = NULL;

	loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
			cfg->dma_buf->index);
	loc[0] = SINGLE_REG_WRITE_OPCODE;
	loc[0] |= (cfg->blk_offset & MAX_RELATIVE_OFF);
	loc[1] = *cfg->data;
	cfg->dma_buf->index += ops_mem_size[cfg->ops];
	cfg->dma_buf->ops_completed |= REG_WRITE_OP;
	cfg->dma_buf->next_op_allowed = REG_WRITE_OP | DECODE_SEL_OP;

	return 0;
}

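/*
 * Emit the decode-select command: the fixed REG_DMA_DECODE_SEL address
 * followed by the block-select mask. Register writes are only allowed in
 * the buffer after this op has been written.
 */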
static int write_decode_sel(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 *loc = NULL;

	loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
			cfg->dma_buf->index);
	loc[0] = REG_DMA_DECODE_SEL;
	get_decode_sel(cfg->blk, &loc[1]);
	cfg->dma_buf->index += sizeof(u32) * 2;
	cfg->dma_buf->ops_completed |= DECODE_SEL_OP;
	cfg->dma_buf->next_op_allowed = REG_WRITE_OP;

	return 0;
}

static int validate_write_multi_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	int rc;

	rc = validate_write_reg(cfg);
	if (rc)
		return rc;

	if (cfg->wrap_size < WRAP_MIN_SIZE || cfg->wrap_size > WRAP_MAX_SIZE) {
		DRM_ERROR("invalid wrap sz %d min %d max %zd\n",
			cfg->wrap_size, WRAP_MIN_SIZE, WRAP_MAX_SIZE);
		rc = -EINVAL;
	}

	return rc;
}

static int validate_write_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 remain_len, write_len;

	remain_len = BUFFER_SPACE_LEFT(cfg);
	write_len = ops_mem_size[cfg->ops] + cfg->data_size;
	if (remain_len < write_len) {
		DRM_ERROR("buffer is full sz %d needs %d bytes\n",
				remain_len, write_len);
		return -EINVAL;
	}

	if (!cfg->data) {
		DRM_ERROR("invalid data %pK size %d exp sz %d\n", cfg->data,
			cfg->data_size, write_len);
		return -EINVAL;
	}
	if ((SIZE_DWORD(cfg->data_size)) > MAX_DWORDS_SZ ||
	    NOT_WORD_ALIGNED(cfg->data_size)) {
		DRM_ERROR("Invalid data size %d max %zd align %x\n",
			cfg->data_size, MAX_DWORDS_SZ,
			NOT_WORD_ALIGNED(cfg->data_size));
		return -EINVAL;
	}

	if (cfg->blk_offset > MAX_RELATIVE_OFF ||
	    NOT_WORD_ALIGNED(cfg->blk_offset)) {
		DRM_ERROR("invalid offset %d max %zd align %x\n",
				cfg->blk_offset, MAX_RELATIVE_OFF,
				NOT_WORD_ALIGNED(cfg->blk_offset));
		return -EINVAL;
	}

	return 0;
}

static int validate_write_decode_sel(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 remain_len;

	remain_len = BUFFER_SPACE_LEFT(cfg);
	if (remain_len < ops_mem_size[HW_BLK_SELECT]) {
		DRM_ERROR("buffer is full needs %d bytes\n",
				ops_mem_size[HW_BLK_SELECT]);
		return -EINVAL;
	}

	if (!cfg->blk) {
		DRM_ERROR("blk set as 0\n");
		return -EINVAL;
	}
	/* DSPP and VIG can't be combined */
	if ((cfg->blk & GRP_VIG_HW_BLK_SELECT) &&
	    (cfg->blk & GRP_DSPP_HW_BLK_SELECT)) {
		DRM_ERROR("invalid blk combination %x\n",
			cfg->blk);
		return -EINVAL;
	}

	return 0;
}

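/*
 * Common validation for every setup op: feature/block support, remaining
 * buffer space, iova/index alignment and op ordering within the buffer.
 */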
static int validate_dma_cfg(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	int rc = 0;
	bool supported;

	if (!cfg || cfg->ops >= REG_DMA_SETUP_OPS_MAX || !cfg->dma_buf) {
		DRM_ERROR("invalid param cfg %pK ops %d dma_buf %pK\n",
			cfg, ((cfg) ? cfg->ops : REG_DMA_SETUP_OPS_MAX),
			((cfg) ? cfg->dma_buf : NULL));
		return -EINVAL;
	}

	rc = check_support_v1(cfg->feature, cfg->blk, &supported);
	if (rc || !supported) {
		DRM_ERROR("check support failed rc %d supported %d\n",
				rc, supported);
		rc = -EINVAL;
		return rc;
	}

	if (cfg->dma_buf->index >= cfg->dma_buf->buffer_size ||
	    NOT_WORD_ALIGNED(cfg->dma_buf->index)) {
		DRM_ERROR("Buf Overflow index %d max size %d align %x\n",
			cfg->dma_buf->index, cfg->dma_buf->buffer_size,
			NOT_WORD_ALIGNED(cfg->dma_buf->index));
		return -EINVAL;
	}

	if (cfg->dma_buf->iova & GUARD_BYTES || !cfg->dma_buf->vaddr) {
		DRM_ERROR("iova not aligned to %zx iova %x kva %pK",
				ADDR_ALIGN, cfg->dma_buf->iova,
				cfg->dma_buf->vaddr);
		return -EINVAL;
	}
	if (!IS_OP_ALLOWED(cfg->ops, cfg->dma_buf->next_op_allowed)) {
		DRM_ERROR("invalid op %x allowed %x\n", cfg->ops,
				cfg->dma_buf->next_op_allowed);
		return -EINVAL;
	}

	if (!validate_dma_op_params[cfg->ops] ||
	    !write_dma_op_params[cfg->ops]) {
		DRM_ERROR("invalid op %d validate %pK write %pK\n", cfg->ops,
			validate_dma_op_params[cfg->ops],
			write_dma_op_params[cfg->ops]);
		return -EINVAL;
	}
	return rc;
}

static int validate_kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg)
{
	if (!cfg || !cfg->ctl || !cfg->dma_buf) {
		DRM_ERROR("invalid cfg %pK ctl %pK dma_buf %pK\n",
			cfg, ((!cfg) ? NULL : cfg->ctl),
			((!cfg) ? NULL : cfg->dma_buf));
		return -EINVAL;
	}

	if (cfg->ctl->idx < CTL_0 || cfg->ctl->idx >= CTL_MAX) {
		DRM_ERROR("invalid ctl idx %d\n", cfg->ctl->idx);
		return -EINVAL;
	}

	if (cfg->op >= REG_DMA_OP_MAX) {
		DRM_ERROR("invalid op %d\n", cfg->op);
		return -EINVAL;
	}

	if ((cfg->op == REG_DMA_WRITE) &&
	    (!(cfg->dma_buf->ops_completed & DECODE_SEL_OP) ||
	     !(cfg->dma_buf->ops_completed & REG_WRITE_OP))) {
		DRM_ERROR("incomplete write ops %x\n",
				cfg->dma_buf->ops_completed);
		return -EINVAL;
	}

	if (cfg->op == REG_DMA_READ && cfg->block_select >= DSPP_HIST_MAX) {
		DRM_ERROR("invalid block for read %d\n", cfg->block_select);
		return -EINVAL;
	}

	/* Only immediate triggers are supported now hence hardcode */
	cfg->trigger_mode = (cfg->op == REG_DMA_READ) ? (READ_TRIGGER) :
				(WRITE_TRIGGER);

	if (cfg->dma_buf->iova & GUARD_BYTES) {
		DRM_ERROR("Address is not aligned to %zx iova %x", ADDR_ALIGN,
				cfg->dma_buf->iova);
		return -EINVAL;
	}

	if (cfg->queue_select >= DMA_CTL_QUEUE_MAX) {
		DRM_ERROR("invalid queue selected %d\n", cfg->queue_select);
		return -EINVAL;
	}

	if (SIZE_DWORD(cfg->dma_buf->index) > MAX_DWORDS_SZ ||
	    !cfg->dma_buf->index) {
		DRM_ERROR("invalid dword size %zd max %zd\n",
			SIZE_DWORD(cfg->dma_buf->index), MAX_DWORDS_SZ);
		return -EINVAL;
	}
	return 0;
}

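/*
 * Program the per-CTL queue registers with the buffer iova and the command
 * word, then trigger the selected queue through the CTL block.
 */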
static int write_kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg)
{
	u32 cmd1;
	struct sde_hw_blk_reg_map hw;

	memset(&hw, 0, sizeof(hw));
	cmd1 = (cfg->op == REG_DMA_READ) ?
		(dspp_read_sel[cfg->block_select] << 30) : 0;
	cmd1 |= (cfg->last_command) ? BIT(24) : 0;
	cmd1 |= (cfg->op == REG_DMA_READ) ? (2 << 22) : 0;
	cmd1 |= (cfg->op == REG_DMA_WRITE) ? (BIT(22)) : 0;
	cmd1 |= (SIZE_DWORD(cfg->dma_buf->index) & MAX_DWORDS_SZ);

	SET_UP_REG_DMA_REG(hw, reg_dma);
	SDE_REG_WRITE(&hw, REG_DMA_OP_MODE_OFF, BIT(0));
	SDE_REG_WRITE(&hw, reg_dma_ctl_queue_off[cfg->ctl->idx],
			cfg->dma_buf->iova);
	SDE_REG_WRITE(&hw, reg_dma_ctl_queue_off[cfg->ctl->idx] + 0x4,
			cmd1);
	SDE_REG_WRITE(&cfg->ctl->hw, REG_DMA_CTL_TRIGGER_OFF,
			queue_sel[cfg->queue_select]);

	return 0;
}

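/*
 * Install the v1 function table, allocate the shared last-command buffer and
 * derive the queue register offset of each CTL from the CTL_0 offset.
 */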
int init_v1(struct sde_hw_reg_dma *cfg)
{
	int i = 0;

	if (!cfg)
		return -EINVAL;

	reg_dma = cfg;
	if (!last_cmd_buf) {
		last_cmd_buf = alloc_reg_dma_buf_v1(REG_DMA_HEADERS_BUFFER_SZ);
		if (IS_ERR_OR_NULL(last_cmd_buf))
			return -EINVAL;
	}
	reg_dma->ops.check_support = check_support_v1;
	reg_dma->ops.setup_payload = setup_payload_v1;
	reg_dma->ops.kick_off = kick_off_v1;
	reg_dma->ops.reset = reset_v1;
	reg_dma->ops.alloc_reg_dma_buf = alloc_reg_dma_buf_v1;
	reg_dma->ops.dealloc_reg_dma = dealloc_reg_dma_v1;
	reg_dma->ops.reset_reg_dma_buf = reset_reg_dma_buffer_v1;
	reg_dma->ops.last_command = last_cmd_v1;

	reg_dma_ctl_queue_off[CTL_0] = REG_DMA_CTL0_QUEUE_0_CMD0_OFF;
	for (i = CTL_1; i < ARRAY_SIZE(reg_dma_ctl_queue_off); i++)
		reg_dma_ctl_queue_off[i] = reg_dma_ctl_queue_off[i - 1] +
			(sizeof(u32) * 4);

	return 0;
}

static int check_support_v1(enum sde_reg_dma_features feature,
		enum sde_reg_dma_blk blk,
		bool *is_supported)
{
	int ret = 0;

	if (!is_supported)
		return -EINVAL;

	if (feature >= REG_DMA_FEATURES_MAX || blk >= MDSS) {
		*is_supported = false;
		return ret;
	}

	*is_supported = (blk & v1_supported[feature]) ? true : false;
	return ret;
}

static int setup_payload_v1(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	int rc = 0;

	rc = validate_dma_cfg(cfg);

	if (!rc)
		rc = validate_dma_op_params[cfg->ops](cfg);

	if (!rc)
		rc = write_dma_op_params[cfg->ops](cfg);

	return rc;
}

static int kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg)
{
	int rc = 0;

	rc = validate_kick_off_v1(cfg);
	if (rc)
		return rc;

	rc = write_kick_off_v1(cfg);
	return rc;
}

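/*
 * Assert the reset bit for the given CTL's DMA queue and poll briefly
 * (up to ~2 ms) for the hardware to clear it.
 */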
static int reset_v1(struct sde_hw_ctl *ctl)
{
	struct sde_hw_blk_reg_map hw;
	u32 index, i, val;

	if (!ctl || ctl->idx >= CTL_MAX) {
		DRM_ERROR("invalid ctl %pK ctl idx %d\n",
			ctl, ((ctl) ? ctl->idx : 0));
		return -EINVAL;
	}

	memset(&hw, 0, sizeof(hw));
	index = ctl->idx - CTL_0;
	SET_UP_REG_DMA_REG(hw, reg_dma);
	SDE_REG_WRITE(&hw, REG_DMA_OP_MODE_OFF, BIT(0));
	SDE_REG_WRITE(&hw, (REG_DMA_CTL0_RESET_OFF + index * sizeof(u32)),
			BIT(0));

	/* poll the same CTL reset register that was written above */
	i = 0;
	do {
		udelay(1000);
		i++;
		val = SDE_REG_READ(&hw,
			(REG_DMA_CTL0_RESET_OFF + index * sizeof(u32)));
	} while (i < 2 && val);

	return 0;
}

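/*
 * SMMU attach/detach callback: on attach, re-acquire the iova and kernel
 * mapping for the buffer and re-apply the guard-byte alignment; on detach,
 * drop both mappings.
 */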
static void sde_reg_dma_aspace_cb(void *cb_data, bool attach)
{
	struct sde_reg_dma_buffer *dma_buf = NULL;
	struct msm_gem_address_space *aspace = NULL;
	u32 iova_aligned, offset;
	int rc;

	if (!cb_data) {
		DRM_ERROR("aspace cb called with invalid dma_buf\n");
		return;
	}

	dma_buf = (struct sde_reg_dma_buffer *)cb_data;
	aspace = dma_buf->aspace;

	if (attach) {
		rc = msm_gem_get_iova(dma_buf->buf, aspace, &dma_buf->iova);
		if (rc) {
			DRM_ERROR("failed to get the iova rc %d\n", rc);
			return;
		}

		dma_buf->vaddr = msm_gem_get_vaddr(dma_buf->buf);
		if (IS_ERR_OR_NULL(dma_buf->vaddr)) {
			DRM_ERROR("failed to get the virtual address\n");
			return;
		}

		iova_aligned = (dma_buf->iova + GUARD_BYTES) & ALIGNED_OFFSET;
		offset = iova_aligned - dma_buf->iova;
		dma_buf->iova = dma_buf->iova + offset;
		dma_buf->vaddr = (void *)(((u8 *)dma_buf->vaddr) + offset);
		dma_buf->next_op_allowed = DECODE_SEL_OP;
	} else {
		/* invalidate the stored iova */
		dma_buf->iova = 0;

		/* return the virtual address mapping */
		msm_gem_put_vaddr(dma_buf->buf);
		msm_gem_vunmap(dma_buf->buf);
	}
}

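/*
 * Allocate an uncached GEM buffer with GUARD_BYTES of slack, map it, and
 * round the iova and vaddr up to the 256-byte ADDR_ALIGN boundary that
 * validate_dma_cfg() and validate_kick_off_v1() later check for.
 */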
static struct sde_reg_dma_buffer *alloc_reg_dma_buf_v1(u32 size)
{
	struct sde_reg_dma_buffer *dma_buf = NULL;
	u32 iova_aligned, offset;
	u32 rsize = size + GUARD_BYTES;
	struct msm_gem_address_space *aspace = NULL;
	int rc = 0;

	if (!size || SIZE_DWORD(size) > MAX_DWORDS_SZ) {
		DRM_ERROR("invalid buffer size %d\n", size);
		return ERR_PTR(-EINVAL);
	}

	dma_buf = kzalloc(sizeof(*dma_buf), GFP_KERNEL);
	if (!dma_buf)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&reg_dma->drm_dev->struct_mutex);
	dma_buf->buf = msm_gem_new(reg_dma->drm_dev,
			rsize, MSM_BO_UNCACHED);
	mutex_unlock(&reg_dma->drm_dev->struct_mutex);
	if (IS_ERR_OR_NULL(dma_buf->buf)) {
		rc = -EINVAL;
		goto fail;
	}

	aspace = msm_gem_smmu_address_space_get(reg_dma->drm_dev,
			MSM_SMMU_DOMAIN_UNSECURE);
	if (!aspace) {
		DRM_ERROR("failed to get aspace\n");
		rc = -EINVAL;
		goto free_gem;
	}

	/* register to aspace */
	rc = msm_gem_address_space_register_cb(aspace,
			sde_reg_dma_aspace_cb,
			(void *)dma_buf);
	if (rc) {
		DRM_ERROR("failed to register callback %d", rc);
		goto free_gem;
	}

	dma_buf->aspace = aspace;
	rc = msm_gem_get_iova(dma_buf->buf, aspace, &dma_buf->iova);
	if (rc) {
		DRM_ERROR("failed to get the iova rc %d\n", rc);
		goto free_aspace_cb;
	}

	dma_buf->vaddr = msm_gem_get_vaddr(dma_buf->buf);
	if (IS_ERR_OR_NULL(dma_buf->vaddr)) {
		DRM_ERROR("failed to get the virtual address\n");
		rc = -EINVAL;
		goto put_iova;
	}

	dma_buf->buffer_size = size;
	iova_aligned = (dma_buf->iova + GUARD_BYTES) & ALIGNED_OFFSET;
	offset = iova_aligned - dma_buf->iova;
	dma_buf->iova = dma_buf->iova + offset;
	dma_buf->vaddr = (void *)(((u8 *)dma_buf->vaddr) + offset);
	dma_buf->next_op_allowed = DECODE_SEL_OP;

	return dma_buf;

put_iova:
	msm_gem_put_iova(dma_buf->buf, aspace);
free_aspace_cb:
	msm_gem_address_space_unregister_cb(aspace, sde_reg_dma_aspace_cb,
			dma_buf);
free_gem:
	msm_gem_free_object(dma_buf->buf);
fail:
	kfree(dma_buf);
	return ERR_PTR(rc);
}

static int dealloc_reg_dma_v1(struct sde_reg_dma_buffer *dma_buf)
{
	if (!dma_buf) {
		DRM_ERROR("invalid param reg_buf %pK\n", dma_buf);
		return -EINVAL;
	}

	if (dma_buf->buf) {
		msm_gem_put_iova(dma_buf->buf, 0);
		msm_gem_address_space_unregister_cb(dma_buf->aspace,
				sde_reg_dma_aspace_cb, dma_buf);
		mutex_lock(&reg_dma->drm_dev->struct_mutex);
		msm_gem_free_object(dma_buf->buf);
		mutex_unlock(&reg_dma->drm_dev->struct_mutex);
	}

	kfree(dma_buf);
	return 0;
}

static int reset_reg_dma_buffer_v1(struct sde_reg_dma_buffer *lut_buf)
{
	if (!lut_buf)
		return -EINVAL;

	lut_buf->index = 0;
	lut_buf->ops_completed = 0;
	lut_buf->next_op_allowed = DECODE_SEL_OP;
	return 0;
}

static int validate_last_cmd(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 remain_len, write_len;

	remain_len = BUFFER_SPACE_LEFT(cfg);
	write_len = sizeof(u32) * 2;
	if (remain_len < write_len) {
		DRM_ERROR("buffer is full sz %d needs %d bytes\n",
				remain_len, write_len);
		return -EINVAL;
	}
	return 0;
}

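/*
 * Write the REG_DMA_LAST_CMD register through the DMA buffer; assumes the
 * buffer was just reset so the two command words land at its start.
 */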
static int write_last_cmd(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 *loc = NULL;

	loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
			cfg->dma_buf->index);
	loc[0] = REG_DMA_LAST_CMD;
	loc[1] = BIT(0);
	cfg->dma_buf->index = sizeof(u32) * 2;
	cfg->dma_buf->ops_completed = REG_WRITE_OP | DECODE_SEL_OP;
	cfg->dma_buf->next_op_allowed = REG_WRITE_OP;

	return 0;
}

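/*
 * Build the shared last-command buffer and kick it off on the requested
 * queue with the last_command flag set.
 */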
static int last_cmd_v1(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q)
{
	struct sde_reg_dma_setup_ops_cfg cfg;
	struct sde_reg_dma_kickoff_cfg kick_off;

	if (!last_cmd_buf || !ctl || q >= DMA_CTL_QUEUE_MAX) {
		DRM_ERROR("invalid param buf %pK ctl %pK q %d\n", last_cmd_buf,
				ctl, q);
		return -EINVAL;
	}

	cfg.dma_buf = last_cmd_buf;
	reset_reg_dma_buffer_v1(last_cmd_buf);
	if (validate_last_cmd(&cfg)) {
		DRM_ERROR("validate buf failed\n");
		return -EINVAL;
	}

	if (write_last_cmd(&cfg)) {
		DRM_ERROR("write buf failed\n");
		return -EINVAL;
	}

	kick_off.ctl = ctl;
	kick_off.queue_select = q;
	kick_off.trigger_mode = WRITE_IMMEDIATE;
	kick_off.last_command = 1;
	kick_off.op = REG_DMA_WRITE;
	kick_off.dma_buf = last_cmd_buf;
	if (kick_off_v1(&kick_off)) {
		DRM_ERROR("kick off last cmd failed\n");
		return -EINVAL;
	}

	return 0;
}

void deinit_v1(void)
{
	if (last_cmd_buf)
		dealloc_reg_dma_v1(last_cmd_buf);
	last_cmd_buf = NULL;
}