/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/firmware.h>
#include <linux/pm_opp.h>

#include "adreno.h"
#include "a6xx_reg.h"
#include "adreno_cp_parser.h"
#include "adreno_trace.h"
#include "adreno_pm4types.h"
#include "adreno_perfcounter.h"
#include "adreno_ringbuffer.h"
#include "kgsl_sharedmem.h"
#include "kgsl_log.h"
#include "kgsl.h"
#include <linux/msm_kgsl.h>

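/*
 * Default CP_RB_CNTL value: bits [5:0] carry the log2 of the ring size in
 * quadwords (see the comment in a6xx_rb_start()) and bits [12:8] carry
 * ilog2(4), presumably the read block size exponent.
 */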
#define A6XX_CP_RB_CNTL_DEFAULT (((ilog2(4) << 8) & 0x1F00) | \
		(ilog2(KGSL_RB_DWORDS >> 1) & 0x3F))

#define MIN_HBB		13

static const struct adreno_vbif_data a630_vbif[] = {
	{A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009},
	{A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3},
	{0, 0},
};

static const struct adreno_vbif_platform a6xx_vbif_platforms[] = {
	{ adreno_is_a630, a630_vbif },
};

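/*
 * Each entry below is packed into one A6XX_CP_PROTECT_REG register by
 * a6xx_protect_init(): the base register offset goes in bits [17:0], the
 * range length in bits [30:18] and the read-protect flag in bit 31.
 */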
static struct a6xx_protected_regs {
	unsigned int base;
	unsigned int count;
	int read_protect;
} a6xx_protected_regs_group[] = {
	{ 0x600, 0x51, 0 },
	{ 0xAE50, 0x2, 1 },
	{ 0x9624, 0x13, 1 },
	{ 0x8630, 0x8, 1 },
	{ 0x9E70, 0x1, 1 },
	{ 0x9E78, 0x187, 1 },
	{ 0xF000, 0x810, 1 },
	{ 0xFC00, 0x3, 0 },
	{ 0x50E, 0x0, 1 },
	{ 0x50F, 0x0, 0 },
	{ 0x510, 0x0, 1 },
	{ 0x0, 0x4F9, 0 },
	{ 0x501, 0xA, 0 },
	{ 0x511, 0x44, 0 },
	{ 0xE00, 0xE, 1 },
	{ 0x8E00, 0x0, 1 },
	{ 0x8E50, 0xF, 1 },
	{ 0xBE02, 0x0, 1 },
	{ 0xBE20, 0x11F3, 1 },
	{ 0x800, 0x82, 1 },
	{ 0x8A0, 0x8, 1 },
	{ 0x8AB, 0x19, 1 },
	{ 0x900, 0x4D, 1 },
	{ 0x98D, 0x76, 1 },
	{ 0x8D0, 0x23, 0 },
	{ 0x980, 0x4, 0 },
	{ 0xA630, 0x0, 1 },
};

/* Print some key registers if a spin-for-idle times out */
static void spin_idle_debug(struct kgsl_device *device,
		const char *str)
{
	unsigned int rptr, wptr;
	unsigned int status, status3, intstatus;
	unsigned int hwfault;

	dev_err(device->dev, "%s", str);

	kgsl_regread(device, A6XX_CP_RB_RPTR, &rptr);
	kgsl_regread(device, A6XX_CP_RB_WPTR, &wptr);

	kgsl_regread(device, A6XX_RBBM_STATUS, &status);
	kgsl_regread(device, A6XX_RBBM_STATUS3, &status3);
	kgsl_regread(device, A6XX_RBBM_INT_0_STATUS, &intstatus);
	kgsl_regread(device, A6XX_CP_HW_FAULT, &hwfault);

	dev_err(device->dev,
		" rb=%X/%X rbbm_status=%8.8X/%8.8X int_0_status=%8.8X\n",
		rptr, wptr, status, status3, intstatus);
	dev_err(device->dev, " hwfault=%8.8X\n", hwfault);
}

static void a6xx_platform_setup(struct adreno_device *adreno_dev)
{
	uint64_t addr;

	/* Calculate SP local and private mem addresses */
	addr = ALIGN(ADRENO_UCHE_GMEM_BASE + adreno_dev->gmem_size, SZ_64K);
	adreno_dev->sp_local_gpuaddr = addr;
	adreno_dev->sp_pvt_gpuaddr = addr + SZ_64K;
}

/**
 * a6xx_protect_init() - Initializes register protection on a6xx
 * @adreno_dev: Pointer to the adreno device
 *
 * Performs register writes to enable protected access to sensitive
 * registers
 */
static void a6xx_protect_init(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	int i;

	/* enable access protection to privileged registers */
	kgsl_regwrite(device, A6XX_CP_PROTECT_CNTL, 0x00000007);

	if (ARRAY_SIZE(a6xx_protected_regs_group) >
			adreno_dev->gpucore->num_protected_regs)
		WARN(1, "Size exceeds the num of protection regs available\n");

	for (i = 0; i < ARRAY_SIZE(a6xx_protected_regs_group); i++) {
		struct a6xx_protected_regs *regs =
					&a6xx_protected_regs_group[i];

		kgsl_regwrite(device, A6XX_CP_PROTECT_REG + i,
				regs->base | (regs->count << 18) |
				(regs->read_protect << 31));
	}
}

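/* Put every GPU block into 64-bit address mode */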
static void a6xx_enable_64bit(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	kgsl_regwrite(device, A6XX_CP_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_VSC_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_GRAS_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_RB_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_PC_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_HLSQ_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_VFD_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_VPC_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_UCHE_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_SP_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_TPL1_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
}

/*
 * a6xx_start() - Device start
 * @adreno_dev: Pointer to adreno device
 *
 * a6xx device start
 */
static void a6xx_start(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned int bit, mal, mode;
	unsigned int amsbc = 0;

	adreno_vbif_start(adreno_dev, a6xx_vbif_platforms,
			ARRAY_SIZE(a6xx_vbif_platforms));

	/*
	 * Set UCHE_WRITE_THRU_BASE to the UCHE_TRAP_BASE, effectively
	 * disabling L2 bypass
	 */
	kgsl_regwrite(device, A6XX_UCHE_WRITE_RANGE_MAX_LO, 0xffffffc0);
	kgsl_regwrite(device, A6XX_UCHE_WRITE_RANGE_MAX_HI, 0x0001ffff);
	kgsl_regwrite(device, A6XX_UCHE_TRAP_BASE_LO, 0xfffff000);
	kgsl_regwrite(device, A6XX_UCHE_TRAP_BASE_HI, 0x0001ffff);
	kgsl_regwrite(device, A6XX_UCHE_WRITE_THRU_BASE_LO, 0xfffff000);
	kgsl_regwrite(device, A6XX_UCHE_WRITE_THRU_BASE_HI, 0x0001ffff);

	/* Program the GMEM VA range for the UCHE path */
	kgsl_regwrite(device, A6XX_UCHE_GMEM_RANGE_MIN_LO,
			ADRENO_UCHE_GMEM_BASE);
	kgsl_regwrite(device, A6XX_UCHE_GMEM_RANGE_MIN_HI, 0x0);
	kgsl_regwrite(device, A6XX_UCHE_GMEM_RANGE_MAX_LO,
			ADRENO_UCHE_GMEM_BASE +
			adreno_dev->gmem_size - 1);
	kgsl_regwrite(device, A6XX_UCHE_GMEM_RANGE_MAX_HI, 0x0);

	kgsl_regwrite(device, A6XX_UCHE_FILTER_CNTL, 0x804);
	kgsl_regwrite(device, A6XX_UCHE_CACHE_WAYS, 0x4);

	kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_2, 0x010000C0);
	kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362C);

	/* Set the mem pool size */
	kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, 128);

	/* Set the primFifo thresholds to the default values */
	kgsl_regwrite(device, A6XX_PC_DBG_ECO_CNTL, (0x300 << 11));

	/* Disable secured mode */
	kgsl_regwrite(device, A6XX_RBBM_SECVID_TRUST_CNTL, 0x0);

	/* Set the AHB default slave response to "ERROR" */
	kgsl_regwrite(device, A6XX_CP_AHB_CNTL, 0x1);

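	/*
	 * UBWC related settings come from devicetree; fall back to a
	 * highest bank bit of MIN_HBB (13), a minimum access length of 32
	 * and UBWC mode 0 when the properties are missing.
	 */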
	if (of_property_read_u32(device->pdev->dev.of_node,
		"qcom,highest-bank-bit", &bit))
		bit = MIN_HBB;

	if (of_property_read_u32(device->pdev->dev.of_node,
		"qcom,min-access-length", &mal))
		mal = 32;

	if (of_property_read_u32(device->pdev->dev.of_node,
		"qcom,ubwc-mode", &mode))
		mode = 0;

	switch (mode) {
	case KGSL_UBWC_1_0:
		mode = 1;
		break;
	case KGSL_UBWC_2_0:
		mode = 0;
		break;
	case KGSL_UBWC_3_0:
		mode = 0;
		amsbc = 1; /* Only valid for A640 and A680 */
		break;
	default:
		break;
	}

	if (bit >= 13 && bit <= 16)
		bit = (bit - 13) & 0x03;
	else
		bit = 0;

	mal = (mal == 64) ? 1 : 0;

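	/*
	 * Program the compression settings: mode in bit 0, the encoded
	 * highest bank bit in bits [2:1], MAL in bit 3 and (for RB only)
	 * AMSBC in bit 4. UCHE_MODE_CNTL takes the same HBB/MAL values in
	 * bits [22:21] and bit 23.
	 */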
	kgsl_regwrite(device, A6XX_RB_NC_MODE_CNTL, (amsbc << 4) | (mal << 3) |
							(bit << 1) | mode);
	kgsl_regwrite(device, A6XX_TPL1_NC_MODE_CNTL, (mal << 3) |
							(bit << 1) | mode);
	kgsl_regwrite(device, A6XX_SP_NC_MODE_CNTL, (mal << 3) | (bit << 1) |
								mode);

	/* The (1 << 29) globalInvFlushFilterDis bit needs to be set for A630 V1 */
	kgsl_regwrite(device, A6XX_UCHE_MODE_CNTL, (1 << 29) | (mal << 23) |
							(bit << 21));

	kgsl_regwrite(device, A6XX_RBBM_INTERFACE_HANG_INT_CNTL,
					(1 << 30) | 0x4000);

	a6xx_protect_init(adreno_dev);
}

/*
 * a6xx_microcode_load() - Load microcode
 * @adreno_dev: Pointer to adreno device
 */
static int a6xx_microcode_load(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_firmware *fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);
	uint64_t gpuaddr;

	gpuaddr = fw->memdesc.gpuaddr;
	kgsl_regwrite(device, A6XX_CP_SQE_INSTR_BASE_LO,
				lower_32_bits(gpuaddr));
	kgsl_regwrite(device, A6XX_CP_SQE_INSTR_BASE_HI,
				upper_32_bits(gpuaddr));

	return 0;
}

/*
 * CP_INIT_MAX_CONTEXT bit tells if multiple hardware contexts can
 * be used at once or if they should be serialized
 */
#define CP_INIT_MAX_CONTEXT BIT(0)

/* Enables register protection mode */
#define CP_INIT_ERROR_DETECTION_CONTROL BIT(1)

/* Header dump information */
#define CP_INIT_HEADER_DUMP BIT(2) /* Reserved */

/* Default Reset states enabled for PFP and ME */
#define CP_INIT_DEFAULT_RESET_STATE BIT(3)

/* Drawcall filter range */
#define CP_INIT_DRAWCALL_FILTER_RANGE BIT(4)

/* Ucode workaround masks */
#define CP_INIT_UCODE_WORKAROUND_MASK BIT(5)

#define CP_INIT_MASK (CP_INIT_MAX_CONTEXT | \
		CP_INIT_ERROR_DETECTION_CONTROL | \
		CP_INIT_HEADER_DUMP | \
		CP_INIT_DEFAULT_RESET_STATE | \
		CP_INIT_UCODE_WORKAROUND_MASK)

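/*
 * Emit the CP_INIT ordinal mask followed by the payload dwords for each
 * enabled ordinal, then zero-pad up to @count so the packet matches the
 * space the caller allocated (8 dwords in a6xx_send_cp_init()).
 */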
static void _set_ordinals(struct adreno_device *adreno_dev,
		unsigned int *cmds, unsigned int count)
{
	unsigned int *start = cmds;

	/* Enabled ordinal mask */
	*cmds++ = CP_INIT_MASK;

	if (CP_INIT_MASK & CP_INIT_MAX_CONTEXT)
		*cmds++ = 0x00000003;

	if (CP_INIT_MASK & CP_INIT_ERROR_DETECTION_CONTROL)
		*cmds++ = 0x20000000;

	if (CP_INIT_MASK & CP_INIT_HEADER_DUMP) {
		/* Header dump address */
		*cmds++ = 0x00000000;
		/* Header dump enable and dump size */
		*cmds++ = 0x00000000;
	}

	if (CP_INIT_MASK & CP_INIT_DRAWCALL_FILTER_RANGE) {
		/* Start range */
		*cmds++ = 0x00000000;
		/* End range (inclusive) */
		*cmds++ = 0x00000000;
	}

	if (CP_INIT_MASK & CP_INIT_UCODE_WORKAROUND_MASK)
		*cmds++ = 0x00000000;

	/* Pad rest of the cmds with 0's */
	while ((unsigned int)(cmds - start) < count)
		*cmds++ = 0x0;
}

/*
 * a6xx_send_cp_init() - Initialize ringbuffer
 * @adreno_dev: Pointer to adreno device
 * @rb: Pointer to the ringbuffer of device
 *
 * Submit commands for ME initialization
 */
static int a6xx_send_cp_init(struct adreno_device *adreno_dev,
			struct adreno_ringbuffer *rb)
{
	unsigned int *cmds;
	int ret;

	cmds = adreno_ringbuffer_allocspace(rb, 9);
	if (IS_ERR(cmds))
		return PTR_ERR(cmds);

	*cmds++ = cp_type7_packet(CP_ME_INIT, 8);

	_set_ordinals(adreno_dev, cmds, 8);

	ret = adreno_ringbuffer_submit_spin(rb, NULL, 2000);
	if (ret)
		spin_idle_debug(KGSL_DEVICE(adreno_dev),
				"CP initialization failed to idle\n");

	return ret;
}

/*
 * a6xx_rb_start() - Start the ringbuffer
 * @adreno_dev: Pointer to adreno device
 * @start_type: Warm or cold start
 */
static int a6xx_rb_start(struct adreno_device *adreno_dev,
			unsigned int start_type)
{
	struct adreno_ringbuffer *rb = ADRENO_CURRENT_RINGBUFFER(adreno_dev);
	struct kgsl_device *device = &adreno_dev->dev;
	uint64_t addr;
	int ret;

	addr = SCRATCH_RPTR_GPU_ADDR(device, rb->id);

	adreno_writereg64(adreno_dev, ADRENO_REG_CP_RB_RPTR_ADDR_LO,
			ADRENO_REG_CP_RB_RPTR_ADDR_HI, addr);

	/*
	 * The size of the ringbuffer in the hardware is the log2
	 * representation of the size in quadwords (sizedwords / 2).
	 */
	adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_CNTL,
			A6XX_CP_RB_CNTL_DEFAULT);

	adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_BASE,
			rb->buffer_desc.gpuaddr);

	ret = a6xx_microcode_load(adreno_dev);
	if (ret)
		return ret;

	/* Clear the SQE_HALT to start the CP engine */
	kgsl_regwrite(device, A6XX_CP_SQE_CNTL, 1);

	return a6xx_send_cp_init(adreno_dev, rb);
}

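/*
 * The first dword of the SQE firmware image is skipped; the remainder is
 * copied into a GPU read-only global buffer and the version is read from
 * the first dword of that payload.
 */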
static int _load_firmware(struct kgsl_device *device, const char *fwfile,
			struct adreno_firmware *firmware)
{
	const struct firmware *fw = NULL;
	int ret;

	ret = request_firmware(&fw, fwfile, device->dev);

	if (ret) {
		KGSL_DRV_ERR(device, "request_firmware(%s) failed: %d\n",
				fwfile, ret);
		return ret;
	}

	ret = kgsl_allocate_global(device, &firmware->memdesc, fw->size - 4,
				KGSL_MEMFLAGS_GPUREADONLY, 0, "ucode");

	if (!ret) {
		memcpy(firmware->memdesc.hostptr, &fw->data[4], fw->size - 4);
		firmware->size = (fw->size - 4) / sizeof(uint32_t);
		firmware->version = *(unsigned int *)&fw->data[4];
	}

	release_firmware(fw);

	return ret;
}

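/*
 * SPTPRAC power control is reached through the GMU register space mapped
 * below: write the power-on or power-off mask to the power control
 * register; for power-on, poll PWR_CLK_STATUS for the power-on status
 * bit for up to SPTPRAC_CTRL_TIMEOUT ms.
 */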
#define SPTPRAC_POWER_CONTROL_OFFSET	0x204
#define SPTPRAC_PWR_CLK_STATUS_OFFSET	0x14340
#define SPTPRAC_POWERON_CTRL_MASK	0x00778000
#define SPTPRAC_POWEROFF_CTRL_MASK	0x00778001
#define SPTPRAC_POWERON_STATUS_MASK	BIT(3)
#define SPTPRAC_CTRL_TIMEOUT		10 /* ms */

static int a6xx_sptprac_enable(struct adreno_device *adreno_dev)
{
	void __iomem *gmu_reg;
	unsigned long t;
	unsigned int val;
	int ret;

	gmu_reg = ioremap(0x506a000, 0x26000);

	__raw_writel(SPTPRAC_POWERON_CTRL_MASK,
			gmu_reg + SPTPRAC_POWER_CONTROL_OFFSET);

	/* Make sure the above write is observed before the reads below */
	wmb();

	t = jiffies + msecs_to_jiffies(SPTPRAC_CTRL_TIMEOUT);

	ret = -EINVAL;
	while (!time_after(jiffies, t)) {
		val = __raw_readl(gmu_reg + SPTPRAC_PWR_CLK_STATUS_OFFSET);
		/*
		 * Make sure the above read completes before polling the
		 * register again
		 */
		rmb();

		if ((val & SPTPRAC_POWERON_STATUS_MASK) ==
				SPTPRAC_POWERON_STATUS_MASK) {
			ret = 0;
			break;
		}
		cpu_relax();
	}

	iounmap(gmu_reg);

	return ret;
}

static void a6xx_sptprac_disable(struct adreno_device *adreno_dev)
{
	void __iomem *gmu_reg;

	gmu_reg = ioremap(0x506a000, 0x26000);

	__raw_writel(SPTPRAC_POWEROFF_CTRL_MASK,
			gmu_reg + SPTPRAC_POWER_CONTROL_OFFSET);
	/* Make sure the above write posts before moving on */
	wmb();

	iounmap(gmu_reg);
}

/*
 * a6xx_microcode_read() - Read microcode
 * @adreno_dev: Pointer to adreno device
 */
static int a6xx_microcode_read(struct adreno_device *adreno_dev)
{
	return _load_firmware(KGSL_DEVICE(adreno_dev),
			adreno_dev->gpucore->sqefw_name,
			ADRENO_FW(adreno_dev, ADRENO_FW_SQE));
}

static void a6xx_cp_hw_err_callback(struct adreno_device *adreno_dev, int bit)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned int status1, status2;

	kgsl_regread(device, A6XX_CP_INTERRUPT_STATUS, &status1);

	if (status1 & BIT(A6XX_CP_OPCODE_ERROR))
		KGSL_DRV_CRIT_RATELIMIT(device, "CP opcode error interrupt\n");
	if (status1 & BIT(A6XX_CP_UCODE_ERROR))
		KGSL_DRV_CRIT_RATELIMIT(device, "CP ucode error interrupt\n");
	if (status1 & BIT(A6XX_CP_HW_FAULT_ERROR)) {
		kgsl_regread(device, A6XX_CP_HW_FAULT, &status2);
		KGSL_DRV_CRIT_RATELIMIT(device,
				"CP | Ringbuffer HW fault | status=%x\n",
				status2);
	}
	if (status1 & BIT(A6XX_CP_REGISTER_PROTECTION_ERROR)) {
		kgsl_regread(device, A6XX_CP_PROTECT_STATUS, &status2);
		KGSL_DRV_CRIT_RATELIMIT(device,
			"CP | Protected mode error | %s | addr=%x | status=%x\n",
			status2 & (1 << 20) ? "READ" : "WRITE",
			(status2 & 0x3FFFF) >> 2, status2);
	}
	if (status1 & BIT(A6XX_CP_AHB_ERROR))
		KGSL_DRV_CRIT_RATELIMIT(device,
			"CP AHB error interrupt\n");
	if (status1 & BIT(A6XX_CP_VSD_PARITY_ERROR))
		KGSL_DRV_CRIT_RATELIMIT(device,
			"CP VSD decoder parity error\n");
	if (status1 & BIT(A6XX_CP_ILLEGAL_INSTR_ERROR))
		KGSL_DRV_CRIT_RATELIMIT(device,
			"CP Illegal instruction error\n");
}

static void a6xx_err_callback(struct adreno_device *adreno_dev, int bit)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	switch (bit) {
	case A6XX_INT_CP_AHB_ERROR:
		KGSL_DRV_CRIT_RATELIMIT(device, "CP: AHB bus error\n");
		break;
	case A6XX_INT_ATB_ASYNCFIFO_OVERFLOW:
		KGSL_DRV_CRIT_RATELIMIT(device, "RBBM: ATB ASYNC overflow\n");
		break;
	case A6XX_INT_RBBM_ATB_BUS_OVERFLOW:
		KGSL_DRV_CRIT_RATELIMIT(device, "RBBM: ATB bus overflow\n");
		break;
	case A6XX_INT_UCHE_OOB_ACCESS:
		KGSL_DRV_CRIT_RATELIMIT(device, "UCHE: Out of bounds access\n");
		break;
	case A6XX_INT_UCHE_TRAP_INTR:
		KGSL_DRV_CRIT_RATELIMIT(device, "UCHE: Trap interrupt\n");
		break;
	default:
		KGSL_DRV_CRIT_RATELIMIT(device, "Unknown interrupt %d\n", bit);
	}
}

#define A6XX_INT_MASK \
	((1 << A6XX_INT_CP_AHB_ERROR) | \
	 (1 << A6XX_INT_ATB_ASYNCFIFO_OVERFLOW) | \
	 (1 << A6XX_INT_RBBM_GPC_ERROR) | \
	 (1 << A6XX_INT_CP_SW) | \
	 (1 << A6XX_INT_CP_HW_ERROR) | \
	 (1 << A6XX_INT_CP_IB2) | \
	 (1 << A6XX_INT_CP_IB1) | \
	 (1 << A6XX_INT_CP_RB) | \
	 (1 << A6XX_INT_CP_CACHE_FLUSH_TS) | \
	 (1 << A6XX_INT_RBBM_ATB_BUS_OVERFLOW) | \
	 (1 << A6XX_INT_RBBM_HANG_DETECT) | \
	 (1 << A6XX_INT_UCHE_OOB_ACCESS) | \
	 (1 << A6XX_INT_UCHE_TRAP_INTR))

static struct adreno_irq_funcs a6xx_irq_funcs[32] = {
	ADRENO_IRQ_CALLBACK(NULL), /* 0 - RBBM_GPU_IDLE */
	ADRENO_IRQ_CALLBACK(a6xx_err_callback), /* 1 - RBBM_AHB_ERROR */
	ADRENO_IRQ_CALLBACK(NULL), /* 2 - UNUSED */
	ADRENO_IRQ_CALLBACK(NULL), /* 3 - UNUSED */
	ADRENO_IRQ_CALLBACK(NULL), /* 4 - UNUSED */
	ADRENO_IRQ_CALLBACK(NULL), /* 5 - UNUSED */
	/* 6 - RBBM_ATB_ASYNC_OVERFLOW */
	ADRENO_IRQ_CALLBACK(a6xx_err_callback),
	ADRENO_IRQ_CALLBACK(NULL), /* 7 - GPC_ERR */
	ADRENO_IRQ_CALLBACK(NULL), /* 8 - CP_SW */
	ADRENO_IRQ_CALLBACK(a6xx_cp_hw_err_callback), /* 9 - CP_HW_ERROR */
	ADRENO_IRQ_CALLBACK(NULL), /* 10 - CP_CCU_FLUSH_DEPTH_TS */
	ADRENO_IRQ_CALLBACK(NULL), /* 11 - CP_CCU_FLUSH_COLOR_TS */
	ADRENO_IRQ_CALLBACK(NULL), /* 12 - CP_CCU_RESOLVE_TS */
	ADRENO_IRQ_CALLBACK(adreno_cp_callback), /* 13 - CP_IB2_INT */
	ADRENO_IRQ_CALLBACK(adreno_cp_callback), /* 14 - CP_IB1_INT */
	ADRENO_IRQ_CALLBACK(adreno_cp_callback), /* 15 - CP_RB_INT */
	ADRENO_IRQ_CALLBACK(NULL), /* 16 - UNUSED */
	ADRENO_IRQ_CALLBACK(NULL), /* 17 - CP_RB_DONE_TS */
	ADRENO_IRQ_CALLBACK(NULL), /* 18 - CP_WT_DONE_TS */
	ADRENO_IRQ_CALLBACK(NULL), /* 19 - UNUSED */
	ADRENO_IRQ_CALLBACK(adreno_cp_callback), /* 20 - CP_CACHE_FLUSH_TS */
	ADRENO_IRQ_CALLBACK(NULL), /* 21 - UNUSED */
	ADRENO_IRQ_CALLBACK(a6xx_err_callback), /* 22 - RBBM_ATB_BUS_OVERFLOW */
	/* 23 - MISC_HANG_DETECT */
	ADRENO_IRQ_CALLBACK(adreno_hang_int_callback),
	ADRENO_IRQ_CALLBACK(a6xx_err_callback), /* 24 - UCHE_OOB_ACCESS */
	ADRENO_IRQ_CALLBACK(a6xx_err_callback), /* 25 - UCHE_TRAP_INTR */
	ADRENO_IRQ_CALLBACK(NULL), /* 26 - DEBBUS_INTR_0 */
	ADRENO_IRQ_CALLBACK(NULL), /* 27 - DEBBUS_INTR_1 */
	ADRENO_IRQ_CALLBACK(NULL), /* 28 - UNUSED */
	ADRENO_IRQ_CALLBACK(NULL), /* 29 - UNUSED */
	ADRENO_IRQ_CALLBACK(NULL), /* 30 - ISDB_CPU_IRQ */
	ADRENO_IRQ_CALLBACK(NULL), /* 31 - ISDB_UNDER_DEBUG */
};

static struct adreno_irq a6xx_irq = {
	.funcs = a6xx_irq_funcs,
	.mask = A6XX_INT_MASK,
};

/* Register offset defines for A6XX, in order of enum adreno_regs */
static unsigned int a6xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_BASE, A6XX_CP_RB_BASE),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR_ADDR_LO,
			A6XX_CP_RB_RPTR_ADDR_LO),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR_ADDR_HI,
			A6XX_CP_RB_RPTR_ADDR_HI),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR, A6XX_CP_RB_RPTR),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_WPTR, A6XX_CP_RB_WPTR),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_CNTL, A6XX_CP_RB_CNTL),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_CNTL, A6XX_CP_MISC_CNTL),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_STATUS, A6XX_RBBM_STATUS),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_STATUS3, A6XX_RBBM_STATUS3),

	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_INT_0_MASK, A6XX_RBBM_INT_0_MASK),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_INT_0_STATUS, A6XX_RBBM_INT_0_STATUS),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_CLOCK_CTL, A6XX_RBBM_CLOCK_CNTL),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_INT_CLEAR_CMD,
			A6XX_RBBM_INT_CLEAR_CMD),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SW_RESET_CMD, A6XX_RBBM_SW_RESET_CMD),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_BLOCK_SW_RESET_CMD,
			A6XX_RBBM_BLOCK_SW_RESET_CMD),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_BLOCK_SW_RESET_CMD2,
			A6XX_RBBM_BLOCK_SW_RESET_CMD2),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO,
			A6XX_CP_ALWAYS_ON_COUNTER_LO),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI,
			A6XX_CP_ALWAYS_ON_COUNTER_HI),
	ADRENO_REG_DEFINE(ADRENO_REG_VBIF_VERSION, A6XX_VBIF_VERSION),
	ADRENO_REG_DEFINE(ADRENO_REG_VBIF_XIN_HALT_CTRL0,
			A6XX_VBIF_XIN_HALT_CTRL0),
	ADRENO_REG_DEFINE(ADRENO_REG_VBIF_XIN_HALT_CTRL1,
			A6XX_VBIF_XIN_HALT_CTRL1),
};

static const struct adreno_reg_offsets a6xx_reg_offsets = {
	.offsets = a6xx_register_offsets,
	.offset_0 = ADRENO_REG_REGISTER_MAX,
};

struct adreno_gpudev adreno_a6xx_gpudev = {
	.reg_offsets = &a6xx_reg_offsets,
	.start = a6xx_start,
	.irq = &a6xx_irq,
	.irq_trace = trace_kgsl_a5xx_irq_status,
	.num_prio_levels = KGSL_PRIORITY_MAX_RB_LEVELS,
	.platform_setup = a6xx_platform_setup,
	.rb_start = a6xx_rb_start,
	.regulator_enable = a6xx_sptprac_enable,
	.regulator_disable = a6xx_sptprac_disable,
	.microcode_read = a6xx_microcode_read,
	.enable_64bit = a6xx_enable_64bit,
};