/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/firmware.h>
#include <soc/qcom/subsystem_restart.h>
#include <linux/pm_opp.h>

#include "adreno.h"
#include "a6xx_reg.h"
#include "adreno_a6xx.h"
#include "adreno_cp_parser.h"
#include "adreno_trace.h"
#include "adreno_pm4types.h"
#include "adreno_perfcounter.h"
#include "adreno_ringbuffer.h"
#include "adreno_llc.h"
#include "kgsl_sharedmem.h"
#include "kgsl_log.h"
#include "kgsl.h"
#include "kgsl_gmu.h"
#include "kgsl_trace.h"

#define OOB_REQUEST_TIMEOUT	10 /* ms */

#define A6XX_CP_RB_CNTL_DEFAULT (((ilog2(4) << 8) & 0x1F00) | \
		(ilog2(KGSL_RB_DWORDS >> 1) & 0x3F))
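/*
 * Note on the default above: the low bits hold the ring buffer size as the
 * log2 of the size in quadwords (KGSL_RB_DWORDS >> 1), matching the comment
 * in a6xx_rb_start() below, while bits 8-12 carry ilog2(4), which presumably
 * selects the read-pointer update block size.
 */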

#define MIN_HBB		13

#define A6XX_LLC_NUM_GPU_SCIDS		5
#define A6XX_GPU_LLC_SCID_NUM_BITS	5
#define A6XX_GPU_LLC_SCID_MASK \
	((1 << (A6XX_LLC_NUM_GPU_SCIDS * A6XX_GPU_LLC_SCID_NUM_BITS)) - 1)
#define A6XX_GPUHTW_LLC_SCID_SHIFT	25
#define A6XX_GPUHTW_LLC_SCID_MASK \
	(((1 << A6XX_GPU_LLC_SCID_NUM_BITS) - 1) << A6XX_GPUHTW_LLC_SCID_SHIFT)

#define A6XX_GPU_CX_REG_BASE		0x509E000
#define A6XX_GPU_CX_REG_SIZE		0x1000

static int _load_gmu_firmware(struct kgsl_device *device);

static const struct adreno_vbif_data a630_vbif[] = {
	{A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009},
	{A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3},
	{0, 0},
};

static const struct adreno_vbif_platform a6xx_vbif_platforms[] = {
	{ adreno_is_a630, a630_vbif },
};

static struct a6xx_protected_regs {
	unsigned int base;
	unsigned int count;
	int read_protect;
} a6xx_protected_regs_group[] = {
	{ 0x600, 0x51, 0 },
	{ 0xAE50, 0x2, 1 },
	{ 0x9624, 0x13, 1 },
	{ 0x8630, 0x8, 1 },
	{ 0x9E70, 0x1, 1 },
	{ 0x9E78, 0x187, 1 },
	{ 0xF000, 0x810, 1 },
	{ 0xFC00, 0x3, 0 },
	{ 0x50E, 0x0, 1 },
	{ 0x50F, 0x0, 0 },
	{ 0x510, 0x0, 1 },
	{ 0x0, 0x4F9, 0 },
	{ 0x501, 0xA, 0 },
	{ 0x511, 0x44, 0 },
	{ 0xE00, 0xE, 1 },
	{ 0x8E00, 0x0, 1 },
	{ 0x8E50, 0xF, 1 },
	{ 0xBE02, 0x0, 1 },
	{ 0xBE20, 0x11F3, 1 },
	{ 0x800, 0x82, 1 },
	{ 0x8A0, 0x8, 1 },
	{ 0x8AB, 0x19, 1 },
	{ 0x900, 0x4D, 1 },
	{ 0x98D, 0x76, 1 },
	{ 0x8D0, 0x23, 0 },
	{ 0x980, 0x4, 0 },
	{ 0xA630, 0x0, 1 },
};
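/*
 * Each entry above is { base, count, read_protect }. a6xx_protect_init()
 * below packs an entry into a CP_PROTECT_REG register as
 * base | (count << 18) | (read_protect << 31), so the range length and the
 * read-protect flag live in the upper bits of each register.
 */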

static void a6xx_platform_setup(struct adreno_device *adreno_dev)
{
	uint64_t addr;

	/* Calculate SP local and private mem addresses */
	addr = ALIGN(ADRENO_UCHE_GMEM_BASE + adreno_dev->gmem_size, SZ_64K);
	adreno_dev->sp_local_gpuaddr = addr;
	adreno_dev->sp_pvt_gpuaddr = addr + SZ_64K;
}

static void a6xx_init(struct adreno_device *adreno_dev)
{
	a6xx_crashdump_init(adreno_dev);
}

/**
 * a6xx_protect_init() - Initializes register protection on a6xx
 * @adreno_dev: Pointer to the adreno device structure
 * Performs register writes to enable protected access to sensitive
 * registers
 */
static void a6xx_protect_init(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_protected_registers *mmu_prot =
		kgsl_mmu_get_prot_regs(&device->mmu);
	int i, num_sets;
	int req_sets = ARRAY_SIZE(a6xx_protected_regs_group);
	int max_sets = adreno_dev->gpucore->num_protected_regs;
	unsigned int mmu_base = 0, mmu_range = 0, cur_range;

	/* enable access protection to privileged registers */
	kgsl_regwrite(device, A6XX_CP_PROTECT_CNTL, 0x00000007);

	if (mmu_prot) {
		mmu_base = mmu_prot->base;
		mmu_range = 1 << mmu_prot->range;
		req_sets += DIV_ROUND_UP(mmu_range, 0x2000);
	}

	if (req_sets > max_sets)
		WARN(1, "Size exceeds the num of protection regs available\n");

	/* Protect GPU registers */
	num_sets = min_t(unsigned int,
		ARRAY_SIZE(a6xx_protected_regs_group), max_sets);
	for (i = 0; i < num_sets; i++) {
		struct a6xx_protected_regs *regs =
			&a6xx_protected_regs_group[i];

		kgsl_regwrite(device, A6XX_CP_PROTECT_REG + i,
			regs->base | (regs->count << 18) |
			(regs->read_protect << 31));
	}

	/* Protect MMU registers */
	if (mmu_prot) {
		while ((i < max_sets) && (mmu_range > 0)) {
			cur_range = min_t(unsigned int, mmu_range,
					0x2000);
			kgsl_regwrite(device, A6XX_CP_PROTECT_REG + i,
				mmu_base | ((cur_range - 1) << 18) | (1 << 31));

			mmu_base += cur_range;
			mmu_range -= cur_range;
			i++;
		}
	}
}

static void a6xx_enable_64bit(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	kgsl_regwrite(device, A6XX_CP_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_VSC_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_GRAS_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_RB_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_PC_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_HLSQ_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_VFD_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_VPC_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_UCHE_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_SP_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_TPL1_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
}

/*
 * a6xx_start() - Device start
 * @adreno_dev: Pointer to adreno device
 *
 * a6xx device start
 */
static void a6xx_start(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned int bit, mal, mode, glbl_inv;
	unsigned int amsbc = 0;

	/* runtime adjust callbacks based on feature sets */
	if (!kgsl_gmu_isenabled(device))
		/* Legacy idle management if gmu is disabled */
		ADRENO_GPU_DEVICE(adreno_dev)->hw_isidle = NULL;

	adreno_vbif_start(adreno_dev, a6xx_vbif_platforms,
			ARRAY_SIZE(a6xx_vbif_platforms));
	/*
	 * Set UCHE_WRITE_THRU_BASE to the UCHE_TRAP_BASE effectively
	 * disabling L2 bypass
	 */
	kgsl_regwrite(device, A6XX_UCHE_WRITE_RANGE_MAX_LO, 0xffffffc0);
	kgsl_regwrite(device, A6XX_UCHE_WRITE_RANGE_MAX_HI, 0x0001ffff);
	kgsl_regwrite(device, A6XX_UCHE_TRAP_BASE_LO, 0xfffff000);
	kgsl_regwrite(device, A6XX_UCHE_TRAP_BASE_HI, 0x0001ffff);
	kgsl_regwrite(device, A6XX_UCHE_WRITE_THRU_BASE_LO, 0xfffff000);
	kgsl_regwrite(device, A6XX_UCHE_WRITE_THRU_BASE_HI, 0x0001ffff);

	/* Program the GMEM VA range for the UCHE path */
	kgsl_regwrite(device, A6XX_UCHE_GMEM_RANGE_MIN_LO,
			ADRENO_UCHE_GMEM_BASE);
	kgsl_regwrite(device, A6XX_UCHE_GMEM_RANGE_MIN_HI, 0x0);
	kgsl_regwrite(device, A6XX_UCHE_GMEM_RANGE_MAX_LO,
			ADRENO_UCHE_GMEM_BASE +
			adreno_dev->gmem_size - 1);
	kgsl_regwrite(device, A6XX_UCHE_GMEM_RANGE_MAX_HI, 0x0);

	kgsl_regwrite(device, A6XX_UCHE_FILTER_CNTL, 0x804);
	kgsl_regwrite(device, A6XX_UCHE_CACHE_WAYS, 0x4);

	kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_2, 0x010000C0);
	kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362C);

	/* Setting the mem pool size */
	kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, 128);

	/* Setting the primFifo thresholds default values */
	kgsl_regwrite(device, A6XX_PC_DBG_ECO_CNTL, (0x300 << 11));

	/* Set the AHB default slave response to "ERROR" */
	kgsl_regwrite(device, A6XX_CP_AHB_CNTL, 0x1);

	if (of_property_read_u32(device->pdev->dev.of_node,
		"qcom,highest-bank-bit", &bit))
		bit = MIN_HBB;

	if (of_property_read_u32(device->pdev->dev.of_node,
		"qcom,min-access-length", &mal))
		mal = 32;

	if (of_property_read_u32(device->pdev->dev.of_node,
		"qcom,ubwc-mode", &mode))
		mode = 0;

	switch (mode) {
	case KGSL_UBWC_1_0:
		mode = 1;
		break;
	case KGSL_UBWC_2_0:
		mode = 0;
		break;
	case KGSL_UBWC_3_0:
		mode = 0;
		amsbc = 1; /* Only valid for A640 and A680 */
		break;
	default:
		break;
	}

	if (bit >= 13 && bit <= 16)
		bit = (bit - 13) & 0x03;
	else
		bit = 0;

	mal = (mal == 64) ? 1 : 0;

	/* The (1 << 29) globalInvFlushFilterDis bit needs to be set for A630 V1 */
	glbl_inv = (adreno_is_a630v1(adreno_dev)) ? 1 : 0;

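	/*
	 * Pack the UBWC configuration derived above: mode in bit 0, the
	 * highest-bank-bit encoding (relative to 13) at bits 1-2, minimum
	 * access length at bit 3 and AMSBC at bit 4 (RB only). UCHE uses a
	 * different layout, with MAL at bit 23 and HBB at bit 21.
	 */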
	kgsl_regwrite(device, A6XX_RB_NC_MODE_CNTL, (amsbc << 4) | (mal << 3) |
			(bit << 1) | mode);
	kgsl_regwrite(device, A6XX_TPL1_NC_MODE_CNTL, (mal << 3) |
			(bit << 1) | mode);
	kgsl_regwrite(device, A6XX_SP_NC_MODE_CNTL, (mal << 3) | (bit << 1) |
			mode);

	kgsl_regwrite(device, A6XX_UCHE_MODE_CNTL, (glbl_inv << 29) |
			(mal << 23) | (bit << 21));

	kgsl_regwrite(device, A6XX_RBBM_INTERFACE_HANG_INT_CNTL,
			(1 << 30) | 0x4000);

	kgsl_regwrite(device, A6XX_UCHE_CLIENT_PF, 1);

	/* Set TWOPASSUSEWFI in A6XX_PC_DBG_ECO_CNTL if requested */
	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_TWO_PASS_USE_WFI))
		kgsl_regrmw(device, A6XX_PC_DBG_ECO_CNTL, 0, (1 << 8));

	a6xx_protect_init(adreno_dev);
}

/*
 * a6xx_microcode_load() - Load microcode
 * @adreno_dev: Pointer to adreno device
 */
static int a6xx_microcode_load(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_firmware *fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);
	uint64_t gpuaddr;
	static void *zap;
	int ret = 0;

	gpuaddr = fw->memdesc.gpuaddr;
	kgsl_regwrite(device, A6XX_CP_SQE_INSTR_BASE_LO,
			lower_32_bits(gpuaddr));
	kgsl_regwrite(device, A6XX_CP_SQE_INSTR_BASE_HI,
			upper_32_bits(gpuaddr));

	/* Load the zap shader firmware through PIL if it's available */
	if (adreno_dev->gpucore->zap_name && !zap) {
		zap = subsystem_get(adreno_dev->gpucore->zap_name);

		/* Return error if the zap shader cannot be loaded */
		if (IS_ERR_OR_NULL(zap)) {
			ret = (zap == NULL) ? -ENODEV : PTR_ERR(zap);
			zap = NULL;
		}
	}

	return ret;
}

/*
 * CP_INIT_MAX_CONTEXT bit tells if the multiple hardware contexts can
 * be used at once or if they should be serialized
 */
#define CP_INIT_MAX_CONTEXT BIT(0)

/* Enables register protection mode */
#define CP_INIT_ERROR_DETECTION_CONTROL BIT(1)

/* Header dump information */
#define CP_INIT_HEADER_DUMP BIT(2) /* Reserved */

/* Default Reset states enabled for PFP and ME */
#define CP_INIT_DEFAULT_RESET_STATE BIT(3)

/* Drawcall filter range */
#define CP_INIT_DRAWCALL_FILTER_RANGE BIT(4)

/* Ucode workaround masks */
#define CP_INIT_UCODE_WORKAROUND_MASK BIT(5)

#define CP_INIT_MASK (CP_INIT_MAX_CONTEXT | \
		CP_INIT_ERROR_DETECTION_CONTROL | \
		CP_INIT_HEADER_DUMP | \
		CP_INIT_DEFAULT_RESET_STATE | \
		CP_INIT_UCODE_WORKAROUND_MASK)
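/*
 * CP_INIT_MASK selects which of the ordinals above are sent with CP_ME_INIT;
 * for every bit set here, _set_ordinals() below appends the matching payload
 * dwords after the enable mask.
 */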

static void _set_ordinals(struct adreno_device *adreno_dev,
		unsigned int *cmds, unsigned int count)
{
	unsigned int *start = cmds;

	/* Enabled ordinal mask */
	*cmds++ = CP_INIT_MASK;

	if (CP_INIT_MASK & CP_INIT_MAX_CONTEXT)
		*cmds++ = 0x00000003;

	if (CP_INIT_MASK & CP_INIT_ERROR_DETECTION_CONTROL)
		*cmds++ = 0x20000000;

	if (CP_INIT_MASK & CP_INIT_HEADER_DUMP) {
		/* Header dump address */
		*cmds++ = 0x00000000;
		/* Header dump enable and dump size */
		*cmds++ = 0x00000000;
	}

	if (CP_INIT_MASK & CP_INIT_DRAWCALL_FILTER_RANGE) {
		/* Start range */
		*cmds++ = 0x00000000;
		/* End range (inclusive) */
		*cmds++ = 0x00000000;
	}

	if (CP_INIT_MASK & CP_INIT_UCODE_WORKAROUND_MASK)
		*cmds++ = 0x00000000;

	/* Pad rest of the cmds with 0's */
	while ((unsigned int)(cmds - start) < count)
		*cmds++ = 0x0;
}

/*
 * a6xx_send_cp_init() - Initialize ringbuffer
 * @adreno_dev: Pointer to adreno device
 * @rb: Pointer to the ringbuffer of device
 *
 * Submit commands for ME initialization.
 */
static int a6xx_send_cp_init(struct adreno_device *adreno_dev,
		struct adreno_ringbuffer *rb)
{
	unsigned int *cmds;
	int ret;

	cmds = adreno_ringbuffer_allocspace(rb, 9);
	if (IS_ERR(cmds))
		return PTR_ERR(cmds);

	*cmds++ = cp_type7_packet(CP_ME_INIT, 8);

	_set_ordinals(adreno_dev, cmds, 8);

	ret = adreno_ringbuffer_submit_spin(rb, NULL, 2000);
	if (ret)
		adreno_spin_idle_debug(adreno_dev,
				"CP initialization failed to idle\n");

	return ret;
}

/*
 * a6xx_rb_start() - Start the ringbuffer
 * @adreno_dev: Pointer to adreno device
 * @start_type: Warm or cold start
 */
static int a6xx_rb_start(struct adreno_device *adreno_dev,
		unsigned int start_type)
{
	struct adreno_ringbuffer *rb = ADRENO_CURRENT_RINGBUFFER(adreno_dev);
	struct kgsl_device *device = &adreno_dev->dev;
	uint64_t addr;
	int ret;

	addr = SCRATCH_RPTR_GPU_ADDR(device, rb->id);

	adreno_writereg64(adreno_dev, ADRENO_REG_CP_RB_RPTR_ADDR_LO,
			ADRENO_REG_CP_RB_RPTR_ADDR_HI, addr);

	/*
	 * The size of the ringbuffer in the hardware is the log2
	 * representation of the size in quadwords (sizedwords / 2).
	 */
	adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_CNTL,
			A6XX_CP_RB_CNTL_DEFAULT);

	adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_BASE,
			rb->buffer_desc.gpuaddr);

	ret = a6xx_microcode_load(adreno_dev);
	if (ret)
		return ret;

	/* Clear the SQE_HALT to start the CP engine */
	kgsl_regwrite(device, A6XX_CP_SQE_CNTL, 1);

	ret = a6xx_send_cp_init(adreno_dev, rb);
	if (ret)
		return ret;

	/* GPU comes up in secured mode, make it unsecured by default */
	return adreno_set_unsecured_mode(adreno_dev, rb);
}

static int _load_firmware(struct kgsl_device *device, const char *fwfile,
		struct adreno_firmware *firmware)
{
	const struct firmware *fw = NULL;
	int ret;

	ret = request_firmware(&fw, fwfile, device->dev);

	if (ret) {
		KGSL_DRV_ERR(device, "request_firmware(%s) failed: %d\n",
				fwfile, ret);
		return ret;
	}

	ret = kgsl_allocate_global(device, &firmware->memdesc, fw->size - 4,
				KGSL_MEMFLAGS_GPUREADONLY, 0, "ucode");

	if (!ret) {
		memcpy(firmware->memdesc.hostptr, &fw->data[4], fw->size - 4);
		firmware->size = (fw->size - 4) / sizeof(uint32_t);
		firmware->version = *(unsigned int *)&fw->data[4];
	}

	release_firmware(fw);

	ret = _load_gmu_firmware(device);

	return ret;
}

#define RSC_CMD_OFFSET 2
#define PDC_CMD_OFFSET 4
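/*
 * These offsets are the register-word stride between consecutive command
 * slots: RSC hidden TCS commands are spaced RSC_CMD_OFFSET words apart and
 * PDC TCS commands PDC_CMD_OFFSET words apart, which is how the
 * "+ *_CMD_OFFSET * n" addressing in _load_gmu_rpmh_ucode() walks the slots.
 */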

static void _regwrite(void __iomem *regbase,
		unsigned int offsetwords, unsigned int value)
{
	void __iomem *reg;

	reg = regbase + (offsetwords << 2);
	__raw_writel(value, reg);
}

/*
 * _load_gmu_rpmh_ucode() - Load the ucode into the GPU PDC/RSC blocks
 * PDC and RSC execute GPU power on/off RPMh sequence
 * @device: Pointer to KGSL device
 */
static void _load_gmu_rpmh_ucode(struct kgsl_device *device)
{
	struct gmu_device *gmu = &device->gmu;

	/* Setup RSC PDC handshake for sleep and wakeup */
	kgsl_gmu_regwrite(device, A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
	kgsl_gmu_regwrite(device, A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
	kgsl_gmu_regwrite(device, A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
	kgsl_gmu_regwrite(device,
			A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + RSC_CMD_OFFSET, 0);
	kgsl_gmu_regwrite(device,
			A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + RSC_CMD_OFFSET, 0);
	kgsl_gmu_regwrite(device,
			A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + RSC_CMD_OFFSET * 2,
			0x80000000);
	kgsl_gmu_regwrite(device,
			A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + RSC_CMD_OFFSET * 2,
			0);
	kgsl_gmu_regwrite(device, A6XX_RSCC_OVERRIDE_START_ADDR, 0);
	kgsl_gmu_regwrite(device, A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
	kgsl_gmu_regwrite(device, A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
	kgsl_gmu_regwrite(device, A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);

	/* Enable timestamp event */
	kgsl_gmu_regwrite(device, A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0, 1);

	/* Load RSC sequencer uCode for sleep and wakeup */
	kgsl_gmu_regwrite(device, A6XX_RSCC_SEQ_MEM_0_DRV0, 0xA7A506A0);
	kgsl_gmu_regwrite(device, A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xA1E6A6E7);
	kgsl_gmu_regwrite(device, A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xA2E081E1);
	kgsl_gmu_regwrite(device, A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xE9A982E2);
	kgsl_gmu_regwrite(device, A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020E8A8);

	/* Load PDC sequencer uCode for power up and power down sequence */
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_SEQ_MEM_0, 0xFFBFA1E1);
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_SEQ_MEM_0 + 1, 0xE0A4A3A2);
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_SEQ_MEM_0 + 2, 0xE2848382);
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_SEQ_MEM_0 + 3, 0xFDBDE4E3);
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_SEQ_MEM_0 + 4, 0x00002081);

	/* Set TCS commands used by PDC sequence for low power modes */
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS0_CMD_ENABLE_BANK, 7);
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS0_CMD_WAIT_FOR_CMPL_BANK, 0);
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS0_CONTROL, 0);
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS0_CMD0_MSGID, 0x10108);
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS0_CMD0_ADDR, 0x30010);
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS0_CMD0_DATA, 1);
	_regwrite(gmu->pdc_reg_virt,
			PDC_GPU_TCS0_CMD0_MSGID + PDC_CMD_OFFSET, 0x10108);
	_regwrite(gmu->pdc_reg_virt,
			PDC_GPU_TCS0_CMD0_ADDR + PDC_CMD_OFFSET, 0x30000);
	_regwrite(gmu->pdc_reg_virt,
			PDC_GPU_TCS0_CMD0_DATA + PDC_CMD_OFFSET, 0x0);
	_regwrite(gmu->pdc_reg_virt,
			PDC_GPU_TCS0_CMD0_MSGID + PDC_CMD_OFFSET * 2, 0x10108);
	_regwrite(gmu->pdc_reg_virt,
			PDC_GPU_TCS0_CMD0_ADDR + PDC_CMD_OFFSET * 2, 0x30080);
	_regwrite(gmu->pdc_reg_virt,
			PDC_GPU_TCS0_CMD0_DATA + PDC_CMD_OFFSET * 2, 0x0);
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS1_CMD_ENABLE_BANK, 7);
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0);
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS1_CONTROL, 0);
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS1_CMD0_MSGID, 0x10108);
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS1_CMD0_ADDR, 0x30010);
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS1_CMD0_DATA, 2);
	_regwrite(gmu->pdc_reg_virt,
			PDC_GPU_TCS1_CMD0_MSGID + PDC_CMD_OFFSET, 0x10108);
	_regwrite(gmu->pdc_reg_virt,
			PDC_GPU_TCS1_CMD0_ADDR + PDC_CMD_OFFSET, 0x30000);
	_regwrite(gmu->pdc_reg_virt,
			PDC_GPU_TCS1_CMD0_DATA + PDC_CMD_OFFSET, 0x3);
	_regwrite(gmu->pdc_reg_virt,
			PDC_GPU_TCS1_CMD0_MSGID + PDC_CMD_OFFSET * 2, 0x10108);
	_regwrite(gmu->pdc_reg_virt,
			PDC_GPU_TCS1_CMD0_ADDR + PDC_CMD_OFFSET * 2, 0x30080);
	_regwrite(gmu->pdc_reg_virt,
			PDC_GPU_TCS1_CMD0_DATA + PDC_CMD_OFFSET * 2, 0x3);

	/* Setup GPU PDC */
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_SEQ_START_ADDR, 0);
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_ENABLE_PDC, 0x80000001);

	/* ensure no writes happen before the uCode is fully written */
	wmb();
}

#define GMU_START_TIMEOUT 10	/* ms */
#define GPU_START_TIMEOUT 100	/* ms */

/*
 * timed_poll_check() - Poll a GMU register at the given offset until its
 * value matches the expected value. The function times out and returns
 * after the given duration if the register is not updated as expected.
 *
 * @device: Pointer to KGSL device
 * @offset: Register offset
 * @expected_ret: expected register value that stops polling
 * @timeout: timeout in milliseconds to abort the polling
 * @mask: bitmask to filter register value to match expected_ret
 */
static int timed_poll_check(struct kgsl_device *device,
		unsigned int offset, unsigned int expected_ret,
		unsigned int timeout, unsigned int mask)
{
	unsigned long t;
	unsigned int value;

	t = jiffies + msecs_to_jiffies(timeout);

	while (!time_after(jiffies, t)) {
		kgsl_gmu_regread(device, offset, &value);
		if ((value & mask) == expected_ret)
			return 0;
		cpu_relax();
	}

	return -EINVAL;
}

/*
 * a6xx_gmu_power_config() - Configure and enable GMU's low power mode
 * setting based on ADRENO feature flags.
 * @device: Pointer to KGSL device
 */
static void a6xx_gmu_power_config(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct gmu_device *gmu = &device->gmu;

	/* Configure registers for idle setting. The setting is cumulative */
	switch (gmu->idle_level) {
	case GPU_HW_MIN_VOLT:
		kgsl_gmu_regrmw(device, A6XX_GMU_RPMH_CTRL, 0,
				MIN_BW_ENABLE_MASK);
		kgsl_gmu_regrmw(device, A6XX_GMU_RPMH_HYST_CTRL, 0,
				MIN_BW_HYST);
		/* fall through */
	case GPU_HW_NAP:
		kgsl_gmu_regrmw(device, A6XX_GMU_GPU_NAP_CTRL, 0,
				HW_NAP_ENABLE_MASK);
		/* fall through */
	case GPU_HW_IFPC:
		kgsl_gmu_regwrite(device, A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
				0x000A0080);
		kgsl_gmu_regrmw(device, A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
				IFPC_ENABLE_MASK);
		/* fall through */
	case GPU_HW_SPTP_PC:
		kgsl_gmu_regwrite(device, A6XX_GMU_PWR_COL_SPTPRAC_HYST,
				0x000A0080);
		kgsl_gmu_regrmw(device, A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
				SPTP_ENABLE_MASK);
		/* fall through */
	default:
		break;
	}

	/* ACD feature enablement */
	if (ADRENO_FEATURE(adreno_dev, ADRENO_LM))
		kgsl_gmu_regrmw(device, A6XX_GMU_BOOT_KMD_LM_HANDSHAKE, 0,
				BIT(10));

	/* Enable RPMh GPU client */
	if (ADRENO_FEATURE(adreno_dev, ADRENO_RPMH))
		kgsl_gmu_regrmw(device, A6XX_GMU_RPMH_CTRL, 0,
				RPMH_ENABLE_MASK);

	/* Disable reference bandgap voltage */
	kgsl_gmu_regwrite(device, A6XX_GMU_AO_SPARE_CNTL, 1);
}

/*
 * a6xx_gmu_start() - Start GMU and wait until FW boot up.
 * @device: Pointer to KGSL device
 */
static int a6xx_gmu_start(struct kgsl_device *device)
{
	struct gmu_device *gmu = &device->gmu;

	/* Write 1 first to make sure the GMU is reset */
	kgsl_gmu_regwrite(device, A6XX_GMU_CM3_SYSRESET, 1);

	/* Make sure putting in reset doesn't happen after clearing */
	wmb();

	/* Bring GMU out of reset */
	kgsl_gmu_regwrite(device, A6XX_GMU_CM3_SYSRESET, 0);
	if (timed_poll_check(device,
			A6XX_GMU_CM3_FW_INIT_RESULT,
			0xBABEFACE,
			GMU_START_TIMEOUT,
			0xFFFFFFFF)) {
		dev_err(&gmu->pdev->dev, "GMU doesn't boot\n");
		return -ETIMEDOUT;
	}

	return 0;
}

/*
 * a6xx_gmu_hfi_start() - Write registers and start HFI.
 * @device: Pointer to KGSL device
 */
static int a6xx_gmu_hfi_start(struct kgsl_device *device)
{
	struct gmu_device *gmu = &device->gmu;

	kgsl_gmu_regwrite(device, A6XX_GMU_GMU2HOST_INTR_MASK,
			(HFI_IRQ_MASK & (~HFI_IRQ_MSGQ_MASK)));

	kgsl_gmu_regwrite(device, A6XX_GMU_HFI_CTRL_INIT, 1);

	if (timed_poll_check(device,
			A6XX_GMU_HFI_CTRL_STATUS,
			BIT(0),
			GMU_START_TIMEOUT,
			BIT(0))) {
		dev_err(&gmu->pdev->dev, "GMU HFI init failed\n");
		return -ETIMEDOUT;
	}

	return 0;
}

/*
 * a6xx_oob_set() - Set OOB interrupt to GMU.
 * @adreno_dev: Pointer to adreno device
 * @set_mask: set_mask is a bitmask that defines a set of OOB
 *	interrupts to trigger.
 * @check_mask: check_mask is a bitmask that provides a set of
 *	OOB ACK bits. check_mask usually matches set_mask to
 *	ensure OOBs are handled.
 * @clear_mask: After GMU handles an OOB interrupt, the GMU driver
 *	clears the interrupt. clear_mask is a bitmask that defines
 *	a set of OOB interrupts to clear.
 */
static int a6xx_oob_set(struct adreno_device *adreno_dev,
		unsigned int set_mask, unsigned int check_mask,
		unsigned int clear_mask)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct gmu_device *gmu = &device->gmu;
	int ret = 0;

	if (!kgsl_gmu_isenabled(device))
		return -ENODEV;

	kgsl_gmu_regwrite(device, A6XX_GMU_HOST2GMU_INTR_SET, set_mask);

	if (timed_poll_check(device,
			A6XX_GMU_GMU2HOST_INTR_INFO,
			check_mask,
			GPU_START_TIMEOUT,
			check_mask)) {
		ret = -ETIMEDOUT;
		dev_err(&gmu->pdev->dev, "OOB set timed out\n");
	}

	kgsl_gmu_regwrite(device, A6XX_GMU_GMU2HOST_INTR_CLR, clear_mask);

	trace_kgsl_gmu_oob_set(set_mask);
	return ret;
}

/*
 * a6xx_oob_clear() - Clear a previously set OOB request.
 * @adreno_dev: Pointer to the adreno device that has the GMU
 * @clear_mask: Bitmask that provides the OOB bits to clear
 */
static inline void a6xx_oob_clear(struct adreno_device *adreno_dev,
		unsigned int clear_mask)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	if (!kgsl_gmu_isenabled(device))
		return;

	kgsl_gmu_regwrite(device, A6XX_GMU_HOST2GMU_INTR_SET, clear_mask);
	trace_kgsl_gmu_oob_clear(clear_mask);
}

#define SPTPRAC_POWERON_CTRL_MASK	0x00778000
#define SPTPRAC_POWEROFF_CTRL_MASK	0x00778001
#define SPTPRAC_POWEROFF_STATUS_MASK	BIT(2)
#define SPTPRAC_POWERON_STATUS_MASK	BIT(3)
#define SPTPRAC_CTRL_TIMEOUT		10 /* ms */
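/*
 * SPTPRAC power sequencing: the CTRL masks above are the values written to
 * A6XX_GMU_GX_SPTPRAC_POWER_CONTROL to request power on/off, and the STATUS
 * masks are the bits polled in A6XX_GMU_SPTPRAC_PWR_CLK_STATUS (via
 * timed_poll_check()) to confirm that the transition completed.
 */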

/*
 * a6xx_sptprac_enable() - Power on SPTPRAC
 * @adreno_dev: Pointer to Adreno device
 */
static int a6xx_sptprac_enable(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct gmu_device *gmu = &device->gmu;

	if (!kgsl_gmu_isenabled(device))
		return -EINVAL;

	kgsl_gmu_regwrite(device, A6XX_GMU_GX_SPTPRAC_POWER_CONTROL,
			SPTPRAC_POWERON_CTRL_MASK);

	if (timed_poll_check(device,
			A6XX_GMU_SPTPRAC_PWR_CLK_STATUS,
			SPTPRAC_POWERON_STATUS_MASK,
			SPTPRAC_CTRL_TIMEOUT,
			SPTPRAC_POWERON_STATUS_MASK)) {
		dev_err(&gmu->pdev->dev, "power on SPTPRAC fail\n");
		return -EINVAL;
	}

	return 0;
}

/*
 * a6xx_sptprac_disable() - Power off SPTPRAC
 * @adreno_dev: Pointer to Adreno device
 */
static void a6xx_sptprac_disable(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct gmu_device *gmu = &device->gmu;

	if (!kgsl_gmu_isenabled(device))
		return;

	kgsl_gmu_regwrite(device, A6XX_GMU_GX_SPTPRAC_POWER_CONTROL,
			SPTPRAC_POWEROFF_CTRL_MASK);

	if (timed_poll_check(device,
			A6XX_GMU_SPTPRAC_PWR_CLK_STATUS,
			SPTPRAC_POWEROFF_STATUS_MASK,
			SPTPRAC_CTRL_TIMEOUT,
			SPTPRAC_POWEROFF_STATUS_MASK))
		dev_err(&gmu->pdev->dev, "power off SPTPRAC fail\n");
}

/*
 * a6xx_hm_enable() - Power on HM and turn on clock
 * @adreno_dev: Pointer to Adreno device
 */
static int a6xx_hm_enable(struct adreno_device *adreno_dev)
{
	int ret;
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct gmu_device *gmu = &device->gmu;

	if (!IS_ERR_OR_NULL(gmu->gx_gdsc)) {
		ret = regulator_enable(gmu->gx_gdsc);
		if (ret) {
			dev_err(&gmu->pdev->dev,
				"Failed to turn on GPU HM HS\n");
			return ret;
		}
	}

	ret = clk_set_rate(pwr->grp_clks[0],
			pwr->pwrlevels[pwr->default_pwrlevel].
			gpu_freq);
	if (ret)
		return ret;

	return clk_prepare_enable(pwr->grp_clks[0]);
}

/*
 * a6xx_hm_disable() - Turn off HM clock and power off
 * @adreno_dev: Pointer to Adreno device
 */
static int a6xx_hm_disable(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct gmu_device *gmu = &device->gmu;

	clk_disable_unprepare(pwr->grp_clks[0]);

	clk_set_rate(pwr->grp_clks[0],
			pwr->pwrlevels[pwr->num_pwrlevels - 1].
			gpu_freq);

	if (IS_ERR_OR_NULL(gmu->gx_gdsc))
		return 0;

	return regulator_disable(gmu->gx_gdsc);
}

/*
 * a6xx_hm_sptprac_enable() - Turn on HM and SPTPRAC
 * @device: Pointer to KGSL device
 */
static int a6xx_hm_sptprac_enable(struct kgsl_device *device)
{
	int ret = 0;
	struct gmu_device *gmu = &device->gmu;

	/* If GMU does not control HM we must */
	if (gmu->idle_level < GPU_HW_IFPC) {
		ret = a6xx_hm_enable(ADRENO_DEVICE(device));
		if (ret) {
			dev_err(&gmu->pdev->dev, "Failed to power on GPU HM\n");
			return ret;
		}
	}

	/* If GMU does not control SPTPRAC we must */
	if (gmu->idle_level < GPU_HW_SPTP_PC) {
		ret = a6xx_sptprac_enable(ADRENO_DEVICE(device));
		if (ret) {
			a6xx_hm_disable(ADRENO_DEVICE(device));
			return ret;
		}
	}

	return ret;
}

/*
 * a6xx_hm_sptprac_disable() - Turn off SPTPRAC and HM
 * @device: Pointer to KGSL device
 */
static int a6xx_hm_sptprac_disable(struct kgsl_device *device)
{
	int ret = 0;
	struct gmu_device *gmu = &device->gmu;

	/* If GMU does not control SPTPRAC we must */
	if (gmu->idle_level < GPU_HW_SPTP_PC)
		a6xx_sptprac_disable(ADRENO_DEVICE(device));

	/* If GMU does not control HM we must */
	if (gmu->idle_level < GPU_HW_IFPC) {
		ret = a6xx_hm_disable(ADRENO_DEVICE(device));
		if (ret)
			dev_err(&gmu->pdev->dev, "Failed to power off GPU HM\n");
	}

	return ret;
}

/*
 * a6xx_hm_sptprac_control() - Turn HM and SPTPRAC on or off
 * @device: Pointer to KGSL device
 * @on: True to turn on or false to turn off
 */
static int a6xx_hm_sptprac_control(struct kgsl_device *device, bool on)
{
	if (on)
		return a6xx_hm_sptprac_enable(device);
	else
		return a6xx_hm_sptprac_disable(device);
}

/*
 * a6xx_gfx_rail_on() - request GMU to power GPU at given OPP.
 * @device: Pointer to KGSL device
 *
 */
static int a6xx_gfx_rail_on(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct gmu_device *gmu = &device->gmu;
	struct arc_vote_desc *default_opp;
	unsigned int perf_idx;
	int ret;

	perf_idx = pwr->num_pwrlevels - pwr->default_pwrlevel - 1;
	default_opp = &gmu->rpmh_votes.gx_votes[perf_idx];

	kgsl_gmu_regwrite(device, A6XX_GMU_BOOT_SLUMBER_OPTION,
			OOB_BOOT_OPTION);
	kgsl_gmu_regwrite(device, A6XX_GMU_GX_VOTE_IDX, default_opp->pri_idx);
	kgsl_gmu_regwrite(device, A6XX_GMU_MX_VOTE_IDX, default_opp->sec_idx);

	ret = a6xx_oob_set(adreno_dev, OOB_BOOT_SLUMBER_SET_MASK,
			OOB_BOOT_SLUMBER_CHECK_MASK,
			OOB_BOOT_SLUMBER_CLEAR_MASK);

	if (ret)
		dev_err(&gmu->pdev->dev, "OOB set after GMU booted timed out\n");

	return ret;
}

/*
 * a6xx_notify_slumber() - initiate request to GMU to prepare to slumber
 * @device: Pointer to KGSL device
 */
static int a6xx_notify_slumber(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct gmu_device *gmu = &device->gmu;
	int bus_level = pwr->pwrlevels[pwr->default_pwrlevel].bus_freq;
	int perf_idx = gmu->num_gpupwrlevels - pwr->default_pwrlevel - 1;
	int ret, state;

	if (!ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG)) {
		ret = hfi_notify_slumber(gmu, perf_idx, bus_level);
		return ret;
	}

	kgsl_gmu_regwrite(device, A6XX_GMU_BOOT_SLUMBER_OPTION,
			OOB_SLUMBER_OPTION);
	kgsl_gmu_regwrite(device, A6XX_GMU_GX_VOTE_IDX, bus_level);
	kgsl_gmu_regwrite(device, A6XX_GMU_MX_VOTE_IDX, perf_idx);

	ret = a6xx_oob_set(adreno_dev, OOB_BOOT_SLUMBER_SET_MASK,
			OOB_BOOT_SLUMBER_CHECK_MASK,
			OOB_BOOT_SLUMBER_CLEAR_MASK);
	a6xx_oob_clear(adreno_dev, OOB_BOOT_SLUMBER_CLEAR_MASK);

	if (ret)
		dev_err(&gmu->pdev->dev, "OOB set for slumber timed out\n");
	else {
		kgsl_gmu_regread(device, A6XX_GMU_RPMH_POWER_STATE, &state);
		if (state != GPU_HW_SLUMBER) {
			dev_err(&gmu->pdev->dev,
					"Failed to prepare for slumber\n");
			ret = -EINVAL;
		}
	}

	return ret;
}

static int a6xx_rpmh_power_on_gpu(struct kgsl_device *device)
{
	struct gmu_device *gmu = &device->gmu;
	struct device *dev = &gmu->pdev->dev;
	int ret = 0;

	if (device->state != KGSL_STATE_INIT &&
		device->state != KGSL_STATE_SUSPEND) {
		/* RSC wake sequence */
		kgsl_gmu_regwrite(device, A6XX_GMU_RSCC_CONTROL_REQ, BIT(1));

		/* Write request before polling */
		wmb();

		if (timed_poll_check(device,
				A6XX_GMU_RSCC_CONTROL_ACK,
				BIT(1),
				GPU_START_TIMEOUT,
				BIT(1))) {
			dev_err(dev, "Failed to do GPU RSC power on\n");
			return -EINVAL;
		}

		if (timed_poll_check(device,
				A6XX_RSCC_SEQ_BUSY_DRV0,
				0,
				GPU_START_TIMEOUT,
				0xFFFFFFFF))
			goto error_rsc;

		/* Turn on the HM and SPTP head switches */
		ret = a6xx_hm_sptprac_control(device, true);
	}

	return ret;

error_rsc:
	dev_err(dev, "GPU RSC sequence stuck in waking up GPU\n");
	return -EINVAL;
}

static int a6xx_rpmh_power_off_gpu(struct kgsl_device *device)
{
	struct gmu_device *gmu = &device->gmu;
	int val, ret = 0;

	/* Turn off the SPTP and HM head switches */
	ret = a6xx_hm_sptprac_control(device, false);

	/* RSC sleep sequence */
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_TIMESTAMP_UNIT1_EN_DRV0, 1);
	kgsl_gmu_regwrite(device, A6XX_GMU_RSCC_CONTROL_REQ, 1);
	wmb();

	if (timed_poll_check(device,
			A6XX_RSCC_TIMESTAMP_UNIT1_OUTPUT_DRV0,
			BIT(0),
			GPU_START_TIMEOUT,
			BIT(0))) {
		dev_err(&gmu->pdev->dev, "GPU RSC power off fail\n");
		return -EINVAL;
	}

	/* Read to clear the timestamp */
	kgsl_gmu_regread(device, A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_L_DRV0,
			&val);
	kgsl_gmu_regread(device, A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_H_DRV0,
			&val);

	kgsl_gmu_regwrite(device, A6XX_GMU_AO_SPARE_CNTL, 0);

	/* FIXME: v2 has different procedure to trigger sequence */

	return ret;
}

/*
 * a6xx_gmu_fw_start() - set up GMU and start FW
 * @device: Pointer to KGSL device
 * @boot_state: State of the GMU being started
 */
static int a6xx_gmu_fw_start(struct kgsl_device *device,
		unsigned int boot_state)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct gmu_device *gmu = &device->gmu;
	struct gmu_memdesc *mem_addr = gmu->hfi_mem;
	int ret, i;

	if (boot_state == GMU_COLD_BOOT || boot_state == GMU_RESET) {
		/* Turn on the HM and SPTP head switches */
		ret = a6xx_hm_sptprac_control(device, true);
		if (ret)
			return ret;

		/* Turn on TCM retention */
		kgsl_gmu_regwrite(device, A6XX_GMU_GENERAL_7, 1);

		if (!test_and_set_bit(GMU_BOOT_INIT_DONE, &gmu->flags))
			_load_gmu_rpmh_ucode(device);

		if (gmu->load_mode == TCM_BOOT) {
			/* Load GMU image via AHB bus */
			for (i = 0; i < MAX_GMUFW_SIZE; i++)
				kgsl_gmu_regwrite(device,
					A6XX_GMU_CM3_ITCM_START + i,
					*((uint32_t *) gmu->fw_image.
					hostptr + i));

			/* Prevent leaving reset before the FW is written */
			wmb();
		} else {
			dev_err(&gmu->pdev->dev, "Incorrect GMU load mode %d\n",
					gmu->load_mode);
			return -EINVAL;
		}
	} else {
		ret = a6xx_rpmh_power_on_gpu(device);
		if (ret)
			return ret;
	}

	/* Clear init result to make sure we are getting fresh value */
	kgsl_gmu_regwrite(device, A6XX_GMU_CM3_FW_INIT_RESULT, 0);
	kgsl_gmu_regwrite(device, A6XX_GMU_CM3_BOOT_CONFIG, gmu->load_mode);

	kgsl_gmu_regwrite(device, A6XX_GMU_HFI_QTBL_ADDR,
			mem_addr->gmuaddr);
	kgsl_gmu_regwrite(device, A6XX_GMU_HFI_QTBL_INFO, 1);

	kgsl_gmu_regwrite(device, A6XX_GMU_AHB_FENCE_RANGE_0,
			FENCE_RANGE_MASK);

	/* Configure power control and bring the GMU out of reset */
	a6xx_gmu_power_config(device);
	ret = a6xx_gmu_start(device);
	if (ret)
		return ret;

	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG)
			&& boot_state == GMU_COLD_BOOT) {
		ret = a6xx_gfx_rail_on(device);
		if (ret) {
			a6xx_oob_clear(adreno_dev,
					OOB_BOOT_SLUMBER_CLEAR_MASK);
			return ret;
		}
	}

	ret = a6xx_gmu_hfi_start(device);
	if (ret)
		return ret;

	/* Make sure the write to start HFI happens before sending a message */
	wmb();
	return ret;
}

/*
 * a6xx_gmu_dcvs_nohfi() - request GMU to do DCVS without using HFI
 * @device: Pointer to KGSL device
 * @perf_idx: Index into GPU performance level table defined in
 *	HFI DCVS table message
 * @bw_idx: Index into GPU b/w table defined in HFI b/w table message
 *
 */
static int a6xx_gmu_dcvs_nohfi(struct kgsl_device *device,
		unsigned int perf_idx, unsigned int bw_idx)
{
	struct hfi_dcvs_cmd dcvs_cmd = {
		.ack_type = ACK_BLOCK,
		.freq = {
			.perf_idx = perf_idx,
			.clkset_opt = OPTION_AT_LEAST,
		},
		.bw = {
			.bw_idx = bw_idx,
		},
	};
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct gmu_device *gmu = &device->gmu;
	union gpu_perf_vote vote;
	int ret;

	if (device->state == KGSL_STATE_INIT ||
			device->state == KGSL_STATE_SUSPEND)
		dcvs_cmd.ack_type = ACK_NONBLOCK;

	kgsl_gmu_regwrite(device, A6XX_GMU_DCVS_ACK_OPTION, dcvs_cmd.ack_type);

	vote.fvote = dcvs_cmd.freq;
	kgsl_gmu_regwrite(device, A6XX_GMU_DCVS_PERF_SETTING, vote.raw);

	vote.bvote = dcvs_cmd.bw;
	kgsl_gmu_regwrite(device, A6XX_GMU_DCVS_BW_SETTING, vote.raw);

	ret = a6xx_oob_set(adreno_dev, OOB_DCVS_SET_MASK, OOB_DCVS_CHECK_MASK,
			OOB_DCVS_CLEAR_MASK);

	if (ret) {
		dev_err(&gmu->pdev->dev, "OOB set after GMU booted timed out\n");
		goto done;
	}

	kgsl_gmu_regread(device, A6XX_GMU_DCVS_RETURN, &ret);
	if (ret)
		dev_err(&gmu->pdev->dev, "OOB DCVS error %d\n", ret);

done:
	a6xx_oob_clear(adreno_dev, OOB_DCVS_CLEAR_MASK);

	return ret;
}

/*
 * a6xx_rpmh_gpu_pwrctrl() - GPU power control via RPMh/GMU interface
 * @adreno_dev: Pointer to adreno device
 * @mode: requested power mode
 * @arg1: first argument for mode control
 * @arg2: second argument for mode control
 */
static int a6xx_rpmh_gpu_pwrctrl(struct adreno_device *adreno_dev,
		unsigned int mode, unsigned int arg1, unsigned int arg2)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct gmu_device *gmu = &device->gmu;
	int ret;

	switch (mode) {
	case GMU_FW_START:
		ret = a6xx_gmu_fw_start(device, arg1);
		break;
	case GMU_FW_STOP:
		ret = a6xx_rpmh_power_off_gpu(device);
		break;
	case GMU_DCVS_NOHFI:
		ret = a6xx_gmu_dcvs_nohfi(device, arg1, arg2);
		break;
	case GMU_NOTIFY_SLUMBER:
		ret = a6xx_notify_slumber(device);
		break;
	default:
		dev_err(&gmu->pdev->dev,
				"unsupported GMU power ctrl mode:%d\n", mode);
		ret = -EINVAL;
		break;
	}

	return ret;
}

static bool a6xx_hw_isidle(struct adreno_device *adreno_dev)
{
	unsigned int reg;

	kgsl_gmu_regread(KGSL_DEVICE(adreno_dev),
		A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, &reg);
	return ((~reg & GPUBUSYIGNAHB) != 0);
}

static int a6xx_wait_for_gmu_idle(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct gmu_device *gmu = &device->gmu;

	if (timed_poll_check(device, A6XX_GMU_RPMH_POWER_STATE,
		gmu->idle_level, GMU_START_TIMEOUT, 0xf)) {
		dev_err(&gmu->pdev->dev,
			"GMU is not going to powerstate %d\n",
			gmu->idle_level);
		return -ETIMEDOUT;
	}

	if (timed_poll_check(device, A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS,
			0, GMU_START_TIMEOUT, CXGXCPUBUSYIGNAHB)) {
		dev_err(&gmu->pdev->dev, "GMU is not idling\n");
		return -ETIMEDOUT;
	}

	return 0;
}

/*
 * _load_gmu_firmware() - Load the ucode into the GPMU RAM & PDC/RSC
 * @device: Pointer to KGSL device
 */
static int _load_gmu_firmware(struct kgsl_device *device)
{
	const struct firmware *fw = NULL;
	const struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct gmu_device *gmu = &device->gmu;
	const struct adreno_gpu_core *gpucore = adreno_dev->gpucore;
	int image_size, ret = -EINVAL;

	/* there is no GMU */
	if (!kgsl_gmu_isenabled(device))
		return 0;

	/* GMU fw already saved and verified so do nothing new */
	if (gmu->fw_image.hostptr != 0)
		return 0;

	if (gpucore->gpmufw_name == NULL)
		return -EINVAL;

	ret = request_firmware(&fw, gpucore->gpmufw_name, device->dev);
	if (ret || fw == NULL) {
		KGSL_CORE_ERR("request_firmware (%s) failed: %d\n",
				gpucore->gpmufw_name, ret);
		return ret;
	}

	image_size = PAGE_ALIGN(fw->size);

	ret = allocate_gmu_image(gmu, image_size);

	/* load into shared memory with GMU */
	if (!ret)
		memcpy(gmu->fw_image.hostptr, fw->data, fw->size);

	release_firmware(fw);

	return ret;
}

/*
 * a6xx_microcode_read() - Read microcode
 * @adreno_dev: Pointer to adreno device
 */
static int a6xx_microcode_read(struct adreno_device *adreno_dev)
{
	return _load_firmware(KGSL_DEVICE(adreno_dev),
			adreno_dev->gpucore->sqefw_name,
			ADRENO_FW(adreno_dev, ADRENO_FW_SQE));
}

static void a6xx_cp_hw_err_callback(struct adreno_device *adreno_dev, int bit)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned int status1, status2;

	kgsl_regread(device, A6XX_CP_INTERRUPT_STATUS, &status1);

	if (status1 & BIT(A6XX_CP_OPCODE_ERROR)) {
		unsigned int opcode;

		kgsl_regwrite(device, A6XX_CP_SQE_STAT_ADDR, 1);
		kgsl_regread(device, A6XX_CP_SQE_STAT_DATA, &opcode);
		KGSL_DRV_CRIT_RATELIMIT(device,
			"CP opcode error interrupt | opcode=0x%8.8x\n",
			opcode);
	}
	if (status1 & BIT(A6XX_CP_UCODE_ERROR))
		KGSL_DRV_CRIT_RATELIMIT(device, "CP ucode error interrupt\n");
	if (status1 & BIT(A6XX_CP_HW_FAULT_ERROR)) {
		kgsl_regread(device, A6XX_CP_HW_FAULT, &status2);
		KGSL_DRV_CRIT_RATELIMIT(device,
			"CP | Ringbuffer HW fault | status=%x\n",
			status2);
	}
	if (status1 & BIT(A6XX_CP_REGISTER_PROTECTION_ERROR)) {
		kgsl_regread(device, A6XX_CP_PROTECT_STATUS, &status2);
		KGSL_DRV_CRIT_RATELIMIT(device,
			"CP | Protected mode error | %s | addr=%x | status=%x\n",
			status2 & (1 << 20) ? "READ" : "WRITE",
			status2 & 0x3FFFF, status2);
	}
	if (status1 & BIT(A6XX_CP_AHB_ERROR))
		KGSL_DRV_CRIT_RATELIMIT(device,
			"CP AHB error interrupt\n");
	if (status1 & BIT(A6XX_CP_VSD_PARITY_ERROR))
		KGSL_DRV_CRIT_RATELIMIT(device,
			"CP VSD decoder parity error\n");
	if (status1 & BIT(A6XX_CP_ILLEGAL_INSTR_ERROR))
		KGSL_DRV_CRIT_RATELIMIT(device,
			"CP Illegal instruction error\n");

}

static void a6xx_err_callback(struct adreno_device *adreno_dev, int bit)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	switch (bit) {
	case A6XX_INT_CP_AHB_ERROR:
		KGSL_DRV_CRIT_RATELIMIT(device, "CP: AHB bus error\n");
		break;
	case A6XX_INT_ATB_ASYNCFIFO_OVERFLOW:
		KGSL_DRV_CRIT_RATELIMIT(device, "RBBM: ATB ASYNC overflow\n");
		break;
	case A6XX_INT_RBBM_ATB_BUS_OVERFLOW:
		KGSL_DRV_CRIT_RATELIMIT(device, "RBBM: ATB bus overflow\n");
		break;
	case A6XX_INT_UCHE_OOB_ACCESS:
		KGSL_DRV_CRIT_RATELIMIT(device, "UCHE: Out of bounds access\n");
		break;
	case A6XX_INT_UCHE_TRAP_INTR:
		KGSL_DRV_CRIT_RATELIMIT(device, "UCHE: Trap interrupt\n");
		break;
	default:
		KGSL_DRV_CRIT_RATELIMIT(device, "Unknown interrupt %d\n", bit);
	}
}

/* GPU System Cache control registers */
#define A6XX_GPU_CX_MISC_SYSTEM_CACHE_CNTL_0	0x4
#define A6XX_GPU_CX_MISC_SYSTEM_CACHE_CNTL_1	0x8
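/*
 * The two offsets above are relative to the CX MISC block mapped from
 * A6XX_GPU_CX_REG_BASE; the LLC helpers below ioremap() that base and
 * read-modify-write these control registers directly rather than going
 * through kgsl_regwrite().
 */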

static inline void _reg_rmw(void __iomem *regaddr,
	unsigned int mask, unsigned int bits)
{
	unsigned int val = 0;

	val = __raw_readl(regaddr);
	/* Make sure the above read completes before we proceed */
	rmb();
	val &= ~mask;
	__raw_writel(val | bits, regaddr);
	/* Make sure the above write posts before we proceed */
	wmb();
}

/*
 * a6xx_llc_configure_gpu_scid() - Program the sub-cache ID for all GPU blocks
 * @adreno_dev: The adreno device pointer
 */
static void a6xx_llc_configure_gpu_scid(struct adreno_device *adreno_dev)
{
	uint32_t gpu_scid;
	uint32_t gpu_cntl1_val = 0;
	int i;
	void __iomem *gpu_cx_reg;

	gpu_scid = adreno_llc_get_scid(adreno_dev->gpu_llc_slice);
	for (i = 0; i < A6XX_LLC_NUM_GPU_SCIDS; i++)
		gpu_cntl1_val = (gpu_cntl1_val << A6XX_GPU_LLC_SCID_NUM_BITS)
			| gpu_scid;

	gpu_cx_reg = ioremap(A6XX_GPU_CX_REG_BASE, A6XX_GPU_CX_REG_SIZE);
	_reg_rmw(gpu_cx_reg + A6XX_GPU_CX_MISC_SYSTEM_CACHE_CNTL_1,
		A6XX_GPU_LLC_SCID_MASK, gpu_cntl1_val);
	iounmap(gpu_cx_reg);
}

/*
 * a6xx_llc_configure_gpuhtw_scid() - Program the SCID for GPU pagetables
 * @adreno_dev: The adreno device pointer
 */
static void a6xx_llc_configure_gpuhtw_scid(struct adreno_device *adreno_dev)
{
	uint32_t gpuhtw_scid;
	void __iomem *gpu_cx_reg;

	gpuhtw_scid = adreno_llc_get_scid(adreno_dev->gpuhtw_llc_slice);

	gpu_cx_reg = ioremap(A6XX_GPU_CX_REG_BASE, A6XX_GPU_CX_REG_SIZE);
	_reg_rmw(gpu_cx_reg + A6XX_GPU_CX_MISC_SYSTEM_CACHE_CNTL_1,
		A6XX_GPUHTW_LLC_SCID_MASK,
		gpuhtw_scid << A6XX_GPUHTW_LLC_SCID_SHIFT);
	iounmap(gpu_cx_reg);
}

/*
 * a6xx_llc_enable_overrides() - Override the page attributes
 * @adreno_dev: The adreno device pointer
 */
static void a6xx_llc_enable_overrides(struct adreno_device *adreno_dev)
{
	void __iomem *gpu_cx_reg;

	/*
	 * 0x3: readnoallocoverrideen=0
	 *	read-no-alloc=0 - Allocate lines on read miss
	 *	writenoallocoverrideen=1
	 *	write-no-alloc=1 - Do not allocate lines on write miss
	 */
	gpu_cx_reg = ioremap(A6XX_GPU_CX_REG_BASE, A6XX_GPU_CX_REG_SIZE);
	__raw_writel(0x3, gpu_cx_reg + A6XX_GPU_CX_MISC_SYSTEM_CACHE_CNTL_0);
	/* Make sure the above write posts before we proceed */
	wmb();
	iounmap(gpu_cx_reg);
}
1524
Lynus Vaz1fde74d2017-03-20 18:02:47 +05301525static const char *fault_block[8] = {
1526 [0] = "CP",
1527 [1] = "UCHE",
1528 [2] = "VFD",
1529 [3] = "UCHE",
1530 [4] = "CCU",
1531 [5] = "unknown",
1532 [6] = "CDP Prefetch",
1533 [7] = "GPMU",
1534};
1535
1536static const char *uche_client[8] = {
1537 [0] = "VFD",
1538 [1] = "SP",
1539 [2] = "VSC",
1540 [3] = "VPC",
1541 [4] = "HLSQ",
1542 [5] = "PC",
1543 [6] = "LRZ",
1544 [7] = "unknown",
1545};
1546
1547static const char *a6xx_iommu_fault_block(struct adreno_device *adreno_dev,
1548 unsigned int fsynr1)
1549{
1550 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
1551 unsigned int client_id;
1552 unsigned int uche_client_id;
1553
1554 client_id = fsynr1 & 0xff;
1555
1556 if (client_id >= ARRAY_SIZE(fault_block))
1557 return "unknown";
1558 else if (client_id != 3)
1559 return fault_block[client_id];
1560
1561 kgsl_regread(device, A6XX_UCHE_CLIENT_PF, &uche_client_id);
1562 return uche_client[uche_client_id & A6XX_UCHE_CLIENT_PF_CLIENT_ID_MASK];
1563}
1564
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07001565#define A6XX_INT_MASK \
Kyle Pieferb1027b02017-02-10 13:58:58 -08001566 ((1 << A6XX_INT_CP_AHB_ERROR) | \
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07001567 (1 << A6XX_INT_ATB_ASYNCFIFO_OVERFLOW) | \
Kyle Pieferb1027b02017-02-10 13:58:58 -08001568 (1 << A6XX_INT_RBBM_GPC_ERROR) | \
1569 (1 << A6XX_INT_CP_SW) | \
1570 (1 << A6XX_INT_CP_HW_ERROR) | \
1571 (1 << A6XX_INT_CP_IB2) | \
1572 (1 << A6XX_INT_CP_IB1) | \
1573 (1 << A6XX_INT_CP_RB) | \
1574 (1 << A6XX_INT_CP_CACHE_FLUSH_TS) | \
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07001575 (1 << A6XX_INT_RBBM_ATB_BUS_OVERFLOW) | \
Kyle Pieferb1027b02017-02-10 13:58:58 -08001576 (1 << A6XX_INT_RBBM_HANG_DETECT) | \
1577 (1 << A6XX_INT_UCHE_OOB_ACCESS) | \
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07001578 (1 << A6XX_INT_UCHE_TRAP_INTR))
1579
1580static struct adreno_irq_funcs a6xx_irq_funcs[32] = {
1581 ADRENO_IRQ_CALLBACK(NULL), /* 0 - RBBM_GPU_IDLE */
1582 ADRENO_IRQ_CALLBACK(a6xx_err_callback), /* 1 - RBBM_AHB_ERROR */
1583 ADRENO_IRQ_CALLBACK(NULL), /* 2 - UNUSED */
1584 ADRENO_IRQ_CALLBACK(NULL), /* 3 - UNUSED */
1585 ADRENO_IRQ_CALLBACK(NULL), /* 4 - UNUSED */
1586 ADRENO_IRQ_CALLBACK(NULL), /* 5 - UNUSED */
1587 /* 6 - RBBM_ATB_ASYNC_OVERFLOW */
1588 ADRENO_IRQ_CALLBACK(a6xx_err_callback),
1589 ADRENO_IRQ_CALLBACK(NULL), /* 7 - GPC_ERR */
1590 ADRENO_IRQ_CALLBACK(NULL),/* 8 - CP_SW */
1591 ADRENO_IRQ_CALLBACK(a6xx_cp_hw_err_callback), /* 9 - CP_HW_ERROR */
1592 ADRENO_IRQ_CALLBACK(NULL), /* 10 - CP_CCU_FLUSH_DEPTH_TS */
1593 ADRENO_IRQ_CALLBACK(NULL), /* 11 - CP_CCU_FLUSH_COLOR_TS */
1594 ADRENO_IRQ_CALLBACK(NULL), /* 12 - CP_CCU_RESOLVE_TS */
1595 ADRENO_IRQ_CALLBACK(adreno_cp_callback), /* 13 - CP_IB2_INT */
1596 ADRENO_IRQ_CALLBACK(adreno_cp_callback), /* 14 - CP_IB1_INT */
1597 ADRENO_IRQ_CALLBACK(adreno_cp_callback), /* 15 - CP_RB_INT */
1598 ADRENO_IRQ_CALLBACK(NULL), /* 16 - UNUSED */
1599 ADRENO_IRQ_CALLBACK(NULL), /* 17 - CP_RB_DONE_TS */
1600 ADRENO_IRQ_CALLBACK(NULL), /* 18 - CP_WT_DONE_TS */
1601 ADRENO_IRQ_CALLBACK(NULL), /* 19 - UNUSED */
1602 ADRENO_IRQ_CALLBACK(adreno_cp_callback), /* 20 - CP_CACHE_FLUSH_TS */
1603 ADRENO_IRQ_CALLBACK(NULL), /* 21 - UNUSED */
1604 ADRENO_IRQ_CALLBACK(a6xx_err_callback), /* 22 - RBBM_ATB_BUS_OVERFLOW */
1605 /* 23 - MISC_HANG_DETECT */
1606 ADRENO_IRQ_CALLBACK(adreno_hang_int_callback),
1607 ADRENO_IRQ_CALLBACK(a6xx_err_callback), /* 24 - UCHE_OOB_ACCESS */
1608 ADRENO_IRQ_CALLBACK(a6xx_err_callback), /* 25 - UCHE_TRAP_INTR */
1609 ADRENO_IRQ_CALLBACK(NULL), /* 26 - DEBBUS_INTR_0 */
1610 ADRENO_IRQ_CALLBACK(NULL), /* 27 - DEBBUS_INTR_1 */
1611 ADRENO_IRQ_CALLBACK(NULL), /* 28 - UNUSED */
1612 ADRENO_IRQ_CALLBACK(NULL), /* 29 - UNUSED */
1613 ADRENO_IRQ_CALLBACK(NULL), /* 30 - ISDB_CPU_IRQ */
1614 ADRENO_IRQ_CALLBACK(NULL), /* 31 - ISDB_UNDER_DEBUG */
1615};
1616
1617static struct adreno_irq a6xx_irq = {
1618 .funcs = a6xx_irq_funcs,
1619 .mask = A6XX_INT_MASK,
1620};
1621
1622static struct adreno_snapshot_sizes a6xx_snap_sizes = {
1623 .cp_pfp = 0x33,
1624 .roq = 0x400,
1625};
1626
1627static struct adreno_snapshot_data a6xx_snapshot_data = {
1628 .sect_sizes = &a6xx_snap_sizes,
1629};
1630
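/*
 * The perfcounter tables below use positional initializers. Assuming the
 * usual struct adreno_perfcount_register layout, the fields are:
 * { countable, kernelcount, usercount, offset_lo, offset_hi, load_bit,
 *   select }, where load_bit is the counter's bit position for the
 * RBBM_PERFCTR_LOAD_CMDn registers and -1 marks counters that cannot be
 * preloaded that way (the VBIF and always-on counters).
 */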
1631static struct adreno_perfcount_register a6xx_perfcounters_cp[] = {
1632 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_0_LO,
1633 A6XX_RBBM_PERFCTR_CP_0_HI, 0, A6XX_CP_PERFCTR_CP_SEL_0 },
1634 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_1_LO,
1635 A6XX_RBBM_PERFCTR_CP_1_HI, 1, A6XX_CP_PERFCTR_CP_SEL_1 },
1636 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_2_LO,
1637 A6XX_RBBM_PERFCTR_CP_2_HI, 2, A6XX_CP_PERFCTR_CP_SEL_2 },
1638 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_3_LO,
1639 A6XX_RBBM_PERFCTR_CP_3_HI, 3, A6XX_CP_PERFCTR_CP_SEL_3 },
1640 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_4_LO,
1641 A6XX_RBBM_PERFCTR_CP_4_HI, 4, A6XX_CP_PERFCTR_CP_SEL_4 },
1642 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_5_LO,
1643 A6XX_RBBM_PERFCTR_CP_5_HI, 5, A6XX_CP_PERFCTR_CP_SEL_5 },
1644 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_6_LO,
1645 A6XX_RBBM_PERFCTR_CP_6_HI, 6, A6XX_CP_PERFCTR_CP_SEL_6 },
1646 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_7_LO,
1647 A6XX_RBBM_PERFCTR_CP_7_HI, 7, A6XX_CP_PERFCTR_CP_SEL_7 },
1648 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_8_LO,
1649 A6XX_RBBM_PERFCTR_CP_8_HI, 8, A6XX_CP_PERFCTR_CP_SEL_8 },
1650 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_9_LO,
1651 A6XX_RBBM_PERFCTR_CP_9_HI, 9, A6XX_CP_PERFCTR_CP_SEL_9 },
1652 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_10_LO,
1653 A6XX_RBBM_PERFCTR_CP_10_HI, 10, A6XX_CP_PERFCTR_CP_SEL_10 },
1654 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_11_LO,
1655 A6XX_RBBM_PERFCTR_CP_11_HI, 11, A6XX_CP_PERFCTR_CP_SEL_11 },
1656 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_12_LO,
1657 A6XX_RBBM_PERFCTR_CP_12_HI, 12, A6XX_CP_PERFCTR_CP_SEL_12 },
1658 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_13_LO,
1659 A6XX_RBBM_PERFCTR_CP_13_HI, 13, A6XX_CP_PERFCTR_CP_SEL_13 },
1660};
1661
1662static struct adreno_perfcount_register a6xx_perfcounters_rbbm[] = {
1663 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RBBM_0_LO,
1664 A6XX_RBBM_PERFCTR_RBBM_0_HI, 15, A6XX_RBBM_PERFCTR_RBBM_SEL_0 },
1665 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RBBM_1_LO,
1666 A6XX_RBBM_PERFCTR_RBBM_1_HI, 15, A6XX_RBBM_PERFCTR_RBBM_SEL_1 },
1667 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RBBM_2_LO,
1668 A6XX_RBBM_PERFCTR_RBBM_2_HI, 16, A6XX_RBBM_PERFCTR_RBBM_SEL_2 },
1669 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RBBM_3_LO,
1670 A6XX_RBBM_PERFCTR_RBBM_3_HI, 17, A6XX_RBBM_PERFCTR_RBBM_SEL_3 },
1671};
1672
1673static struct adreno_perfcount_register a6xx_perfcounters_pc[] = {
1674 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_0_LO,
1675 A6XX_RBBM_PERFCTR_PC_0_HI, 18, A6XX_PC_PERFCTR_PC_SEL_0 },
1676 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_1_LO,
1677 A6XX_RBBM_PERFCTR_PC_1_HI, 19, A6XX_PC_PERFCTR_PC_SEL_1 },
1678 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_2_LO,
1679 A6XX_RBBM_PERFCTR_PC_2_HI, 20, A6XX_PC_PERFCTR_PC_SEL_2 },
1680 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_3_LO,
1681 A6XX_RBBM_PERFCTR_PC_3_HI, 21, A6XX_PC_PERFCTR_PC_SEL_3 },
1682 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_4_LO,
1683 A6XX_RBBM_PERFCTR_PC_4_HI, 22, A6XX_PC_PERFCTR_PC_SEL_4 },
1684 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_5_LO,
1685 A6XX_RBBM_PERFCTR_PC_5_HI, 23, A6XX_PC_PERFCTR_PC_SEL_5 },
1686 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_6_LO,
1687 A6XX_RBBM_PERFCTR_PC_6_HI, 24, A6XX_PC_PERFCTR_PC_SEL_6 },
1688 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_7_LO,
1689 A6XX_RBBM_PERFCTR_PC_7_HI, 25, A6XX_PC_PERFCTR_PC_SEL_7 },
1690};
1691
1692static struct adreno_perfcount_register a6xx_perfcounters_vfd[] = {
1693 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_0_LO,
1694 A6XX_RBBM_PERFCTR_VFD_0_HI, 26, A6XX_VFD_PERFCTR_VFD_SEL_0 },
1695 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_1_LO,
1696 A6XX_RBBM_PERFCTR_VFD_1_HI, 27, A6XX_VFD_PERFCTR_VFD_SEL_1 },
1697 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_2_LO,
1698 A6XX_RBBM_PERFCTR_VFD_2_HI, 28, A6XX_VFD_PERFCTR_VFD_SEL_2 },
1699 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_3_LO,
1700 A6XX_RBBM_PERFCTR_VFD_3_HI, 29, A6XX_VFD_PERFCTR_VFD_SEL_3 },
1701 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_4_LO,
1702 A6XX_RBBM_PERFCTR_VFD_4_HI, 30, A6XX_VFD_PERFCTR_VFD_SEL_4 },
1703 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_5_LO,
1704 A6XX_RBBM_PERFCTR_VFD_5_HI, 31, A6XX_VFD_PERFCTR_VFD_SEL_5 },
1705 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_6_LO,
1706 A6XX_RBBM_PERFCTR_VFD_6_HI, 32, A6XX_VFD_PERFCTR_VFD_SEL_6 },
1707 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_7_LO,
1708 A6XX_RBBM_PERFCTR_VFD_7_HI, 33, A6XX_VFD_PERFCTR_VFD_SEL_7 },
1709};
1710
1711static struct adreno_perfcount_register a6xx_perfcounters_hlsq[] = {
1712 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_0_LO,
1713 A6XX_RBBM_PERFCTR_HLSQ_0_HI, 34, A6XX_HLSQ_PERFCTR_HLSQ_SEL_0 },
1714 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_1_LO,
1715 A6XX_RBBM_PERFCTR_HLSQ_1_HI, 35, A6XX_HLSQ_PERFCTR_HLSQ_SEL_1 },
1716 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_2_LO,
1717 A6XX_RBBM_PERFCTR_HLSQ_2_HI, 36, A6XX_HLSQ_PERFCTR_HLSQ_SEL_2 },
1718 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_3_LO,
1719 A6XX_RBBM_PERFCTR_HLSQ_3_HI, 37, A6XX_HLSQ_PERFCTR_HLSQ_SEL_3 },
1720 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_4_LO,
1721 A6XX_RBBM_PERFCTR_HLSQ_4_HI, 38, A6XX_HLSQ_PERFCTR_HLSQ_SEL_4 },
1722 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_5_LO,
1723 A6XX_RBBM_PERFCTR_HLSQ_5_HI, 39, A6XX_HLSQ_PERFCTR_HLSQ_SEL_5 },
1724};
1725
1726static struct adreno_perfcount_register a6xx_perfcounters_vpc[] = {
1727 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_0_LO,
1728 A6XX_RBBM_PERFCTR_VPC_0_HI, 40, A6XX_VPC_PERFCTR_VPC_SEL_0 },
1729 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_1_LO,
1730 A6XX_RBBM_PERFCTR_VPC_1_HI, 41, A6XX_VPC_PERFCTR_VPC_SEL_1 },
1731 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_2_LO,
1732 A6XX_RBBM_PERFCTR_VPC_2_HI, 42, A6XX_VPC_PERFCTR_VPC_SEL_2 },
1733 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_3_LO,
1734 A6XX_RBBM_PERFCTR_VPC_3_HI, 43, A6XX_VPC_PERFCTR_VPC_SEL_3 },
1735 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_4_LO,
1736 A6XX_RBBM_PERFCTR_VPC_4_HI, 44, A6XX_VPC_PERFCTR_VPC_SEL_4 },
1737 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_5_LO,
1738 A6XX_RBBM_PERFCTR_VPC_5_HI, 45, A6XX_VPC_PERFCTR_VPC_SEL_5 },
1739};
1740
1741static struct adreno_perfcount_register a6xx_perfcounters_ccu[] = {
1742 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CCU_0_LO,
1743 A6XX_RBBM_PERFCTR_CCU_0_HI, 46, A6XX_RB_PERFCTR_CCU_SEL_0 },
1744 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CCU_1_LO,
1745 A6XX_RBBM_PERFCTR_CCU_1_HI, 47, A6XX_RB_PERFCTR_CCU_SEL_1 },
1746 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CCU_2_LO,
1747 A6XX_RBBM_PERFCTR_CCU_2_HI, 48, A6XX_RB_PERFCTR_CCU_SEL_2 },
1748 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CCU_3_LO,
1749 A6XX_RBBM_PERFCTR_CCU_3_HI, 49, A6XX_RB_PERFCTR_CCU_SEL_3 },
1750 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CCU_4_LO,
1751 A6XX_RBBM_PERFCTR_CCU_4_HI, 50, A6XX_RB_PERFCTR_CCU_SEL_4 },
1752};
1753
1754static struct adreno_perfcount_register a6xx_perfcounters_tse[] = {
1755 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TSE_0_LO,
1756 A6XX_RBBM_PERFCTR_TSE_0_HI, 51, A6XX_GRAS_PERFCTR_TSE_SEL_0 },
1757 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TSE_1_LO,
1758 A6XX_RBBM_PERFCTR_TSE_1_HI, 52, A6XX_GRAS_PERFCTR_TSE_SEL_1 },
1759 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TSE_2_LO,
1760 A6XX_RBBM_PERFCTR_TSE_2_HI, 53, A6XX_GRAS_PERFCTR_TSE_SEL_2 },
1761 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TSE_3_LO,
1762 A6XX_RBBM_PERFCTR_TSE_3_HI, 54, A6XX_GRAS_PERFCTR_TSE_SEL_3 },
1763};
1764
1765static struct adreno_perfcount_register a6xx_perfcounters_ras[] = {
1766 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RAS_0_LO,
1767 A6XX_RBBM_PERFCTR_RAS_0_HI, 55, A6XX_GRAS_PERFCTR_RAS_SEL_0 },
1768 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RAS_1_LO,
1769 A6XX_RBBM_PERFCTR_RAS_1_HI, 56, A6XX_GRAS_PERFCTR_RAS_SEL_1 },
1770 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RAS_2_LO,
1771 A6XX_RBBM_PERFCTR_RAS_2_HI, 57, A6XX_GRAS_PERFCTR_RAS_SEL_2 },
1772 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RAS_3_LO,
1773 A6XX_RBBM_PERFCTR_RAS_3_HI, 58, A6XX_GRAS_PERFCTR_RAS_SEL_3 },
1774};
1775
1776static struct adreno_perfcount_register a6xx_perfcounters_uche[] = {
1777 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_0_LO,
1778 A6XX_RBBM_PERFCTR_UCHE_0_HI, 59, A6XX_UCHE_PERFCTR_UCHE_SEL_0 },
1779 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_1_LO,
1780 A6XX_RBBM_PERFCTR_UCHE_1_HI, 60, A6XX_UCHE_PERFCTR_UCHE_SEL_1 },
1781 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_2_LO,
1782 A6XX_RBBM_PERFCTR_UCHE_2_HI, 61, A6XX_UCHE_PERFCTR_UCHE_SEL_2 },
1783 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_3_LO,
1784 A6XX_RBBM_PERFCTR_UCHE_3_HI, 62, A6XX_UCHE_PERFCTR_UCHE_SEL_3 },
1785 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_4_LO,
1786 A6XX_RBBM_PERFCTR_UCHE_4_HI, 63, A6XX_UCHE_PERFCTR_UCHE_SEL_4 },
1787 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_5_LO,
1788 A6XX_RBBM_PERFCTR_UCHE_5_HI, 64, A6XX_UCHE_PERFCTR_UCHE_SEL_5 },
1789 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_6_LO,
1790 A6XX_RBBM_PERFCTR_UCHE_6_HI, 65, A6XX_UCHE_PERFCTR_UCHE_SEL_6 },
1791 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_7_LO,
1792 A6XX_RBBM_PERFCTR_UCHE_7_HI, 66, A6XX_UCHE_PERFCTR_UCHE_SEL_7 },
1793 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_8_LO,
1794 A6XX_RBBM_PERFCTR_UCHE_8_HI, 67, A6XX_UCHE_PERFCTR_UCHE_SEL_8 },
1795 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_9_LO,
1796 A6XX_RBBM_PERFCTR_UCHE_9_HI, 68, A6XX_UCHE_PERFCTR_UCHE_SEL_9 },
1797 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_10_LO,
1798 A6XX_RBBM_PERFCTR_UCHE_10_HI, 69,
1799 A6XX_UCHE_PERFCTR_UCHE_SEL_10 },
1800 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_11_LO,
1801 A6XX_RBBM_PERFCTR_UCHE_11_HI, 70,
1802 A6XX_UCHE_PERFCTR_UCHE_SEL_11 },
1803};
1804
1805static struct adreno_perfcount_register a6xx_perfcounters_tp[] = {
1806 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_0_LO,
1807 A6XX_RBBM_PERFCTR_TP_0_HI, 71, A6XX_TPL1_PERFCTR_TP_SEL_0 },
1808 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_1_LO,
1809 A6XX_RBBM_PERFCTR_TP_1_HI, 72, A6XX_TPL1_PERFCTR_TP_SEL_1 },
1810 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_2_LO,
1811 A6XX_RBBM_PERFCTR_TP_2_HI, 73, A6XX_TPL1_PERFCTR_TP_SEL_2 },
1812 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_3_LO,
1813 A6XX_RBBM_PERFCTR_TP_3_HI, 74, A6XX_TPL1_PERFCTR_TP_SEL_3 },
1814 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_4_LO,
1815 A6XX_RBBM_PERFCTR_TP_4_HI, 75, A6XX_TPL1_PERFCTR_TP_SEL_4 },
1816 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_5_LO,
1817 A6XX_RBBM_PERFCTR_TP_5_HI, 76, A6XX_TPL1_PERFCTR_TP_SEL_5 },
1818 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_6_LO,
1819 A6XX_RBBM_PERFCTR_TP_6_HI, 77, A6XX_TPL1_PERFCTR_TP_SEL_6 },
1820 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_7_LO,
1821 A6XX_RBBM_PERFCTR_TP_7_HI, 78, A6XX_TPL1_PERFCTR_TP_SEL_7 },
1822 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_8_LO,
1823 A6XX_RBBM_PERFCTR_TP_8_HI, 79, A6XX_TPL1_PERFCTR_TP_SEL_8 },
1824 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_9_LO,
1825 A6XX_RBBM_PERFCTR_TP_9_HI, 80, A6XX_TPL1_PERFCTR_TP_SEL_9 },
1826 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_10_LO,
1827 A6XX_RBBM_PERFCTR_TP_10_HI, 81, A6XX_TPL1_PERFCTR_TP_SEL_10 },
1828 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_11_LO,
1829 A6XX_RBBM_PERFCTR_TP_11_HI, 82, A6XX_TPL1_PERFCTR_TP_SEL_11 },
1830};
1831
1832static struct adreno_perfcount_register a6xx_perfcounters_sp[] = {
1833 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_0_LO,
1834 A6XX_RBBM_PERFCTR_SP_0_HI, 83, A6XX_SP_PERFCTR_SP_SEL_0 },
1835 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_1_LO,
1836 A6XX_RBBM_PERFCTR_SP_1_HI, 84, A6XX_SP_PERFCTR_SP_SEL_1 },
1837 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_2_LO,
1838 A6XX_RBBM_PERFCTR_SP_2_HI, 85, A6XX_SP_PERFCTR_SP_SEL_2 },
1839 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_3_LO,
1840 A6XX_RBBM_PERFCTR_SP_3_HI, 86, A6XX_SP_PERFCTR_SP_SEL_3 },
1841 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_4_LO,
1842 A6XX_RBBM_PERFCTR_SP_4_HI, 87, A6XX_SP_PERFCTR_SP_SEL_4 },
1843 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_5_LO,
1844 A6XX_RBBM_PERFCTR_SP_5_HI, 88, A6XX_SP_PERFCTR_SP_SEL_5 },
1845 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_6_LO,
1846 A6XX_RBBM_PERFCTR_SP_6_HI, 89, A6XX_SP_PERFCTR_SP_SEL_6 },
1847 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_7_LO,
1848 A6XX_RBBM_PERFCTR_SP_7_HI, 90, A6XX_SP_PERFCTR_SP_SEL_7 },
1849 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_8_LO,
1850 A6XX_RBBM_PERFCTR_SP_8_HI, 91, A6XX_SP_PERFCTR_SP_SEL_8 },
1851 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_9_LO,
1852 A6XX_RBBM_PERFCTR_SP_9_HI, 92, A6XX_SP_PERFCTR_SP_SEL_9 },
1853 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_10_LO,
1854 A6XX_RBBM_PERFCTR_SP_10_HI, 93, A6XX_SP_PERFCTR_SP_SEL_10 },
1855 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_11_LO,
1856 A6XX_RBBM_PERFCTR_SP_11_HI, 94, A6XX_SP_PERFCTR_SP_SEL_11 },
1857 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_12_LO,
1858 A6XX_RBBM_PERFCTR_SP_12_HI, 95, A6XX_SP_PERFCTR_SP_SEL_12 },
1859 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_13_LO,
1860 A6XX_RBBM_PERFCTR_SP_13_HI, 96, A6XX_SP_PERFCTR_SP_SEL_13 },
1861 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_14_LO,
1862 A6XX_RBBM_PERFCTR_SP_14_HI, 97, A6XX_SP_PERFCTR_SP_SEL_14 },
1863 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_15_LO,
1864 A6XX_RBBM_PERFCTR_SP_15_HI, 98, A6XX_SP_PERFCTR_SP_SEL_15 },
1865 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_16_LO,
1866 A6XX_RBBM_PERFCTR_SP_16_HI, 99, A6XX_SP_PERFCTR_SP_SEL_16 },
1867 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_17_LO,
1868 A6XX_RBBM_PERFCTR_SP_17_HI, 100, A6XX_SP_PERFCTR_SP_SEL_17 },
1869 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_18_LO,
1870 A6XX_RBBM_PERFCTR_SP_18_HI, 101, A6XX_SP_PERFCTR_SP_SEL_18 },
1871 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_19_LO,
1872 A6XX_RBBM_PERFCTR_SP_19_HI, 102, A6XX_SP_PERFCTR_SP_SEL_19 },
1873 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_20_LO,
1874 A6XX_RBBM_PERFCTR_SP_20_HI, 103, A6XX_SP_PERFCTR_SP_SEL_20 },
1875 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_21_LO,
1876 A6XX_RBBM_PERFCTR_SP_21_HI, 104, A6XX_SP_PERFCTR_SP_SEL_21 },
1877 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_22_LO,
1878 A6XX_RBBM_PERFCTR_SP_22_HI, 105, A6XX_SP_PERFCTR_SP_SEL_22 },
1879 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_23_LO,
1880 A6XX_RBBM_PERFCTR_SP_23_HI, 106, A6XX_SP_PERFCTR_SP_SEL_23 },
1881};
1882
1883static struct adreno_perfcount_register a6xx_perfcounters_rb[] = {
1884 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_0_LO,
1885 A6XX_RBBM_PERFCTR_RB_0_HI, 107, A6XX_RB_PERFCTR_RB_SEL_0 },
1886 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_1_LO,
1887 A6XX_RBBM_PERFCTR_RB_1_HI, 108, A6XX_RB_PERFCTR_RB_SEL_1 },
1888 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_2_LO,
1889 A6XX_RBBM_PERFCTR_RB_2_HI, 109, A6XX_RB_PERFCTR_RB_SEL_2 },
1890 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_3_LO,
1891 A6XX_RBBM_PERFCTR_RB_3_HI, 110, A6XX_RB_PERFCTR_RB_SEL_3 },
1892 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_4_LO,
1893 A6XX_RBBM_PERFCTR_RB_4_HI, 111, A6XX_RB_PERFCTR_RB_SEL_4 },
1894 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_5_LO,
1895 A6XX_RBBM_PERFCTR_RB_5_HI, 112, A6XX_RB_PERFCTR_RB_SEL_5 },
1896 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_6_LO,
1897 A6XX_RBBM_PERFCTR_RB_6_HI, 113, A6XX_RB_PERFCTR_RB_SEL_6 },
1898 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_7_LO,
1899 A6XX_RBBM_PERFCTR_RB_7_HI, 114, A6XX_RB_PERFCTR_RB_SEL_7 },
1900};
1901
1902static struct adreno_perfcount_register a6xx_perfcounters_vsc[] = {
1903 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VSC_0_LO,
1904 A6XX_RBBM_PERFCTR_VSC_0_HI, 115, A6XX_VSC_PERFCTR_VSC_SEL_0 },
1905 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VSC_1_LO,
1906 A6XX_RBBM_PERFCTR_VSC_1_HI, 116, A6XX_VSC_PERFCTR_VSC_SEL_1 },
1907};
1908
1909static struct adreno_perfcount_register a6xx_perfcounters_lrz[] = {
1910 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_LRZ_0_LO,
1911 A6XX_RBBM_PERFCTR_LRZ_0_HI, 117, A6XX_GRAS_PERFCTR_LRZ_SEL_0 },
1912 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_LRZ_1_LO,
1913 A6XX_RBBM_PERFCTR_LRZ_1_HI, 118, A6XX_GRAS_PERFCTR_LRZ_SEL_1 },
1914 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_LRZ_2_LO,
1915 A6XX_RBBM_PERFCTR_LRZ_2_HI, 119, A6XX_GRAS_PERFCTR_LRZ_SEL_2 },
1916 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_LRZ_3_LO,
1917 A6XX_RBBM_PERFCTR_LRZ_3_HI, 120, A6XX_GRAS_PERFCTR_LRZ_SEL_3 },
1918};
1919
1920static struct adreno_perfcount_register a6xx_perfcounters_cmp[] = {
1921 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CMP_0_LO,
1922 A6XX_RBBM_PERFCTR_CMP_0_HI, 121, A6XX_RB_PERFCTR_CMP_SEL_0 },
1923 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CMP_1_LO,
1924 A6XX_RBBM_PERFCTR_CMP_1_HI, 122, A6XX_RB_PERFCTR_CMP_SEL_1 },
1925 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CMP_2_LO,
1926 A6XX_RBBM_PERFCTR_CMP_2_HI, 123, A6XX_RB_PERFCTR_CMP_SEL_2 },
1927 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CMP_3_LO,
1928 A6XX_RBBM_PERFCTR_CMP_3_HI, 124, A6XX_RB_PERFCTR_CMP_SEL_3 },
1929};
1930
1931static struct adreno_perfcount_register a6xx_perfcounters_vbif[] = {
1932 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_CNT_LOW0,
1933 A6XX_VBIF_PERF_CNT_HIGH0, -1, A6XX_VBIF_PERF_CNT_SEL0 },
1934 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_CNT_LOW1,
1935 A6XX_VBIF_PERF_CNT_HIGH1, -1, A6XX_VBIF_PERF_CNT_SEL1 },
1936 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_CNT_LOW2,
1937 A6XX_VBIF_PERF_CNT_HIGH2, -1, A6XX_VBIF_PERF_CNT_SEL2 },
1938 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_CNT_LOW3,
1939 A6XX_VBIF_PERF_CNT_HIGH3, -1, A6XX_VBIF_PERF_CNT_SEL3 },
1940};
1941
1942static struct adreno_perfcount_register a6xx_perfcounters_vbif_pwr[] = {
1943 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_PWR_CNT_LOW0,
1944 A6XX_VBIF_PERF_PWR_CNT_HIGH0, -1, A6XX_VBIF_PERF_PWR_CNT_EN0 },
1945 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_PWR_CNT_LOW1,
1946 A6XX_VBIF_PERF_PWR_CNT_HIGH1, -1, A6XX_VBIF_PERF_PWR_CNT_EN1 },
1947 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_PWR_CNT_LOW2,
1948 A6XX_VBIF_PERF_PWR_CNT_HIGH2, -1, A6XX_VBIF_PERF_PWR_CNT_EN2 },
1949};
1950
1951static struct adreno_perfcount_register a6xx_perfcounters_alwayson[] = {
1952 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_CP_ALWAYS_ON_COUNTER_LO,
1953 A6XX_CP_ALWAYS_ON_COUNTER_HI, -1 },
1954};
1955
1956#define A6XX_PERFCOUNTER_GROUP(offset, name) \
1957 ADRENO_PERFCOUNTER_GROUP(a6xx, offset, name)
1958
1959#define A6XX_PERFCOUNTER_GROUP_FLAGS(offset, name, flags) \
1960 ADRENO_PERFCOUNTER_GROUP_FLAGS(a6xx, offset, name, flags)
1961
1962static struct adreno_perfcount_group a6xx_perfcounter_groups
1963 [KGSL_PERFCOUNTER_GROUP_MAX] = {
1964 A6XX_PERFCOUNTER_GROUP(CP, cp),
1965 A6XX_PERFCOUNTER_GROUP(RBBM, rbbm),
1966 A6XX_PERFCOUNTER_GROUP(PC, pc),
1967 A6XX_PERFCOUNTER_GROUP(VFD, vfd),
1968 A6XX_PERFCOUNTER_GROUP(HLSQ, hlsq),
1969 A6XX_PERFCOUNTER_GROUP(VPC, vpc),
1970 A6XX_PERFCOUNTER_GROUP(CCU, ccu),
1971 A6XX_PERFCOUNTER_GROUP(CMP, cmp),
1972 A6XX_PERFCOUNTER_GROUP(TSE, tse),
1973 A6XX_PERFCOUNTER_GROUP(RAS, ras),
1974 A6XX_PERFCOUNTER_GROUP(LRZ, lrz),
1975 A6XX_PERFCOUNTER_GROUP(UCHE, uche),
1976 A6XX_PERFCOUNTER_GROUP(TP, tp),
1977 A6XX_PERFCOUNTER_GROUP(SP, sp),
1978 A6XX_PERFCOUNTER_GROUP(RB, rb),
1979 A6XX_PERFCOUNTER_GROUP(VSC, vsc),
1980 A6XX_PERFCOUNTER_GROUP(VBIF, vbif),
1981 A6XX_PERFCOUNTER_GROUP_FLAGS(VBIF_PWR, vbif_pwr,
1982 ADRENO_PERFCOUNTER_GROUP_FIXED),
1983 A6XX_PERFCOUNTER_GROUP_FLAGS(ALWAYSON, alwayson,
1984 ADRENO_PERFCOUNTER_GROUP_FIXED),
1985};
1986
1987static struct adreno_perfcounters a6xx_perfcounters = {
1988 a6xx_perfcounter_groups,
1989 ARRAY_SIZE(a6xx_perfcounter_groups),
1990};
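/*
 * Usage sketch, for illustration only (the helper signature comes from
 * the common adreno perfcounter code and may differ between kernel
 * versions): a kernel client reserves a counter from one of the groups
 * above with something like
 *
 *	unsigned int lo, hi;
 *	adreno_perfcounter_get(adreno_dev, KGSL_PERFCOUNTER_GROUP_SP,
 *			countable, &lo, &hi, PERFCOUNTER_FLAG_KERNEL);
 *
 * which claims a free adreno_perfcount_register from that group, programs
 * its select register with the requested countable and returns the LO/HI
 * register offsets to read. Groups marked ADRENO_PERFCOUNTER_GROUP_FIXED
 * (VBIF_PWR, ALWAYSON) expose fixed-function counters whose countables
 * cannot be reprogrammed.
 */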
1991
1992/* Register offset defines for A6XX, in order of enum adreno_regs */
1993static unsigned int a6xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
1994
1995 ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_BASE, A6XX_CP_RB_BASE),
1996	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_BASE_HI, A6XX_CP_RB_BASE_HI),
1997	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR_ADDR_LO,
1998 A6XX_CP_RB_RPTR_ADDR_LO),
1999 ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR_ADDR_HI,
2000 A6XX_CP_RB_RPTR_ADDR_HI),
2001 ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR, A6XX_CP_RB_RPTR),
2002 ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_WPTR, A6XX_CP_RB_WPTR),
2003 ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_CNTL, A6XX_CP_RB_CNTL),
2004	ADRENO_REG_DEFINE(ADRENO_REG_CP_ME_CNTL, A6XX_CP_SQE_CNTL),
2005	ADRENO_REG_DEFINE(ADRENO_REG_CP_CNTL, A6XX_CP_MISC_CNTL),
2006	ADRENO_REG_DEFINE(ADRENO_REG_CP_HW_FAULT, A6XX_CP_HW_FAULT),
2007	ADRENO_REG_DEFINE(ADRENO_REG_CP_IB1_BASE, A6XX_CP_IB1_BASE),
2008 ADRENO_REG_DEFINE(ADRENO_REG_CP_IB1_BASE_HI, A6XX_CP_IB1_BASE_HI),
2009 ADRENO_REG_DEFINE(ADRENO_REG_CP_IB1_BUFSZ, A6XX_CP_IB1_REM_SIZE),
2010 ADRENO_REG_DEFINE(ADRENO_REG_CP_IB2_BASE, A6XX_CP_IB2_BASE),
2011 ADRENO_REG_DEFINE(ADRENO_REG_CP_IB2_BASE_HI, A6XX_CP_IB2_BASE_HI),
2012 ADRENO_REG_DEFINE(ADRENO_REG_CP_IB2_BUFSZ, A6XX_CP_IB2_REM_SIZE),
2013 ADRENO_REG_DEFINE(ADRENO_REG_CP_ROQ_ADDR, A6XX_CP_ROQ_DBG_ADDR),
2014 ADRENO_REG_DEFINE(ADRENO_REG_CP_ROQ_DATA, A6XX_CP_ROQ_DBG_DATA),
2015	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_STATUS, A6XX_RBBM_STATUS),
2016 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_STATUS3, A6XX_RBBM_STATUS3),
2017	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_CTL, A6XX_RBBM_PERFCTR_CNTL),
2018 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD0,
2019 A6XX_RBBM_PERFCTR_LOAD_CMD0),
2020 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD1,
2021 A6XX_RBBM_PERFCTR_LOAD_CMD1),
2022 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD2,
2023 A6XX_RBBM_PERFCTR_LOAD_CMD2),
2024 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD3,
2025 A6XX_RBBM_PERFCTR_LOAD_CMD3),
2026
2027 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_INT_0_MASK, A6XX_RBBM_INT_0_MASK),
2028 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_INT_0_STATUS, A6XX_RBBM_INT_0_STATUS),
2029 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_CLOCK_CTL, A6XX_RBBM_CLOCK_CNTL),
2030 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_INT_CLEAR_CMD,
2031 A6XX_RBBM_INT_CLEAR_CMD),
2032 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SW_RESET_CMD, A6XX_RBBM_SW_RESET_CMD),
2033 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_BLOCK_SW_RESET_CMD,
2034 A6XX_RBBM_BLOCK_SW_RESET_CMD),
2035 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_BLOCK_SW_RESET_CMD2,
2036 A6XX_RBBM_BLOCK_SW_RESET_CMD2),
2037	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_LO,
2038 A6XX_RBBM_PERFCTR_LOAD_VALUE_LO),
2039 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_HI,
2040 A6XX_RBBM_PERFCTR_LOAD_VALUE_HI),
2041	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO,
2042 A6XX_CP_ALWAYS_ON_COUNTER_LO),
2043 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI,
2044 A6XX_CP_ALWAYS_ON_COUNTER_HI),
2045 ADRENO_REG_DEFINE(ADRENO_REG_VBIF_VERSION, A6XX_VBIF_VERSION),
2046	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO,
2047 A6XX_GMU_ALWAYS_ON_COUNTER_L),
2048 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI,
2049 A6XX_GMU_ALWAYS_ON_COUNTER_H),
2050 ADRENO_REG_DEFINE(ADRENO_REG_GMU_AO_INTERRUPT_EN,
2051 A6XX_GMU_AO_INTERRUPT_EN),
2052 ADRENO_REG_DEFINE(ADRENO_REG_GMU_HOST_INTERRUPT_CLR,
2053 A6XX_GMU_HOST_INTERRUPT_CLR),
2054 ADRENO_REG_DEFINE(ADRENO_REG_GMU_HOST_INTERRUPT_STATUS,
2055 A6XX_GMU_HOST_INTERRUPT_STATUS),
2056 ADRENO_REG_DEFINE(ADRENO_REG_GMU_HOST_INTERRUPT_MASK,
2057 A6XX_GMU_HOST_INTERRUPT_MASK),
2058 ADRENO_REG_DEFINE(ADRENO_REG_GMU_PWR_COL_KEEPALIVE,
2059 A6XX_GMU_GMU_PWR_COL_KEEPALIVE),
2060 ADRENO_REG_DEFINE(ADRENO_REG_GMU_AHB_FENCE_STATUS,
2061 A6XX_GMU_AHB_FENCE_STATUS),
2062 ADRENO_REG_DEFINE(ADRENO_REG_GMU_HFI_CTRL_STATUS,
2063 A6XX_GMU_HFI_CTRL_STATUS),
2064 ADRENO_REG_DEFINE(ADRENO_REG_GMU_HFI_VERSION_INFO,
2065 A6XX_GMU_HFI_VERSION_INFO),
2066 ADRENO_REG_DEFINE(ADRENO_REG_GMU_HFI_SFR_ADDR,
2067 A6XX_GMU_HFI_SFR_ADDR),
2068 ADRENO_REG_DEFINE(ADRENO_REG_GMU_RPMH_POWER_STATE,
2069 A6XX_GMU_RPMH_POWER_STATE),
2070 ADRENO_REG_DEFINE(ADRENO_REG_GMU_GMU2HOST_INTR_CLR,
2071 A6XX_GMU_GMU2HOST_INTR_CLR),
2072 ADRENO_REG_DEFINE(ADRENO_REG_GMU_GMU2HOST_INTR_INFO,
2073 A6XX_GMU_GMU2HOST_INTR_INFO),
2074 ADRENO_REG_DEFINE(ADRENO_REG_GMU_HOST2GMU_INTR_SET,
2075 A6XX_GMU_HOST2GMU_INTR_SET),
2076 ADRENO_REG_DEFINE(ADRENO_REG_GMU_HOST2GMU_INTR_CLR,
2077 A6XX_GMU_HOST2GMU_INTR_CLR),
2078 ADRENO_REG_DEFINE(ADRENO_REG_GMU_HOST2GMU_INTR_RAW_INFO,
2079 A6XX_GMU_HOST2GMU_INTR_RAW_INFO),
2080
2081 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TRUST_CONTROL,
2082 A6XX_RBBM_SECVID_TRUST_CNTL),
2083 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE,
2084 A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO),
2085 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE_HI,
2086 A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI),
2087 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_SIZE,
2088 A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE),
2089 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TSB_CONTROL,
2090 A6XX_RBBM_SECVID_TSB_CNTL),
2091};
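/*
 * Note on the table above: ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO/HI are
 * defined twice, first against the CP and then against the GMU always-on
 * counter. Assuming ADRENO_REG_DEFINE expands to a designated
 * initializer, the later GMU mapping is the one that takes effect.
 */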
2092
2093static const struct adreno_reg_offsets a6xx_reg_offsets = {
2094 .offsets = a6xx_register_offsets,
2095 .offset_0 = ADRENO_REG_REGISTER_MAX,
2096};
2097
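/*
 * Per-GPU function table: the core adreno/kgsl code drives A6XX hardware
 * through these hooks (startup, ringbuffer setup, snapshot, IRQ handling,
 * perfcounters, LLC configuration and GMU out-of-band control).
 */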
2098struct adreno_gpudev adreno_a6xx_gpudev = {
2099 .reg_offsets = &a6xx_reg_offsets,
2100 .start = a6xx_start,
2101	.snapshot = a6xx_snapshot,
2102	.irq = &a6xx_irq,
2103	.snapshot_data = &a6xx_snapshot_data,
2104	.irq_trace = trace_kgsl_a5xx_irq_status,
2105 .num_prio_levels = KGSL_PRIORITY_MAX_RB_LEVELS,
2106 .platform_setup = a6xx_platform_setup,
2107	.init = a6xx_init,
2108	.rb_start = a6xx_rb_start,
2109 .regulator_enable = a6xx_sptprac_enable,
2110 .regulator_disable = a6xx_sptprac_disable,
2111	.perfcounters = &a6xx_perfcounters,
2112	.microcode_read = a6xx_microcode_read,
2113 .enable_64bit = a6xx_enable_64bit,
2114	.llc_configure_gpu_scid = a6xx_llc_configure_gpu_scid,
2115	.llc_configure_gpuhtw_scid = a6xx_llc_configure_gpuhtw_scid,
2116	.llc_enable_overrides = a6xx_llc_enable_overrides,
2117	.oob_set = a6xx_oob_set,
2118 .oob_clear = a6xx_oob_clear,
2119 .rpmh_gpu_pwrctrl = a6xx_rpmh_gpu_pwrctrl,
2120	.hw_isidle = a6xx_hw_isidle, /* Replaced by NULL if GMU is disabled */
2121	.wait_for_gmu_idle = a6xx_wait_for_gmu_idle,
2122 .iommu_fault_block = a6xx_iommu_fault_block,
2123};