/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/firmware.h>
#include <soc/qcom/subsystem_restart.h>
#include <linux/pm_opp.h>

#include "adreno.h"
#include "a6xx_reg.h"
#include "adreno_a6xx.h"
#include "adreno_cp_parser.h"
#include "adreno_trace.h"
#include "adreno_pm4types.h"
#include "adreno_perfcounter.h"
#include "adreno_ringbuffer.h"
#include "adreno_llc.h"
#include "kgsl_sharedmem.h"
#include "kgsl_log.h"
#include "kgsl.h"
#include "kgsl_gmu.h"
#include "kgsl_trace.h"

#define OOB_REQUEST_TIMEOUT	10 /* ms */

#define A6XX_CP_RB_CNTL_DEFAULT (((ilog2(4) << 8) & 0x1F00) | \
		(ilog2(KGSL_RB_DWORDS >> 1) & 0x3F))
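
/*
 * A worked example for the macro above (illustrative values, not taken
 * from this file): bits 0-5 hold the ring size as log2(quadwords), per
 * the comment in a6xx_rb_start(), and bits 8-12 hold a second log2
 * field set here to ilog2(4) = 2. If KGSL_RB_DWORDS were 32768, then
 * KGSL_RB_DWORDS >> 1 = 16384 quadwords and ilog2(16384) = 14, so the
 * macro would evaluate to (2 << 8) | 14 = 0x20E.
 */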

#define MIN_HBB		13

#define A6XX_LLC_NUM_GPU_SCIDS		5
#define A6XX_GPU_LLC_SCID_NUM_BITS	5
#define A6XX_GPU_LLC_SCID_MASK \
	((1 << (A6XX_LLC_NUM_GPU_SCIDS * A6XX_GPU_LLC_SCID_NUM_BITS)) - 1)
#define A6XX_GPUHTW_LLC_SCID_SHIFT	25
#define A6XX_GPUHTW_LLC_SCID_MASK \
	(((1 << A6XX_GPU_LLC_SCID_NUM_BITS) - 1) << A6XX_GPUHTW_LLC_SCID_SHIFT)
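
/*
 * With the values above, these masks evaluate to fixed constants:
 * A6XX_GPU_LLC_SCID_MASK = (1 << 25) - 1 = 0x01FFFFFF (five 5-bit GPU
 * SCID fields) and A6XX_GPUHTW_LLC_SCID_MASK = 0x1F << 25 = 0x3E000000
 * (the single GPUHTW, i.e. hardware table walker, SCID field above
 * them).
 */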

#define A6XX_GPU_CX_REG_BASE		0x509E000
#define A6XX_GPU_CX_REG_SIZE		0x1000

static int _load_gmu_firmware(struct kgsl_device *device);

static const struct adreno_vbif_data a630_vbif[] = {
	{A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009},
	{A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3},
	{0, 0},
};

static const struct adreno_vbif_platform a6xx_vbif_platforms[] = {
	{ adreno_is_a630, a630_vbif },
};
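
/*
 * A note on the shape of the tables above (an inference from the data,
 * since adreno_vbif_start() lives elsewhere): each platform entry
 * pairs a GPU-identification callback with a register/value list, and
 * the {0, 0} entry terminates that list, so a new target would be
 * added by appending another hypothetical { adreno_is_xxx, xxx_vbif }
 * row here.
 */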

struct kgsl_hwcg_reg {
	unsigned int off;
	unsigned int val;
};
static const struct kgsl_hwcg_reg a630_hwcg_regs[] = {
	{A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL_SP1, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL_SP2, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL_SP3, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
	{A6XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220},
	{A6XX_RBBM_CLOCK_CNTL2_SP2, 0x02222220},
	{A6XX_RBBM_CLOCK_CNTL2_SP3, 0x02222220},
	{A6XX_RBBM_CLOCK_DELAY_SP0, 0x0000F3CF},
	{A6XX_RBBM_CLOCK_DELAY_SP1, 0x0000F3CF},
	{A6XX_RBBM_CLOCK_DELAY_SP2, 0x0000F3CF},
	{A6XX_RBBM_CLOCK_DELAY_SP3, 0x0000F3CF},
	{A6XX_RBBM_CLOCK_HYST_SP0, 0x00000080},
	{A6XX_RBBM_CLOCK_HYST_SP1, 0x00000080},
	{A6XX_RBBM_CLOCK_HYST_SP2, 0x00000080},
	{A6XX_RBBM_CLOCK_HYST_SP3, 0x00000080},
	{A6XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL_TP1, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL_TP2, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL_TP3, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL3_TP1, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL3_TP2, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL3_TP3, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
	{A6XX_RBBM_CLOCK_CNTL4_TP1, 0x00022222},
	{A6XX_RBBM_CLOCK_CNTL4_TP2, 0x00022222},
	{A6XX_RBBM_CLOCK_CNTL4_TP3, 0x00022222},
	{A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
	{A6XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
	{A6XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
	{A6XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
	{A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
	{A6XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
	{A6XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
	{A6XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
	{A6XX_RBBM_CLOCK_HYST3_TP0, 0x07777777},
	{A6XX_RBBM_CLOCK_HYST3_TP1, 0x07777777},
	{A6XX_RBBM_CLOCK_HYST3_TP2, 0x07777777},
	{A6XX_RBBM_CLOCK_HYST3_TP3, 0x07777777},
	{A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
	{A6XX_RBBM_CLOCK_HYST4_TP1, 0x00077777},
	{A6XX_RBBM_CLOCK_HYST4_TP2, 0x00077777},
	{A6XX_RBBM_CLOCK_HYST4_TP3, 0x00077777},
	{A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
	{A6XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
	{A6XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
	{A6XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
	{A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
	{A6XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
	{A6XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
	{A6XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
	{A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
	{A6XX_RBBM_CLOCK_DELAY3_TP1, 0x11111111},
	{A6XX_RBBM_CLOCK_DELAY3_TP2, 0x11111111},
	{A6XX_RBBM_CLOCK_DELAY3_TP3, 0x11111111},
	{A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
	{A6XX_RBBM_CLOCK_DELAY4_TP1, 0x00011111},
	{A6XX_RBBM_CLOCK_DELAY4_TP2, 0x00011111},
	{A6XX_RBBM_CLOCK_DELAY4_TP3, 0x00011111},
	{A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
	{A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
	{A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
	{A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL2_RB0, 0x00002222},
	{A6XX_RBBM_CLOCK_CNTL2_RB1, 0x00002222},
	{A6XX_RBBM_CLOCK_CNTL2_RB2, 0x00002222},
	{A6XX_RBBM_CLOCK_CNTL2_RB3, 0x00002222},
	{A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
	{A6XX_RBBM_CLOCK_CNTL_CCU1, 0x00002220},
	{A6XX_RBBM_CLOCK_CNTL_CCU2, 0x00002220},
	{A6XX_RBBM_CLOCK_CNTL_CCU3, 0x00002220},
	{A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
	{A6XX_RBBM_CLOCK_HYST_RB_CCU1, 0x00040F00},
	{A6XX_RBBM_CLOCK_HYST_RB_CCU2, 0x00040F00},
	{A6XX_RBBM_CLOCK_HYST_RB_CCU3, 0x00040F00},
	{A6XX_RBBM_CLOCK_CNTL_RAC, 0x05022022},
	{A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
	{A6XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
	{A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
	{A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
	{A6XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
	{A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
	{A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
	{A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
	{A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
	{A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
	{A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
	{A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
	{A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
	{A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
	{A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
	{A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
	{A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
	{A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555}
};

static const struct {
	int (*devfunc)(struct adreno_device *adreno_dev);
	const struct kgsl_hwcg_reg *regs;
	unsigned int count;
} a6xx_hwcg_registers[] = {
	{adreno_is_a630, a630_hwcg_regs, ARRAY_SIZE(a630_hwcg_regs)}
};

static struct a6xx_protected_regs {
	unsigned int base;
	unsigned int count;
	int read_protect;
} a6xx_protected_regs_group[] = {
	{ 0x600, 0x51, 0 },
	{ 0xAE50, 0x2, 1 },
	{ 0x9624, 0x13, 1 },
	{ 0x8630, 0x8, 1 },
	{ 0x9E70, 0x1, 1 },
	{ 0x9E78, 0x187, 1 },
	{ 0xF000, 0x810, 1 },
	{ 0xFC00, 0x3, 0 },
	{ 0x50E, 0x0, 1 },
	{ 0x50F, 0x0, 0 },
	{ 0x510, 0x0, 1 },
	{ 0x0, 0x4F9, 0 },
	{ 0x501, 0xA, 0 },
	{ 0x511, 0x44, 0 },
	{ 0xE00, 0xE, 1 },
	{ 0x8E00, 0x0, 1 },
	{ 0x8E50, 0xF, 1 },
	{ 0xBE02, 0x0, 1 },
	{ 0xBE20, 0x11F3, 1 },
	{ 0x800, 0x82, 1 },
	{ 0x8A0, 0x8, 1 },
	{ 0x8AB, 0x19, 1 },
	{ 0x900, 0x4D, 1 },
	{ 0x98D, 0x76, 1 },
	{ 0x8D0, 0x23, 0 },
	{ 0x980, 0x4, 0 },
	{ 0xA630, 0x0, 1 },
};
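
/*
 * How these entries are consumed (grounded in a6xx_protect_init()
 * below): each row is packed into one A6XX_CP_PROTECT_REG word as
 * base | (count << 18) | (read_protect << 31). Since the count field
 * occupies bits 18-30, a single entry can cover at most 0x2000
 * registers, which is why that function splits the MMU register range
 * into 0x2000-sized chunks.
 */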

static void a6xx_platform_setup(struct adreno_device *adreno_dev)
{
	uint64_t addr;
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);

	/* Calculate SP local and private mem addresses */
	addr = ALIGN(ADRENO_UCHE_GMEM_BASE + adreno_dev->gmem_size, SZ_64K);
	adreno_dev->sp_local_gpuaddr = addr;
	adreno_dev->sp_pvt_gpuaddr = addr + SZ_64K;
	gpudev->vbif_xin_halt_ctrl0_mask = A6XX_VBIF_XIN_HALT_CTRL0_MASK;
}

static void _update_always_on_regs(struct adreno_device *adreno_dev)
{
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	unsigned int *const regs = gpudev->reg_offsets->offsets;

	regs[ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO] =
		A6XX_CP_ALWAYS_ON_COUNTER_LO;
	regs[ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI] =
		A6XX_CP_ALWAYS_ON_COUNTER_HI;
}

static void a6xx_init(struct adreno_device *adreno_dev)
{
	a6xx_crashdump_init(adreno_dev);

	/*
	 * If the GMU is not enabled, rewrite the offset for the always on
	 * counters to point to the CP always on instead of GMU always on
	 */
	if (!kgsl_gmu_isenabled(KGSL_DEVICE(adreno_dev)))
		_update_always_on_regs(adreno_dev);
}

/**
 * a6xx_protect_init() - Initializes register protection on a6xx
 * @adreno_dev: Pointer to the adreno device structure
 * Performs register writes to enable protected access to sensitive
 * registers
 */
static void a6xx_protect_init(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_protected_registers *mmu_prot =
		kgsl_mmu_get_prot_regs(&device->mmu);
	int i, num_sets;
	int req_sets = ARRAY_SIZE(a6xx_protected_regs_group);
	int max_sets = adreno_dev->gpucore->num_protected_regs;
	unsigned int mmu_base = 0, mmu_range = 0, cur_range;

	/* enable access protection to privileged registers */
	kgsl_regwrite(device, A6XX_CP_PROTECT_CNTL, 0x00000003);

	if (mmu_prot) {
		mmu_base = mmu_prot->base;
		mmu_range = 1 << mmu_prot->range;
		req_sets += DIV_ROUND_UP(mmu_range, 0x2000);
	}

	if (req_sets > max_sets)
		WARN(1, "Size exceeds the num of protection regs available\n");

	/* Protect GPU registers */
	num_sets = min_t(unsigned int,
		ARRAY_SIZE(a6xx_protected_regs_group), max_sets);
	for (i = 0; i < num_sets; i++) {
		struct a6xx_protected_regs *regs =
			&a6xx_protected_regs_group[i];

		kgsl_regwrite(device, A6XX_CP_PROTECT_REG + i,
			regs->base | (regs->count << 18) |
			(regs->read_protect << 31));
	}

	/* Protect MMU registers */
	if (mmu_prot) {
		while ((i < max_sets) && (mmu_range > 0)) {
			cur_range = min_t(unsigned int, mmu_range,
				0x2000);
			kgsl_regwrite(device, A6XX_CP_PROTECT_REG + i,
				mmu_base | ((cur_range - 1) << 18) | (1 << 31));

			mmu_base += cur_range;
			mmu_range -= cur_range;
			i++;
		}
	}
}

static void a6xx_enable_64bit(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	kgsl_regwrite(device, A6XX_CP_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_VSC_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_GRAS_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_RB_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_PC_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_HLSQ_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_VFD_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_VPC_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_UCHE_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_SP_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_TPL1_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
}

static void a6xx_hwcg_set(struct adreno_device *adreno_dev, bool on)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	const struct kgsl_hwcg_reg *regs;
	int i, j;

	if (!test_bit(ADRENO_HWCG_CTRL, &adreno_dev->pwrctrl_flag))
		return;

	for (i = 0; i < ARRAY_SIZE(a6xx_hwcg_registers); i++) {
		if (a6xx_hwcg_registers[i].devfunc(adreno_dev))
			break;
	}

	if (i == ARRAY_SIZE(a6xx_hwcg_registers))
		return;

	regs = a6xx_hwcg_registers[i].regs;

	/* Disable SP clock before programming HWCG registers */
	kgsl_gmu_regrmw(device, A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 0);

	for (j = 0; j < a6xx_hwcg_registers[i].count; j++)
		kgsl_regwrite(device, regs[j].off, on ? regs[j].val : 0);

	if (kgsl_gmu_isenabled(device)) {
		kgsl_gmu_regwrite(device, A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL,
			0x00020222);
		kgsl_gmu_regwrite(device, A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL,
			0x00010111);
		kgsl_gmu_regwrite(device, A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL,
			0x00050555);
	}
	/* Enable SP clock */
	kgsl_gmu_regrmw(device, A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);

	/* enable top level HWCG */
	kgsl_regwrite(device, A6XX_RBBM_CLOCK_CNTL, on ? 0x8AA8AA02 : 0);
	kgsl_regwrite(device, A5XX_RBBM_ISDB_CNT, on ? 0x00000182 : 0x00000180);
}

/*
 * a6xx_start() - Device start
 * @adreno_dev: Pointer to adreno device
 *
 * a6xx device start
 */
static void a6xx_start(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned int bit, mal, mode, glbl_inv;
	unsigned int amsbc = 0;

	/* runtime adjust callbacks based on feature sets */
	if (!kgsl_gmu_isenabled(device))
		/* Legacy idle management if gmu is disabled */
		ADRENO_GPU_DEVICE(adreno_dev)->hw_isidle = NULL;
	/* enable hardware clockgating */
	a6xx_hwcg_set(adreno_dev, true);

	adreno_vbif_start(adreno_dev, a6xx_vbif_platforms,
			ARRAY_SIZE(a6xx_vbif_platforms));
	/*
	 * Set UCHE_WRITE_THRU_BASE to the UCHE_TRAP_BASE effectively
	 * disabling L2 bypass
	 */
	kgsl_regwrite(device, A6XX_UCHE_WRITE_RANGE_MAX_LO, 0xffffffc0);
	kgsl_regwrite(device, A6XX_UCHE_WRITE_RANGE_MAX_HI, 0x0001ffff);
	kgsl_regwrite(device, A6XX_UCHE_TRAP_BASE_LO, 0xfffff000);
	kgsl_regwrite(device, A6XX_UCHE_TRAP_BASE_HI, 0x0001ffff);
	kgsl_regwrite(device, A6XX_UCHE_WRITE_THRU_BASE_LO, 0xfffff000);
	kgsl_regwrite(device, A6XX_UCHE_WRITE_THRU_BASE_HI, 0x0001ffff);

	/* Program the GMEM VA range for the UCHE path */
	kgsl_regwrite(device, A6XX_UCHE_GMEM_RANGE_MIN_LO,
			ADRENO_UCHE_GMEM_BASE);
	kgsl_regwrite(device, A6XX_UCHE_GMEM_RANGE_MIN_HI, 0x0);
	kgsl_regwrite(device, A6XX_UCHE_GMEM_RANGE_MAX_LO,
			ADRENO_UCHE_GMEM_BASE +
			adreno_dev->gmem_size - 1);
	kgsl_regwrite(device, A6XX_UCHE_GMEM_RANGE_MAX_HI, 0x0);

	kgsl_regwrite(device, A6XX_UCHE_FILTER_CNTL, 0x804);
	kgsl_regwrite(device, A6XX_UCHE_CACHE_WAYS, 0x4);

	kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_2, 0x010000C0);
	kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362C);

	/* Set the mem pool size */
	kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, 128);

	/* Set the primFifo threshold default values */
	kgsl_regwrite(device, A6XX_PC_DBG_ECO_CNTL, (0x300 << 11));

	/* Set the AHB default slave response to "ERROR" */
	kgsl_regwrite(device, A6XX_CP_AHB_CNTL, 0x1);

	/* Turn on performance counters */
	kgsl_regwrite(device, A6XX_RBBM_PERFCTR_CNTL, 0x1);

	if (of_property_read_u32(device->pdev->dev.of_node,
		"qcom,highest-bank-bit", &bit))
		bit = MIN_HBB;

	if (of_property_read_u32(device->pdev->dev.of_node,
		"qcom,min-access-length", &mal))
		mal = 32;

	if (of_property_read_u32(device->pdev->dev.of_node,
		"qcom,ubwc-mode", &mode))
		mode = 0;

	switch (mode) {
	case KGSL_UBWC_1_0:
		mode = 1;
		break;
	case KGSL_UBWC_2_0:
		mode = 0;
		break;
	case KGSL_UBWC_3_0:
		mode = 0;
		amsbc = 1; /* Only valid for A640 and A680 */
		break;
	default:
		break;
	}

	if (bit >= 13 && bit <= 16)
		bit = (bit - 13) & 0x03;
	else
		bit = 0;

	mal = (mal == 64) ? 1 : 0;
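
	/*
	 * A worked example of the encoding below, using hypothetical DT
	 * values rather than anything from this file: with
	 * qcom,highest-bank-bit = 14 and qcom,min-access-length = 64,
	 * the code above yields bit = 1 and mal = 1, so under UBWC 2.0
	 * (mode = 0, amsbc = 0) A6XX_RB_NC_MODE_CNTL is written as
	 * (0 << 4) | (1 << 3) | (1 << 1) | 0 = 0xA.
	 */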

	/* globalInvFlushFilterDis bit (bit 29) needs to be set for A630 V1 */
	glbl_inv = (adreno_is_a630v1(adreno_dev)) ? 1 : 0;

	kgsl_regwrite(device, A6XX_RB_NC_MODE_CNTL, (amsbc << 4) | (mal << 3) |
							(bit << 1) | mode);
	kgsl_regwrite(device, A6XX_TPL1_NC_MODE_CNTL, (mal << 3) |
							(bit << 1) | mode);
	kgsl_regwrite(device, A6XX_SP_NC_MODE_CNTL, (mal << 3) | (bit << 1) |
							mode);

	kgsl_regwrite(device, A6XX_UCHE_MODE_CNTL, (glbl_inv << 29) |
						(mal << 23) | (bit << 21));

	/* Set hang detection threshold to 4 million cycles (0x3FFFF*16) */
	kgsl_regwrite(device, A6XX_RBBM_INTERFACE_HANG_INT_CNTL,
					(1 << 30) | 0x3ffff);

	kgsl_regwrite(device, A6XX_UCHE_CLIENT_PF, 1);

	/* Set TWOPASSUSEWFI in A6XX_PC_DBG_ECO_CNTL if requested */
	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_TWO_PASS_USE_WFI))
		kgsl_regrmw(device, A6XX_PC_DBG_ECO_CNTL, 0, (1 << 8));

	a6xx_protect_init(adreno_dev);
}

/*
 * a6xx_microcode_load() - Load microcode
 * @adreno_dev: Pointer to adreno device
 */
static int a6xx_microcode_load(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_firmware *fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);
	uint64_t gpuaddr;
	void *zap;
	int ret = 0;

	gpuaddr = fw->memdesc.gpuaddr;
	kgsl_regwrite(device, A6XX_CP_SQE_INSTR_BASE_LO,
				lower_32_bits(gpuaddr));
	kgsl_regwrite(device, A6XX_CP_SQE_INSTR_BASE_HI,
				upper_32_bits(gpuaddr));

	/* Load the zap shader firmware through PIL if it's available */
	if (adreno_dev->gpucore->zap_name && !adreno_dev->zap_loaded) {
		zap = subsystem_get(adreno_dev->gpucore->zap_name);

		/* Return an error if the zap shader cannot be loaded */
		if (IS_ERR_OR_NULL(zap)) {
			ret = (zap == NULL) ? -ENODEV : PTR_ERR(zap);
			zap = NULL;
		} else
			adreno_dev->zap_loaded = 1;
	}

	return ret;
}

/*
 * CP_INIT_MAX_CONTEXT bit tells if multiple hardware contexts can
 * be used at once or if they should be serialized
 */
#define CP_INIT_MAX_CONTEXT BIT(0)

/* Enables register protection mode */
#define CP_INIT_ERROR_DETECTION_CONTROL BIT(1)

/* Header dump information */
#define CP_INIT_HEADER_DUMP BIT(2) /* Reserved */

/* Default Reset states enabled for PFP and ME */
#define CP_INIT_DEFAULT_RESET_STATE BIT(3)

/* Drawcall filter range */
#define CP_INIT_DRAWCALL_FILTER_RANGE BIT(4)

/* Ucode workaround masks */
#define CP_INIT_UCODE_WORKAROUND_MASK BIT(5)

#define CP_INIT_MASK (CP_INIT_MAX_CONTEXT | \
		CP_INIT_ERROR_DETECTION_CONTROL | \
		CP_INIT_HEADER_DUMP | \
		CP_INIT_DEFAULT_RESET_STATE | \
		CP_INIT_UCODE_WORKAROUND_MASK)

static void _set_ordinals(struct adreno_device *adreno_dev,
		unsigned int *cmds, unsigned int count)
{
	unsigned int *start = cmds;

	/* Enabled ordinal mask */
	*cmds++ = CP_INIT_MASK;

	if (CP_INIT_MASK & CP_INIT_MAX_CONTEXT)
		*cmds++ = 0x00000003;

	if (CP_INIT_MASK & CP_INIT_ERROR_DETECTION_CONTROL)
		*cmds++ = 0x20000000;

	if (CP_INIT_MASK & CP_INIT_HEADER_DUMP) {
		/* Header dump address */
		*cmds++ = 0x00000000;
		/* Header dump enable and dump size */
		*cmds++ = 0x00000000;
	}

	if (CP_INIT_MASK & CP_INIT_DRAWCALL_FILTER_RANGE) {
		/* Start range */
		*cmds++ = 0x00000000;
		/* End range (inclusive) */
		*cmds++ = 0x00000000;
	}

	if (CP_INIT_MASK & CP_INIT_UCODE_WORKAROUND_MASK)
		*cmds++ = 0x00000000;

	/* Pad rest of the cmds with 0's */
	while ((unsigned int)(cmds - start) < count)
		*cmds++ = 0x0;
}
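
/*
 * Dword accounting for the function above: CP_INIT_MASK enables five
 * ordinals but CP_INIT_DRAWCALL_FILTER_RANGE is not among them, so the
 * payload written is 1 (mask) + 1 + 1 + 2 + 1 = 6 dwords, padded with
 * zeros out to the count passed in. a6xx_send_cp_init() below passes
 * count = 8, which plus the CP_ME_INIT packet header accounts for the
 * 9 dwords it reserves in the ringbuffer.
 */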

/*
 * a6xx_send_cp_init() - Initialize ringbuffer
 * @adreno_dev: Pointer to adreno device
 * @rb: Pointer to the ringbuffer of device
 *
 * Submit commands for ME initialization
 */
static int a6xx_send_cp_init(struct adreno_device *adreno_dev,
		struct adreno_ringbuffer *rb)
{
	unsigned int *cmds;
	int ret;

	cmds = adreno_ringbuffer_allocspace(rb, 9);
	if (IS_ERR(cmds))
		return PTR_ERR(cmds);

	*cmds++ = cp_type7_packet(CP_ME_INIT, 8);

	_set_ordinals(adreno_dev, cmds, 8);

	ret = adreno_ringbuffer_submit_spin(rb, NULL, 2000);
	if (ret)
		adreno_spin_idle_debug(adreno_dev,
				"CP initialization failed to idle\n");

	return ret;
}

/*
 * a6xx_rb_start() - Start the ringbuffer
 * @adreno_dev: Pointer to adreno device
 * @start_type: Warm or cold start
 */
static int a6xx_rb_start(struct adreno_device *adreno_dev,
		unsigned int start_type)
{
	struct adreno_ringbuffer *rb = ADRENO_CURRENT_RINGBUFFER(adreno_dev);
	struct kgsl_device *device = &adreno_dev->dev;
	uint64_t addr;
	int ret;

	addr = SCRATCH_RPTR_GPU_ADDR(device, rb->id);

	adreno_writereg64(adreno_dev, ADRENO_REG_CP_RB_RPTR_ADDR_LO,
			ADRENO_REG_CP_RB_RPTR_ADDR_HI, addr);

	/*
	 * The size of the ringbuffer in the hardware is the log2
	 * representation of the size in quadwords (sizedwords / 2).
	 */
	adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_CNTL,
			A6XX_CP_RB_CNTL_DEFAULT);

	adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_BASE,
			rb->buffer_desc.gpuaddr);

	ret = a6xx_microcode_load(adreno_dev);
	if (ret)
		return ret;

	/* Clear the SQE_HALT to start the CP engine */
	kgsl_regwrite(device, A6XX_CP_SQE_CNTL, 1);

	ret = a6xx_send_cp_init(adreno_dev, rb);
	if (ret)
		return ret;

	/* GPU comes up in secured mode, make it unsecured by default */
	return adreno_set_unsecured_mode(adreno_dev, rb);
}

static int _load_firmware(struct kgsl_device *device, const char *fwfile,
			struct adreno_firmware *firmware)
{
	const struct firmware *fw = NULL;
	int ret;

	ret = request_firmware(&fw, fwfile, device->dev);

	if (ret) {
		KGSL_DRV_ERR(device, "request_firmware(%s) failed: %d\n",
				fwfile, ret);
		return ret;
	}

	ret = kgsl_allocate_global(device, &firmware->memdesc, fw->size - 4,
				KGSL_MEMFLAGS_GPUREADONLY, 0, "ucode");

	if (!ret) {
		memcpy(firmware->memdesc.hostptr, &fw->data[4], fw->size - 4);
		firmware->size = (fw->size - 4) / sizeof(uint32_t);
		firmware->version = *(unsigned int *)&fw->data[4];
	}

	release_firmware(fw);

	ret = _load_gmu_firmware(device);

	return ret;
}

#define RSC_CMD_OFFSET 2
#define PDC_CMD_OFFSET 4

static void _regwrite(void __iomem *regbase,
		unsigned int offsetwords, unsigned int value)
{
	void __iomem *reg;

	reg = regbase + (offsetwords << 2);
	__raw_writel(value, reg);
}
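
/*
 * A note on the helper above: offsetwords << 2 converts a dword offset
 * into a byte offset, and __raw_writel() posts the write with no
 * implicit barrier. The caller below orders the whole uCode/TCS
 * programming sequence with a single wmb() at the end instead of
 * paying for a barrier per register write.
 */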
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -0700696
Kyle Pieferb1027b02017-02-10 13:58:58 -0800697/*
698 * _load_gmu_rpmh_ucode() - Load the ucode into the GPU PDC/RSC blocks
699 * PDC and RSC execute GPU power on/off RPMh sequence
700 * @device: Pointer to KGSL device
701 */
702static void _load_gmu_rpmh_ucode(struct kgsl_device *device)
703{
704 struct gmu_device *gmu = &device->gmu;
705
706 /* Setup RSC PDC handshake for sleep and wakeup */
707 kgsl_gmu_regwrite(device, A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
708 kgsl_gmu_regwrite(device, A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
709 kgsl_gmu_regwrite(device, A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
710 kgsl_gmu_regwrite(device,
711 A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + RSC_CMD_OFFSET, 0);
712 kgsl_gmu_regwrite(device,
713 A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + RSC_CMD_OFFSET, 0);
714 kgsl_gmu_regwrite(device,
715 A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + RSC_CMD_OFFSET * 2,
716 0x80000000);
717 kgsl_gmu_regwrite(device,
718 A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + RSC_CMD_OFFSET * 2,
719 0);
720 kgsl_gmu_regwrite(device, A6XX_RSCC_OVERRIDE_START_ADDR, 0);
721 kgsl_gmu_regwrite(device, A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
722 kgsl_gmu_regwrite(device, A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
723 kgsl_gmu_regwrite(device, A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);
724
725 /* Enable timestamp event */
726 kgsl_gmu_regwrite(device, A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0, 1);
727
728 /* Load RSC sequencer uCode for sleep and wakeup */
729 kgsl_gmu_regwrite(device, A6XX_RSCC_SEQ_MEM_0_DRV0, 0xA7A506A0);
730 kgsl_gmu_regwrite(device, A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xA1E6A6E7);
731 kgsl_gmu_regwrite(device, A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xA2E081E1);
732 kgsl_gmu_regwrite(device, A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xE9A982E2);
733 kgsl_gmu_regwrite(device, A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020E8A8);
734
735 /* Load PDC sequencer uCode for power up and power down sequence */
736 _regwrite(gmu->pdc_reg_virt, PDC_GPU_SEQ_MEM_0, 0xFFBFA1E1);
737 _regwrite(gmu->pdc_reg_virt, PDC_GPU_SEQ_MEM_0 + 1, 0xE0A4A3A2);
738 _regwrite(gmu->pdc_reg_virt, PDC_GPU_SEQ_MEM_0 + 2, 0xE2848382);
739 _regwrite(gmu->pdc_reg_virt, PDC_GPU_SEQ_MEM_0 + 3, 0xFDBDE4E3);
740 _regwrite(gmu->pdc_reg_virt, PDC_GPU_SEQ_MEM_0 + 4, 0x00002081);
741
742 /* Set TCS commands used by PDC sequence for low power modes */
743 _regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS0_CMD_ENABLE_BANK, 7);
744 _regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS0_CMD_WAIT_FOR_CMPL_BANK, 0);
745 _regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS0_CONTROL, 0);
746 _regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS0_CMD0_MSGID, 0x10108);
747 _regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS0_CMD0_ADDR, 0x30010);
748 _regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS0_CMD0_DATA, 1);
749 _regwrite(gmu->pdc_reg_virt,
750 PDC_GPU_TCS0_CMD0_MSGID + PDC_CMD_OFFSET, 0x10108);
751 _regwrite(gmu->pdc_reg_virt,
752 PDC_GPU_TCS0_CMD0_ADDR + PDC_CMD_OFFSET, 0x30000);
753 _regwrite(gmu->pdc_reg_virt,
754 PDC_GPU_TCS0_CMD0_DATA + PDC_CMD_OFFSET, 0x0);
755 _regwrite(gmu->pdc_reg_virt,
756 PDC_GPU_TCS0_CMD0_MSGID + PDC_CMD_OFFSET * 2, 0x10108);
757 _regwrite(gmu->pdc_reg_virt,
758 PDC_GPU_TCS0_CMD0_ADDR + PDC_CMD_OFFSET * 2, 0x30080);
759 _regwrite(gmu->pdc_reg_virt,
760 PDC_GPU_TCS0_CMD0_DATA + PDC_CMD_OFFSET * 2, 0x0);
761 _regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS1_CMD_ENABLE_BANK, 7);
762 _regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0);
763 _regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS1_CONTROL, 0);
764 _regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS1_CMD0_MSGID, 0x10108);
765 _regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS1_CMD0_ADDR, 0x30010);
766 _regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS1_CMD0_DATA, 2);
767 _regwrite(gmu->pdc_reg_virt,
768 PDC_GPU_TCS1_CMD0_MSGID + PDC_CMD_OFFSET, 0x10108);
769 _regwrite(gmu->pdc_reg_virt,
770 PDC_GPU_TCS1_CMD0_ADDR + PDC_CMD_OFFSET, 0x30000);
771 _regwrite(gmu->pdc_reg_virt,
772 PDC_GPU_TCS1_CMD0_DATA + PDC_CMD_OFFSET, 0x3);
773 _regwrite(gmu->pdc_reg_virt,
774 PDC_GPU_TCS1_CMD0_MSGID + PDC_CMD_OFFSET * 2, 0x10108);
775 _regwrite(gmu->pdc_reg_virt,
776 PDC_GPU_TCS1_CMD0_ADDR + PDC_CMD_OFFSET * 2, 0x30080);
777 _regwrite(gmu->pdc_reg_virt,
778 PDC_GPU_TCS1_CMD0_DATA + PDC_CMD_OFFSET * 2, 0x3);
779
780 /* Setup GPU PDC */
781 _regwrite(gmu->pdc_reg_virt, PDC_GPU_SEQ_START_ADDR, 0);
782 _regwrite(gmu->pdc_reg_virt, PDC_GPU_ENABLE_PDC, 0x80000001);
783
784 /* ensure no writes happen before the uCode is fully written */
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -0700785 wmb();
Kyle Pieferb1027b02017-02-10 13:58:58 -0800786}
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -0700787
Kyle Pieferb1027b02017-02-10 13:58:58 -0800788#define GMU_START_TIMEOUT 10 /* ms */
789#define GPU_START_TIMEOUT 100 /* ms */
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -0700790
Kyle Pieferb1027b02017-02-10 13:58:58 -0800791/*
792 * timed_poll_check() - polling *gmu* register at given offset until
793 * its value changed to match expected value. The function times
794 * out and returns after given duration if register is not updated
795 * as expected.
796 *
797 * @device: Pointer to KGSL device
798 * @offset: Register offset
799 * @expected_ret: expected register value that stops polling
800 * @timout: number of jiffies to abort the polling
801 * @mask: bitmask to filter register value to match expected_ret
802 */
803static int timed_poll_check(struct kgsl_device *device,
804 unsigned int offset, unsigned int expected_ret,
805 unsigned int timeout, unsigned int mask)
806{
807 unsigned long t;
808 unsigned int value;
809
810 t = jiffies + msecs_to_jiffies(timeout);
811
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -0700812 while (!time_after(jiffies, t)) {
Kyle Pieferb1027b02017-02-10 13:58:58 -0800813 kgsl_gmu_regread(device, offset, &value);
814 if ((value & mask) == expected_ret)
815 return 0;
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -0700816 cpu_relax();
817 }
818
Kyle Pieferb1027b02017-02-10 13:58:58 -0800819 return -EINVAL;
820}
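
/*
 * Typical usage, taken from the callers below: a6xx_gmu_start() polls
 * A6XX_GMU_CM3_FW_INIT_RESULT for the value 0xBABEFACE with a
 * full-word mask of 0xFFFFFFFF, while the OOB and SPTPRAC paths poll a
 * single status bit by passing the same bit for both expected_ret and
 * mask.
 */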

/*
 * a6xx_gmu_power_config() - Configure and enable GMU's low power mode
 * setting based on ADRENO feature flags.
 * @device: Pointer to KGSL device
 */
static void a6xx_gmu_power_config(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct gmu_device *gmu = &device->gmu;

	/* Configure registers for idle setting. The setting is cumulative */
	switch (gmu->idle_level) {
	case GPU_HW_MIN_VOLT:
		kgsl_gmu_regrmw(device, A6XX_GMU_RPMH_CTRL, 0,
				MIN_BW_ENABLE_MASK);
		kgsl_gmu_regrmw(device, A6XX_GMU_RPMH_HYST_CTRL, 0,
				MIN_BW_HYST);
		/* fall through */
	case GPU_HW_NAP:
		kgsl_gmu_regrmw(device, A6XX_GMU_GPU_NAP_CTRL, 0,
				HW_NAP_ENABLE_MASK);
		/* fall through */
	case GPU_HW_IFPC:
		kgsl_gmu_regwrite(device, A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
				0x000A0080);
		kgsl_gmu_regrmw(device, A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
				IFPC_ENABLE_MASK);
		/* fall through */
	case GPU_HW_SPTP_PC:
		kgsl_gmu_regwrite(device, A6XX_GMU_PWR_COL_SPTPRAC_HYST,
				0x000A0080);
		kgsl_gmu_regrmw(device, A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
				SPTP_ENABLE_MASK);
		/* fall through */
	default:
		break;
	}

	/* ACD feature enablement */
	if (ADRENO_FEATURE(adreno_dev, ADRENO_LM))
		kgsl_gmu_regrmw(device, A6XX_GMU_BOOT_KMD_LM_HANDSHAKE, 0,
				BIT(10));

	/* Enable RPMh GPU client */
	if (ADRENO_FEATURE(adreno_dev, ADRENO_RPMH))
		kgsl_gmu_regrmw(device, A6XX_GMU_RPMH_CTRL, 0,
				RPMH_ENABLE_MASK);

	/* Disable reference bandgap voltage */
	kgsl_gmu_regwrite(device, A6XX_GMU_AO_SPARE_CNTL, 1);
}

/*
 * a6xx_gmu_start() - Start GMU and wait until FW boot up.
 * @device: Pointer to KGSL device
 */
static int a6xx_gmu_start(struct kgsl_device *device)
{
	struct gmu_device *gmu = &device->gmu;

	/* Write 1 first to make sure the GMU is reset */
	kgsl_gmu_regwrite(device, A6XX_GMU_CM3_SYSRESET, 1);

	/* Make sure putting in reset doesn't happen after clearing */
	wmb();

	/* Bring GMU out of reset */
	kgsl_gmu_regwrite(device, A6XX_GMU_CM3_SYSRESET, 0);
	if (timed_poll_check(device,
			A6XX_GMU_CM3_FW_INIT_RESULT,
			0xBABEFACE,
			GMU_START_TIMEOUT,
			0xFFFFFFFF)) {
		dev_err(&gmu->pdev->dev, "GMU doesn't boot\n");
		return -ETIMEDOUT;
	}

	return 0;
}

/*
 * a6xx_gmu_hfi_start() - Write registers and start HFI.
 * @device: Pointer to KGSL device
 */
static int a6xx_gmu_hfi_start(struct kgsl_device *device)
{
	struct gmu_device *gmu = &device->gmu;

	kgsl_gmu_regrmw(device, A6XX_GMU_GMU2HOST_INTR_MASK,
			HFI_IRQ_MSGQ_MASK, 0);
	kgsl_gmu_regwrite(device, A6XX_GMU_HFI_CTRL_INIT, 1);

	if (timed_poll_check(device,
			A6XX_GMU_HFI_CTRL_STATUS,
			BIT(0),
			GMU_START_TIMEOUT,
			BIT(0))) {
		dev_err(&gmu->pdev->dev, "GMU HFI init failed\n");
		return -ETIMEDOUT;
	}

	return 0;
}

/*
 * a6xx_oob_set() - Set OOB interrupt to GMU.
 * @adreno_dev: Pointer to adreno device
 * @set_mask: set_mask is a bitmask that defines a set of OOB
 *	interrupts to trigger.
 * @check_mask: check_mask is a bitmask that provides a set of
 *	OOB ACK bits. check_mask usually matches set_mask to
 *	ensure OOBs are handled.
 * @clear_mask: After the GMU handles an OOB interrupt, the GMU driver
 *	clears the interrupt. clear_mask is a bitmask that defines
 *	a set of OOB interrupts to clear.
 */
static int a6xx_oob_set(struct adreno_device *adreno_dev,
		unsigned int set_mask, unsigned int check_mask,
		unsigned int clear_mask)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct gmu_device *gmu = &device->gmu;
	int ret = 0;

	if (!kgsl_gmu_isenabled(device))
		return -ENODEV;

	kgsl_gmu_regwrite(device, A6XX_GMU_HOST2GMU_INTR_SET, set_mask);

	if (timed_poll_check(device,
			A6XX_GMU_GMU2HOST_INTR_INFO,
			check_mask,
			GPU_START_TIMEOUT,
			check_mask)) {
		ret = -ETIMEDOUT;
		dev_err(&gmu->pdev->dev, "OOB set timed out\n");
	}

	kgsl_gmu_regwrite(device, A6XX_GMU_GMU2HOST_INTR_CLR, clear_mask);

	trace_kgsl_gmu_oob_set(set_mask);
	return ret;
}

/*
 * a6xx_oob_clear() - Clear a previously set OOB request.
 * @adreno_dev: Pointer to the adreno device that has the GMU
 * @clear_mask: Bitmask that provides the OOB bits to clear
 */
static inline void a6xx_oob_clear(struct adreno_device *adreno_dev,
		unsigned int clear_mask)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	if (!kgsl_gmu_isenabled(device))
		return;

	kgsl_gmu_regwrite(device, A6XX_GMU_HOST2GMU_INTR_SET, clear_mask);
	trace_kgsl_gmu_oob_clear(clear_mask);
}

#define SPTPRAC_POWERON_CTRL_MASK	0x00778000
#define SPTPRAC_POWEROFF_CTRL_MASK	0x00778001
#define SPTPRAC_POWEROFF_STATUS_MASK	BIT(2)
#define SPTPRAC_POWERON_STATUS_MASK	BIT(3)
#define SPTPRAC_CTRL_TIMEOUT		10 /* ms */
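
/*
 * Worth noting about the two control masks above: they differ only in
 * bit 0 (0x00778000 vs 0x00778001), so the low bit appears to select
 * power-off while the common upper bits carry the rest of the control
 * word. This is an observation from the values themselves, not from a
 * register specification.
 */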

/*
 * a6xx_sptprac_enable() - Power on SPTPRAC
 * @adreno_dev: Pointer to Adreno device
 */
static int a6xx_sptprac_enable(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct gmu_device *gmu = &device->gmu;

	if (!gmu->pdev)
		return -EINVAL;

	kgsl_gmu_regwrite(device, A6XX_GMU_GX_SPTPRAC_POWER_CONTROL,
			SPTPRAC_POWERON_CTRL_MASK);

	if (timed_poll_check(device,
			A6XX_GMU_SPTPRAC_PWR_CLK_STATUS,
			SPTPRAC_POWERON_STATUS_MASK,
			SPTPRAC_CTRL_TIMEOUT,
			SPTPRAC_POWERON_STATUS_MASK)) {
		dev_err(&gmu->pdev->dev, "power on SPTPRAC fail\n");
		return -EINVAL;
	}

	return 0;
}

/*
 * a6xx_sptprac_disable() - Power off SPTPRAC
 * @adreno_dev: Pointer to Adreno device
 */
static void a6xx_sptprac_disable(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct gmu_device *gmu = &device->gmu;

	if (!gmu->pdev)
		return;

	kgsl_gmu_regwrite(device, A6XX_GMU_GX_SPTPRAC_POWER_CONTROL,
			SPTPRAC_POWEROFF_CTRL_MASK);

	if (timed_poll_check(device,
			A6XX_GMU_SPTPRAC_PWR_CLK_STATUS,
			SPTPRAC_POWEROFF_STATUS_MASK,
			SPTPRAC_CTRL_TIMEOUT,
			SPTPRAC_POWEROFF_STATUS_MASK))
		dev_err(&gmu->pdev->dev, "power off SPTPRAC fail\n");
}

/*
 * a6xx_hm_enable() - Power on HM and turn on clock
 * @adreno_dev: Pointer to Adreno device
 */
static int a6xx_hm_enable(struct adreno_device *adreno_dev)
{
	int ret;
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct gmu_device *gmu = &device->gmu;

	if (regulator_is_enabled(gmu->gx_gdsc))
		return 0;

	ret = regulator_enable(gmu->gx_gdsc);
	if (ret) {
		dev_err(&gmu->pdev->dev,
			"Failed to turn on GPU HM HS\n");
		return ret;
	}

	ret = clk_set_rate(pwr->grp_clks[0],
			pwr->pwrlevels[pwr->default_pwrlevel].
			gpu_freq);
	if (ret)
		return ret;

	return clk_prepare_enable(pwr->grp_clks[0]);
}

/*
 * a6xx_hm_disable() - Turn off HM clock and power off
 * @adreno_dev: Pointer to Adreno device
 */
static int a6xx_hm_disable(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct gmu_device *gmu = &device->gmu;

	if (!regulator_is_enabled(gmu->gx_gdsc))
		return 0;

	clk_disable_unprepare(pwr->grp_clks[0]);

	clk_set_rate(pwr->grp_clks[0],
			pwr->pwrlevels[pwr->num_pwrlevels - 1].
			gpu_freq);

	return regulator_disable(gmu->gx_gdsc);
}

/*
 * a6xx_hm_sptprac_enable() - Turn on HM and SPTPRAC
 * @device: Pointer to KGSL device
 */
static int a6xx_hm_sptprac_enable(struct kgsl_device *device)
{
	int ret = 0;
	struct gmu_device *gmu = &device->gmu;

	/* If GMU does not control HM we must */
	if (gmu->idle_level < GPU_HW_IFPC) {
		ret = a6xx_hm_enable(ADRENO_DEVICE(device));
		if (ret) {
			dev_err(&gmu->pdev->dev, "Failed to power on GPU HM\n");
			return ret;
		}
	}

	/* If GMU does not control SPTPRAC we must */
	if (gmu->idle_level < GPU_HW_SPTP_PC) {
		ret = a6xx_sptprac_enable(ADRENO_DEVICE(device));
		if (ret) {
			a6xx_hm_disable(ADRENO_DEVICE(device));
			return ret;
		}
	}

	return ret;
}

/*
 * a6xx_hm_sptprac_disable() - Turn off SPTPRAC and HM
 * @device: Pointer to KGSL device
 */
static int a6xx_hm_sptprac_disable(struct kgsl_device *device)
{
	int ret = 0;
	struct gmu_device *gmu = &device->gmu;

	/* If GMU does not control SPTPRAC we must */
	if (gmu->idle_level < GPU_HW_SPTP_PC)
		a6xx_sptprac_disable(ADRENO_DEVICE(device));

	/* If GMU does not control HM we must */
	if (gmu->idle_level < GPU_HW_IFPC) {
		ret = a6xx_hm_disable(ADRENO_DEVICE(device));
		if (ret)
			dev_err(&gmu->pdev->dev, "Failed to power off GPU HM\n");
	}

	return ret;
}

/*
 * a6xx_hm_sptprac_control() - Turn HM and SPTPRAC on or off
 * @device: Pointer to KGSL device
 * @on: True to turn on or false to turn off
 */
static int a6xx_hm_sptprac_control(struct kgsl_device *device, bool on)
{
	if (on)
		return a6xx_hm_sptprac_enable(device);
	else
		return a6xx_hm_sptprac_disable(device);
}

/*
 * a6xx_gfx_rail_on() - request GMU to power GPU at given OPP.
 * @device: Pointer to KGSL device
 */
static int a6xx_gfx_rail_on(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct gmu_device *gmu = &device->gmu;
	struct arc_vote_desc *default_opp;
	unsigned int perf_idx;
	int ret;

	perf_idx = pwr->num_pwrlevels - pwr->default_pwrlevel - 1;
	default_opp = &gmu->rpmh_votes.gx_votes[perf_idx];

	kgsl_gmu_regwrite(device, A6XX_GMU_BOOT_SLUMBER_OPTION,
			OOB_BOOT_OPTION);
	kgsl_gmu_regwrite(device, A6XX_GMU_GX_VOTE_IDX, default_opp->pri_idx);
	kgsl_gmu_regwrite(device, A6XX_GMU_MX_VOTE_IDX, default_opp->sec_idx);

	ret = a6xx_oob_set(adreno_dev, OOB_BOOT_SLUMBER_SET_MASK,
			OOB_BOOT_SLUMBER_CHECK_MASK,
			OOB_BOOT_SLUMBER_CLEAR_MASK);

	if (ret)
		dev_err(&gmu->pdev->dev, "OOB set after GMU booted timed out\n");

	return ret;
}

/*
 * a6xx_notify_slumber() - initiate request to GMU to prepare to slumber
 * @device: Pointer to KGSL device
 */
static int a6xx_notify_slumber(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct gmu_device *gmu = &device->gmu;
	int bus_level = pwr->pwrlevels[pwr->default_pwrlevel].bus_freq;
	int perf_idx = gmu->num_gpupwrlevels - pwr->default_pwrlevel - 1;
	int ret, state;

	if (!ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG)) {
		ret = hfi_notify_slumber(gmu, perf_idx, bus_level);
		return ret;
	}

	kgsl_gmu_regwrite(device, A6XX_GMU_BOOT_SLUMBER_OPTION,
			OOB_SLUMBER_OPTION);
	kgsl_gmu_regwrite(device, A6XX_GMU_GX_VOTE_IDX, bus_level);
	kgsl_gmu_regwrite(device, A6XX_GMU_MX_VOTE_IDX, perf_idx);

	ret = a6xx_oob_set(adreno_dev, OOB_BOOT_SLUMBER_SET_MASK,
			OOB_BOOT_SLUMBER_CHECK_MASK,
			OOB_BOOT_SLUMBER_CLEAR_MASK);
	a6xx_oob_clear(adreno_dev, OOB_BOOT_SLUMBER_CLEAR_MASK);

	if (ret)
		dev_err(&gmu->pdev->dev, "OOB set for slumber timed out\n");
	else {
		kgsl_gmu_regread(device, A6XX_GMU_RPMH_POWER_STATE, &state);
		if (state != GPU_HW_SLUMBER) {
			dev_err(&gmu->pdev->dev,
					"Failed to prepare for slumber\n");
			ret = -EINVAL;
		}
	}

	return ret;
}

static int a6xx_rpmh_power_on_gpu(struct kgsl_device *device)
{
	struct gmu_device *gmu = &device->gmu;
	struct device *dev = &gmu->pdev->dev;
	int ret = 0;

	if (device->state != KGSL_STATE_INIT &&
		device->state != KGSL_STATE_SUSPEND) {
		/* RSC wake sequence */
		kgsl_gmu_regwrite(device, A6XX_GMU_RSCC_CONTROL_REQ, BIT(1));

		/* Write request before polling */
		wmb();

		if (timed_poll_check(device,
				A6XX_GMU_RSCC_CONTROL_ACK,
				BIT(1),
				GPU_START_TIMEOUT,
				BIT(1))) {
			dev_err(dev, "Failed to do GPU RSC power on\n");
			return -EINVAL;
		}

		if (timed_poll_check(device,
				A6XX_RSCC_SEQ_BUSY_DRV0,
				0,
				GPU_START_TIMEOUT,
				0xFFFFFFFF))
			goto error_rsc;

		kgsl_gmu_regwrite(device, A6XX_GMU_RSCC_CONTROL_REQ, 0);

		/* Turn on the HM and SPTP head switches */
		ret = a6xx_hm_sptprac_control(device, true);
	}

	return ret;

error_rsc:
	dev_err(dev, "GPU RSC sequence stuck in waking up GPU\n");
	return -EINVAL;
}

static int a6xx_rpmh_power_off_gpu(struct kgsl_device *device)
{
	struct gmu_device *gmu = &device->gmu;
	int val, ret = 0;

	/* Turn off the SPTP and HM head switches */
	ret = a6xx_hm_sptprac_control(device, false);

	/* RSC sleep sequence */
	kgsl_gmu_regwrite(device, A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0, 1);
	kgsl_gmu_regwrite(device, A6XX_GMU_RSCC_CONTROL_REQ, 1);
	wmb();

	if (timed_poll_check(device,
			A6XX_RSCC_TIMESTAMP_UNIT1_OUTPUT_DRV0,
			BIT(0),
			GPU_START_TIMEOUT,
			BIT(0))) {
		dev_err(&gmu->pdev->dev, "GPU RSC power off fail\n");
		return -EINVAL;
	}

	/* Read to clear the timestamp */
	kgsl_gmu_regread(device, A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_L_DRV0,
			&val);
	kgsl_gmu_regread(device, A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_H_DRV0,
			&val);
	kgsl_gmu_regwrite(device, A6XX_GMU_RSCC_CONTROL_REQ, 0);

	kgsl_gmu_regwrite(device, A6XX_GMU_AO_SPARE_CNTL, 0);

	/* FIXME: v2 has different procedure to trigger sequence */

	return ret;
}

/*
 * a6xx_gmu_fw_start() - set up GMU and start FW
 * @device: Pointer to KGSL device
 * @boot_state: State of the GMU being started
 */
static int a6xx_gmu_fw_start(struct kgsl_device *device,
		unsigned int boot_state)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct gmu_device *gmu = &device->gmu;
	struct gmu_memdesc *mem_addr = gmu->hfi_mem;
	int ret, i;

	if (boot_state == GMU_COLD_BOOT || boot_state == GMU_RESET) {
		/* Turn on the HM and SPTP head switches */
		ret = a6xx_hm_sptprac_control(device, true);
		if (ret)
			return ret;

		/* Turn on TCM retention */
		kgsl_gmu_regwrite(device, A6XX_GMU_GENERAL_7, 1);

		if (!test_and_set_bit(GMU_BOOT_INIT_DONE, &gmu->flags))
			_load_gmu_rpmh_ucode(device);

		if (gmu->load_mode == TCM_BOOT) {
			/* Load GMU image via AHB bus */
			for (i = 0; i < MAX_GMUFW_SIZE; i++)
				kgsl_gmu_regwrite(device,
					A6XX_GMU_CM3_ITCM_START + i,
					*((uint32_t *) gmu->fw_image.
					hostptr + i));

			/* Prevent leaving reset before the FW is written */
			wmb();
		} else {
			dev_err(&gmu->pdev->dev, "Incorrect GMU load mode %d\n",
					gmu->load_mode);
			return -EINVAL;
		}
	} else {
		ret = a6xx_rpmh_power_on_gpu(device);
		if (ret)
			return ret;
	}

	/* Clear init result to make sure we are getting fresh value */
	kgsl_gmu_regwrite(device, A6XX_GMU_CM3_FW_INIT_RESULT, 0);
	kgsl_gmu_regwrite(device, A6XX_GMU_CM3_BOOT_CONFIG, gmu->load_mode);

	kgsl_gmu_regwrite(device, A6XX_GMU_HFI_QTBL_ADDR,
			mem_addr->gmuaddr);
	kgsl_gmu_regwrite(device, A6XX_GMU_HFI_QTBL_INFO, 1);

	kgsl_gmu_regwrite(device, A6XX_GMU_AHB_FENCE_RANGE_0,
			FENCE_RANGE_MASK);

	/* Configure power control and bring the GMU out of reset */
	a6xx_gmu_power_config(device);
	ret = a6xx_gmu_start(device);
	if (ret)
		return ret;

	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG)
			&& boot_state == GMU_COLD_BOOT) {
		ret = a6xx_gfx_rail_on(device);
		if (ret) {
			a6xx_oob_clear(adreno_dev,
					OOB_BOOT_SLUMBER_CLEAR_MASK);
			return ret;
		}
	}

	ret = a6xx_gmu_hfi_start(device);
	if (ret)
		return ret;

	/* Make sure the write to start HFI happens before sending a message */
	wmb();
	return ret;
}

/*
 * a6xx_gmu_dcvs_nohfi() - request GMU to do DCVS without using HFI
 * @device: Pointer to KGSL device
 * @perf_idx: Index into GPU performance level table defined in
 *	HFI DCVS table message
 * @bw_idx: Index into GPU b/w table defined in HFI b/w table message
 */
static int a6xx_gmu_dcvs_nohfi(struct kgsl_device *device,
		unsigned int perf_idx, unsigned int bw_idx)
{
	struct hfi_dcvs_cmd dcvs_cmd = {
		.ack_type = ACK_BLOCK,
		.freq = {
			.perf_idx = perf_idx,
			.clkset_opt = OPTION_AT_LEAST,
		},
		.bw = {
			.bw_idx = bw_idx,
		},
	};
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct gmu_device *gmu = &device->gmu;
	union gpu_perf_vote vote;
	int ret;

	if (device->state == KGSL_STATE_INIT ||
			device->state == KGSL_STATE_SUSPEND)
		dcvs_cmd.ack_type = ACK_NONBLOCK;

	kgsl_gmu_regwrite(device, A6XX_GMU_DCVS_ACK_OPTION, dcvs_cmd.ack_type);

	vote.fvote = dcvs_cmd.freq;
	kgsl_gmu_regwrite(device, A6XX_GMU_DCVS_PERF_SETTING, vote.raw);

	vote.bvote = dcvs_cmd.bw;
	kgsl_gmu_regwrite(device, A6XX_GMU_DCVS_BW_SETTING, vote.raw);

	ret = a6xx_oob_set(adreno_dev, OOB_DCVS_SET_MASK, OOB_DCVS_CHECK_MASK,
			OOB_DCVS_CLEAR_MASK);

	if (ret) {
		dev_err(&gmu->pdev->dev, "OOB set after GMU booted timed out\n");
		goto done;
	}

	kgsl_gmu_regread(device, A6XX_GMU_DCVS_RETURN, &ret);
	if (ret)
		dev_err(&gmu->pdev->dev, "OOB DCVS error %d\n", ret);

done:
	a6xx_oob_clear(adreno_dev, OOB_DCVS_CLEAR_MASK);

	return ret;
}

/*
 * a6xx_rpmh_gpu_pwrctrl() - GPU power control via RPMh/GMU interface
 * @adreno_dev: Pointer to adreno device
 * @mode: requested power mode
 * @arg1: first argument for mode control
 * @arg2: second argument for mode control
 */
static int a6xx_rpmh_gpu_pwrctrl(struct adreno_device *adreno_dev,
		unsigned int mode, unsigned int arg1, unsigned int arg2)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct gmu_device *gmu = &device->gmu;
	int ret;

	switch (mode) {
	case GMU_FW_START:
		ret = a6xx_gmu_fw_start(device, arg1);
		break;
	case GMU_FW_STOP:
		ret = a6xx_rpmh_power_off_gpu(device);
		break;
	case GMU_DCVS_NOHFI:
		ret = a6xx_gmu_dcvs_nohfi(device, arg1, arg2);
		break;
	case GMU_NOTIFY_SLUMBER:
		ret = a6xx_notify_slumber(device);
		break;
	default:
		dev_err(&gmu->pdev->dev,
				"unsupported GMU power ctrl mode:%d\n", mode);
		ret = -EINVAL;
		break;
	}

	return ret;
}

static bool a6xx_hw_isidle(struct adreno_device *adreno_dev)
{
	unsigned int reg;

	kgsl_gmu_regread(KGSL_DEVICE(adreno_dev),
			A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, &reg);
	return ((~reg & GPUBUSYIGNAHB) != 0);
}
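
/*
 * Reading the check above: the GPUBUSYIGNAHB field in the busy-status
 * register is 0 when the GPU is idle, so ~reg & GPUBUSYIGNAHB is
 * nonzero exactly when that busy bit is clear, i.e. the function
 * returns true for an idle GPU.
 */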

static int a6xx_wait_for_gmu_idle(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct gmu_device *gmu = &device->gmu;

	/* TODO: Remove this register write when firmware is updated */
	kgsl_gmu_regwrite(device, A6XX_GMU_CM3_FW_BUSY, 0);

	if (timed_poll_check(device, A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS,
			0, GMU_START_TIMEOUT, CXGXCPUBUSYIGNAHB)) {
		dev_err(&gmu->pdev->dev, "GMU is not idling\n");
		return -ETIMEDOUT;
	}

	return 0;
}

/*
 * _load_gmu_firmware() - Load the ucode into the GPMU RAM & PDC/RSC
 * @device: Pointer to KGSL device
 */
static int _load_gmu_firmware(struct kgsl_device *device)
{
	const struct firmware *fw = NULL;
	const struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct gmu_device *gmu = &device->gmu;
	const struct adreno_gpu_core *gpucore = adreno_dev->gpucore;
	int image_size, ret = -EINVAL;

	/* there is no GMU */
	if (!kgsl_gmu_isenabled(device))
		return 0;

	/* GMU fw already saved and verified so do nothing new */
	if (gmu->fw_image.hostptr != 0)
		return 0;

	if (gpucore->gpmufw_name == NULL)
		return -EINVAL;

	ret = request_firmware(&fw, gpucore->gpmufw_name, device->dev);
	if (ret || fw == NULL) {
		KGSL_CORE_ERR("request_firmware (%s) failed: %d\n",
				gpucore->gpmufw_name, ret);
		return ret;
	}

	image_size = PAGE_ALIGN(fw->size);

	ret = allocate_gmu_image(gmu, image_size);

	/* load into shared memory with GMU */
	if (!ret)
		memcpy(gmu->fw_image.hostptr, fw->data, fw->size);

	release_firmware(fw);

	return ret;
}

/*
 * a6xx_microcode_read() - Read microcode
 * @adreno_dev: Pointer to adreno device
 */
static int a6xx_microcode_read(struct adreno_device *adreno_dev)
{
	return _load_firmware(KGSL_DEVICE(adreno_dev),
			adreno_dev->gpucore->sqefw_name,
			ADRENO_FW(adreno_dev, ADRENO_FW_SQE));
}

static void a6xx_cp_hw_err_callback(struct adreno_device *adreno_dev, int bit)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned int status1, status2;

	kgsl_regread(device, A6XX_CP_INTERRUPT_STATUS, &status1);

	if (status1 & BIT(A6XX_CP_OPCODE_ERROR)) {
		unsigned int opcode;

		kgsl_regwrite(device, A6XX_CP_SQE_STAT_ADDR, 1);
		kgsl_regread(device, A6XX_CP_SQE_STAT_DATA, &opcode);
		KGSL_DRV_CRIT_RATELIMIT(device,
			"CP opcode error interrupt | opcode=0x%8.8x\n",
			opcode);
	}
	if (status1 & BIT(A6XX_CP_UCODE_ERROR))
		KGSL_DRV_CRIT_RATELIMIT(device, "CP ucode error interrupt\n");
	if (status1 & BIT(A6XX_CP_HW_FAULT_ERROR)) {
		kgsl_regread(device, A6XX_CP_HW_FAULT, &status2);
		KGSL_DRV_CRIT_RATELIMIT(device,
			"CP | Ringbuffer HW fault | status=%x\n",
			status2);
	}
	if (status1 & BIT(A6XX_CP_REGISTER_PROTECTION_ERROR)) {
		kgsl_regread(device, A6XX_CP_PROTECT_STATUS, &status2);
		KGSL_DRV_CRIT_RATELIMIT(device,
			"CP | Protected mode error | %s | addr=%x | status=%x\n",
			status2 & (1 << 20) ? "READ" : "WRITE",
			status2 & 0x3FFFF, status2);
	}
	if (status1 & BIT(A6XX_CP_AHB_ERROR))
		KGSL_DRV_CRIT_RATELIMIT(device,
			"CP AHB error interrupt\n");
	if (status1 & BIT(A6XX_CP_VSD_PARITY_ERROR))
		KGSL_DRV_CRIT_RATELIMIT(device,
			"CP VSD decoder parity error\n");
	if (status1 & BIT(A6XX_CP_ILLEGAL_INSTR_ERROR))
		KGSL_DRV_CRIT_RATELIMIT(device,
			"CP Illegal instruction error\n");
}

static void a6xx_err_callback(struct adreno_device *adreno_dev, int bit)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	switch (bit) {
	case A6XX_INT_CP_AHB_ERROR:
		KGSL_DRV_CRIT_RATELIMIT(device, "CP: AHB bus error\n");
		break;
	case A6XX_INT_ATB_ASYNCFIFO_OVERFLOW:
		KGSL_DRV_CRIT_RATELIMIT(device, "RBBM: ATB ASYNC overflow\n");
		break;
	case A6XX_INT_RBBM_ATB_BUS_OVERFLOW:
		KGSL_DRV_CRIT_RATELIMIT(device, "RBBM: ATB bus overflow\n");
		break;
	case A6XX_INT_UCHE_OOB_ACCESS:
		KGSL_DRV_CRIT_RATELIMIT(device, "UCHE: Out of bounds access\n");
		break;
	case A6XX_INT_UCHE_TRAP_INTR:
		KGSL_DRV_CRIT_RATELIMIT(device, "UCHE: Trap interrupt\n");
		break;
	default:
		KGSL_DRV_CRIT_RATELIMIT(device, "Unknown interrupt %d\n", bit);
	}
}

/* GPU System Cache control registers */
#define A6XX_GPU_CX_MISC_SYSTEM_CACHE_CNTL_0	0x4
#define A6XX_GPU_CX_MISC_SYSTEM_CACHE_CNTL_1	0x8

static inline void _reg_rmw(void __iomem *regaddr,
	unsigned int mask, unsigned int bits)
{
	unsigned int val = 0;

	val = __raw_readl(regaddr);
	/* Make sure the above read completes before we proceed */
	rmb();
	val &= ~mask;
	__raw_writel(val | bits, regaddr);
	/* Make sure the above write posts before we proceed */
	wmb();
}
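
/*
 * Usage sketch (illustrative, not part of the upstream file): _reg_rmw()
 * rewrites only the masked field and preserves the rest of the register.
 * Clearing just the pagetable-walker SCID field, for example, would be
 *
 *	_reg_rmw(gpu_cx_reg + A6XX_GPU_CX_MISC_SYSTEM_CACHE_CNTL_1,
 *			A6XX_GPUHTW_LLC_SCID_MASK, 0);
 *
 * assuming gpu_cx_reg was mapped with ioremap() as in the helpers below.
 */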

/*
 * a6xx_llc_configure_gpu_scid() - Program the sub-cache ID for all GPU blocks
 * @adreno_dev: The adreno device pointer
 */
static void a6xx_llc_configure_gpu_scid(struct adreno_device *adreno_dev)
{
	uint32_t gpu_scid;
	uint32_t gpu_cntl1_val = 0;
	int i;
	void __iomem *gpu_cx_reg;

	gpu_scid = adreno_llc_get_scid(adreno_dev->gpu_llc_slice);
	for (i = 0; i < A6XX_LLC_NUM_GPU_SCIDS; i++)
		gpu_cntl1_val = (gpu_cntl1_val << A6XX_GPU_LLC_SCID_NUM_BITS)
			| gpu_scid;

	gpu_cx_reg = ioremap(A6XX_GPU_CX_REG_BASE, A6XX_GPU_CX_REG_SIZE);
	_reg_rmw(gpu_cx_reg + A6XX_GPU_CX_MISC_SYSTEM_CACHE_CNTL_1,
		A6XX_GPU_LLC_SCID_MASK, gpu_cntl1_val);
	iounmap(gpu_cx_reg);
}
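
/*
 * Worked example (illustrative, using the macro values defined above): the
 * loop replicates the 5-bit SCID once per GPU block, which is equivalent to
 * gpu_scid * 0x108421 (0x108421 = 2^20 + 2^15 + 2^10 + 2^5 + 1). For
 * gpu_scid = 2, the value programmed into the SCID field is 0x210842.
 */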

/*
 * a6xx_llc_configure_gpuhtw_scid() - Program the SCID for GPU pagetables
 * @adreno_dev: The adreno device pointer
 */
static void a6xx_llc_configure_gpuhtw_scid(struct adreno_device *adreno_dev)
{
	uint32_t gpuhtw_scid;
	void __iomem *gpu_cx_reg;

	gpuhtw_scid = adreno_llc_get_scid(adreno_dev->gpuhtw_llc_slice);

	gpu_cx_reg = ioremap(A6XX_GPU_CX_REG_BASE, A6XX_GPU_CX_REG_SIZE);
	_reg_rmw(gpu_cx_reg + A6XX_GPU_CX_MISC_SYSTEM_CACHE_CNTL_1,
		A6XX_GPUHTW_LLC_SCID_MASK,
		gpuhtw_scid << A6XX_GPUHTW_LLC_SCID_SHIFT);
	iounmap(gpu_cx_reg);
}

/*
 * a6xx_llc_enable_overrides() - Override the page attributes
 * @adreno_dev: The adreno device pointer
 */
static void a6xx_llc_enable_overrides(struct adreno_device *adreno_dev)
{
	void __iomem *gpu_cx_reg;

	/*
	 * 0x3: readnoallocoverrideen=0
	 *      read-no-alloc=0 - Allocate lines on read miss
	 *      writenoallocoverrideen=1
	 *      write-no-alloc=1 - Do not allocate lines on write miss
	 */
	gpu_cx_reg = ioremap(A6XX_GPU_CX_REG_BASE, A6XX_GPU_CX_REG_SIZE);
	__raw_writel(0x3, gpu_cx_reg + A6XX_GPU_CX_MISC_SYSTEM_CACHE_CNTL_0);
	/* Make sure the above write posts before we proceed */
	wmb();
	iounmap(gpu_cx_reg);
}

static const char *fault_block[8] = {
	[0] = "CP",
	[1] = "UCHE",
	[2] = "VFD",
	[3] = "UCHE",
	[4] = "CCU",
	[5] = "unknown",
	[6] = "CDP Prefetch",
	[7] = "GPMU",
};

static const char *uche_client[8] = {
	[0] = "VFD",
	[1] = "SP",
	[2] = "VSC",
	[3] = "VPC",
	[4] = "HLSQ",
	[5] = "PC",
	[6] = "LRZ",
	[7] = "unknown",
};

static const char *a6xx_iommu_fault_block(struct adreno_device *adreno_dev,
		unsigned int fsynr1)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned int client_id;
	unsigned int uche_client_id;

	client_id = fsynr1 & 0xff;

	if (client_id >= ARRAY_SIZE(fault_block))
		return "unknown";
	else if (client_id != 3)
		return fault_block[client_id];

	mutex_lock(&device->mutex);
	kgsl_regread(device, A6XX_UCHE_CLIENT_PF, &uche_client_id);
	mutex_unlock(&device->mutex);

	return uche_client[uche_client_id & A6XX_UCHE_CLIENT_PF_CLIENT_ID_MASK];
}
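
/*
 * Example decode (illustrative, not part of the upstream file): fsynr1 = 0x04
 * gives client_id 4 and the fault is attributed to "CCU" straight from
 * fault_block[]; fsynr1 = 0x03 selects client 3 (UCHE), so the actual
 * originator is read back from A6XX_UCHE_CLIENT_PF and reported from
 * uche_client[].
 */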

#define A6XX_INT_MASK \
	((1 << A6XX_INT_CP_AHB_ERROR) | \
	(1 << A6XX_INT_ATB_ASYNCFIFO_OVERFLOW) | \
	(1 << A6XX_INT_RBBM_GPC_ERROR) | \
	(1 << A6XX_INT_CP_SW) | \
	(1 << A6XX_INT_CP_HW_ERROR) | \
	(1 << A6XX_INT_CP_IB2) | \
	(1 << A6XX_INT_CP_IB1) | \
	(1 << A6XX_INT_CP_RB) | \
	(1 << A6XX_INT_CP_CACHE_FLUSH_TS) | \
	(1 << A6XX_INT_RBBM_ATB_BUS_OVERFLOW) | \
	(1 << A6XX_INT_RBBM_HANG_DETECT) | \
	(1 << A6XX_INT_UCHE_OOB_ACCESS) | \
	(1 << A6XX_INT_UCHE_TRAP_INTR))

static struct adreno_irq_funcs a6xx_irq_funcs[32] = {
	ADRENO_IRQ_CALLBACK(NULL), /* 0 - RBBM_GPU_IDLE */
	ADRENO_IRQ_CALLBACK(a6xx_err_callback), /* 1 - RBBM_AHB_ERROR */
	ADRENO_IRQ_CALLBACK(NULL), /* 2 - UNUSED */
	ADRENO_IRQ_CALLBACK(NULL), /* 3 - UNUSED */
	ADRENO_IRQ_CALLBACK(NULL), /* 4 - UNUSED */
	ADRENO_IRQ_CALLBACK(NULL), /* 5 - UNUSED */
	/* 6 - RBBM_ATB_ASYNC_OVERFLOW */
	ADRENO_IRQ_CALLBACK(a6xx_err_callback),
	ADRENO_IRQ_CALLBACK(NULL), /* 7 - GPC_ERR */
	ADRENO_IRQ_CALLBACK(NULL), /* 8 - CP_SW */
	ADRENO_IRQ_CALLBACK(a6xx_cp_hw_err_callback), /* 9 - CP_HW_ERROR */
	ADRENO_IRQ_CALLBACK(NULL), /* 10 - CP_CCU_FLUSH_DEPTH_TS */
	ADRENO_IRQ_CALLBACK(NULL), /* 11 - CP_CCU_FLUSH_COLOR_TS */
	ADRENO_IRQ_CALLBACK(NULL), /* 12 - CP_CCU_RESOLVE_TS */
	ADRENO_IRQ_CALLBACK(adreno_cp_callback), /* 13 - CP_IB2_INT */
	ADRENO_IRQ_CALLBACK(adreno_cp_callback), /* 14 - CP_IB1_INT */
	ADRENO_IRQ_CALLBACK(adreno_cp_callback), /* 15 - CP_RB_INT */
	ADRENO_IRQ_CALLBACK(NULL), /* 16 - UNUSED */
	ADRENO_IRQ_CALLBACK(NULL), /* 17 - CP_RB_DONE_TS */
	ADRENO_IRQ_CALLBACK(NULL), /* 18 - CP_WT_DONE_TS */
	ADRENO_IRQ_CALLBACK(NULL), /* 19 - UNUSED */
	ADRENO_IRQ_CALLBACK(adreno_cp_callback), /* 20 - CP_CACHE_FLUSH_TS */
	ADRENO_IRQ_CALLBACK(NULL), /* 21 - UNUSED */
	ADRENO_IRQ_CALLBACK(a6xx_err_callback), /* 22 - RBBM_ATB_BUS_OVERFLOW */
	/* 23 - MISC_HANG_DETECT */
	ADRENO_IRQ_CALLBACK(adreno_hang_int_callback),
	ADRENO_IRQ_CALLBACK(a6xx_err_callback), /* 24 - UCHE_OOB_ACCESS */
	ADRENO_IRQ_CALLBACK(a6xx_err_callback), /* 25 - UCHE_TRAP_INTR */
	ADRENO_IRQ_CALLBACK(NULL), /* 26 - DEBBUS_INTR_0 */
	ADRENO_IRQ_CALLBACK(NULL), /* 27 - DEBBUS_INTR_1 */
	ADRENO_IRQ_CALLBACK(NULL), /* 28 - UNUSED */
	ADRENO_IRQ_CALLBACK(NULL), /* 29 - UNUSED */
	ADRENO_IRQ_CALLBACK(NULL), /* 30 - ISDB_CPU_IRQ */
	ADRENO_IRQ_CALLBACK(NULL), /* 31 - ISDB_UNDER_DEBUG */
};

static struct adreno_irq a6xx_irq = {
	.funcs = a6xx_irq_funcs,
	.mask = A6XX_INT_MASK,
};

static struct adreno_snapshot_sizes a6xx_snap_sizes = {
	.cp_pfp = 0x33,
	.roq = 0x400,
};

static struct adreno_snapshot_data a6xx_snapshot_data = {
	.sect_sizes = &a6xx_snap_sizes,
};

static struct adreno_perfcount_register a6xx_perfcounters_cp[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_0_LO,
		A6XX_RBBM_PERFCTR_CP_0_HI, 0, A6XX_CP_PERFCTR_CP_SEL_0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_1_LO,
		A6XX_RBBM_PERFCTR_CP_1_HI, 1, A6XX_CP_PERFCTR_CP_SEL_1 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_2_LO,
		A6XX_RBBM_PERFCTR_CP_2_HI, 2, A6XX_CP_PERFCTR_CP_SEL_2 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_3_LO,
		A6XX_RBBM_PERFCTR_CP_3_HI, 3, A6XX_CP_PERFCTR_CP_SEL_3 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_4_LO,
		A6XX_RBBM_PERFCTR_CP_4_HI, 4, A6XX_CP_PERFCTR_CP_SEL_4 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_5_LO,
		A6XX_RBBM_PERFCTR_CP_5_HI, 5, A6XX_CP_PERFCTR_CP_SEL_5 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_6_LO,
		A6XX_RBBM_PERFCTR_CP_6_HI, 6, A6XX_CP_PERFCTR_CP_SEL_6 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_7_LO,
		A6XX_RBBM_PERFCTR_CP_7_HI, 7, A6XX_CP_PERFCTR_CP_SEL_7 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_8_LO,
		A6XX_RBBM_PERFCTR_CP_8_HI, 8, A6XX_CP_PERFCTR_CP_SEL_8 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_9_LO,
		A6XX_RBBM_PERFCTR_CP_9_HI, 9, A6XX_CP_PERFCTR_CP_SEL_9 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_10_LO,
		A6XX_RBBM_PERFCTR_CP_10_HI, 10, A6XX_CP_PERFCTR_CP_SEL_10 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_11_LO,
		A6XX_RBBM_PERFCTR_CP_11_HI, 11, A6XX_CP_PERFCTR_CP_SEL_11 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_12_LO,
		A6XX_RBBM_PERFCTR_CP_12_HI, 12, A6XX_CP_PERFCTR_CP_SEL_12 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_13_LO,
		A6XX_RBBM_PERFCTR_CP_13_HI, 13, A6XX_CP_PERFCTR_CP_SEL_13 },
};

static struct adreno_perfcount_register a6xx_perfcounters_rbbm[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RBBM_0_LO,
		A6XX_RBBM_PERFCTR_RBBM_0_HI, 15, A6XX_RBBM_PERFCTR_RBBM_SEL_0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RBBM_1_LO,
		A6XX_RBBM_PERFCTR_RBBM_1_HI, 15, A6XX_RBBM_PERFCTR_RBBM_SEL_1 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RBBM_2_LO,
		A6XX_RBBM_PERFCTR_RBBM_2_HI, 16, A6XX_RBBM_PERFCTR_RBBM_SEL_2 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RBBM_3_LO,
		A6XX_RBBM_PERFCTR_RBBM_3_HI, 17, A6XX_RBBM_PERFCTR_RBBM_SEL_3 },
};

static struct adreno_perfcount_register a6xx_perfcounters_pc[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_0_LO,
		A6XX_RBBM_PERFCTR_PC_0_HI, 18, A6XX_PC_PERFCTR_PC_SEL_0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_1_LO,
		A6XX_RBBM_PERFCTR_PC_1_HI, 19, A6XX_PC_PERFCTR_PC_SEL_1 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_2_LO,
		A6XX_RBBM_PERFCTR_PC_2_HI, 20, A6XX_PC_PERFCTR_PC_SEL_2 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_3_LO,
		A6XX_RBBM_PERFCTR_PC_3_HI, 21, A6XX_PC_PERFCTR_PC_SEL_3 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_4_LO,
		A6XX_RBBM_PERFCTR_PC_4_HI, 22, A6XX_PC_PERFCTR_PC_SEL_4 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_5_LO,
		A6XX_RBBM_PERFCTR_PC_5_HI, 23, A6XX_PC_PERFCTR_PC_SEL_5 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_6_LO,
		A6XX_RBBM_PERFCTR_PC_6_HI, 24, A6XX_PC_PERFCTR_PC_SEL_6 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_7_LO,
		A6XX_RBBM_PERFCTR_PC_7_HI, 25, A6XX_PC_PERFCTR_PC_SEL_7 },
};

static struct adreno_perfcount_register a6xx_perfcounters_vfd[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_0_LO,
		A6XX_RBBM_PERFCTR_VFD_0_HI, 26, A6XX_VFD_PERFCTR_VFD_SEL_0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_1_LO,
		A6XX_RBBM_PERFCTR_VFD_1_HI, 27, A6XX_VFD_PERFCTR_VFD_SEL_1 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_2_LO,
		A6XX_RBBM_PERFCTR_VFD_2_HI, 28, A6XX_VFD_PERFCTR_VFD_SEL_2 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_3_LO,
		A6XX_RBBM_PERFCTR_VFD_3_HI, 29, A6XX_VFD_PERFCTR_VFD_SEL_3 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_4_LO,
		A6XX_RBBM_PERFCTR_VFD_4_HI, 30, A6XX_VFD_PERFCTR_VFD_SEL_4 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_5_LO,
		A6XX_RBBM_PERFCTR_VFD_5_HI, 31, A6XX_VFD_PERFCTR_VFD_SEL_5 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_6_LO,
		A6XX_RBBM_PERFCTR_VFD_6_HI, 32, A6XX_VFD_PERFCTR_VFD_SEL_6 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_7_LO,
		A6XX_RBBM_PERFCTR_VFD_7_HI, 33, A6XX_VFD_PERFCTR_VFD_SEL_7 },
};

static struct adreno_perfcount_register a6xx_perfcounters_hlsq[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_0_LO,
		A6XX_RBBM_PERFCTR_HLSQ_0_HI, 34, A6XX_HLSQ_PERFCTR_HLSQ_SEL_0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_1_LO,
		A6XX_RBBM_PERFCTR_HLSQ_1_HI, 35, A6XX_HLSQ_PERFCTR_HLSQ_SEL_1 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_2_LO,
		A6XX_RBBM_PERFCTR_HLSQ_2_HI, 36, A6XX_HLSQ_PERFCTR_HLSQ_SEL_2 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_3_LO,
		A6XX_RBBM_PERFCTR_HLSQ_3_HI, 37, A6XX_HLSQ_PERFCTR_HLSQ_SEL_3 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_4_LO,
		A6XX_RBBM_PERFCTR_HLSQ_4_HI, 38, A6XX_HLSQ_PERFCTR_HLSQ_SEL_4 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_5_LO,
		A6XX_RBBM_PERFCTR_HLSQ_5_HI, 39, A6XX_HLSQ_PERFCTR_HLSQ_SEL_5 },
};

static struct adreno_perfcount_register a6xx_perfcounters_vpc[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_0_LO,
		A6XX_RBBM_PERFCTR_VPC_0_HI, 40, A6XX_VPC_PERFCTR_VPC_SEL_0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_1_LO,
		A6XX_RBBM_PERFCTR_VPC_1_HI, 41, A6XX_VPC_PERFCTR_VPC_SEL_1 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_2_LO,
		A6XX_RBBM_PERFCTR_VPC_2_HI, 42, A6XX_VPC_PERFCTR_VPC_SEL_2 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_3_LO,
		A6XX_RBBM_PERFCTR_VPC_3_HI, 43, A6XX_VPC_PERFCTR_VPC_SEL_3 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_4_LO,
		A6XX_RBBM_PERFCTR_VPC_4_HI, 44, A6XX_VPC_PERFCTR_VPC_SEL_4 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_5_LO,
		A6XX_RBBM_PERFCTR_VPC_5_HI, 45, A6XX_VPC_PERFCTR_VPC_SEL_5 },
};

static struct adreno_perfcount_register a6xx_perfcounters_ccu[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CCU_0_LO,
		A6XX_RBBM_PERFCTR_CCU_0_HI, 46, A6XX_RB_PERFCTR_CCU_SEL_0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CCU_1_LO,
		A6XX_RBBM_PERFCTR_CCU_1_HI, 47, A6XX_RB_PERFCTR_CCU_SEL_1 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CCU_2_LO,
		A6XX_RBBM_PERFCTR_CCU_2_HI, 48, A6XX_RB_PERFCTR_CCU_SEL_2 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CCU_3_LO,
		A6XX_RBBM_PERFCTR_CCU_3_HI, 49, A6XX_RB_PERFCTR_CCU_SEL_3 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CCU_4_LO,
		A6XX_RBBM_PERFCTR_CCU_4_HI, 50, A6XX_RB_PERFCTR_CCU_SEL_4 },
};

static struct adreno_perfcount_register a6xx_perfcounters_tse[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TSE_0_LO,
		A6XX_RBBM_PERFCTR_TSE_0_HI, 51, A6XX_GRAS_PERFCTR_TSE_SEL_0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TSE_1_LO,
		A6XX_RBBM_PERFCTR_TSE_1_HI, 52, A6XX_GRAS_PERFCTR_TSE_SEL_1 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TSE_2_LO,
		A6XX_RBBM_PERFCTR_TSE_2_HI, 53, A6XX_GRAS_PERFCTR_TSE_SEL_2 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TSE_3_LO,
		A6XX_RBBM_PERFCTR_TSE_3_HI, 54, A6XX_GRAS_PERFCTR_TSE_SEL_3 },
};

static struct adreno_perfcount_register a6xx_perfcounters_ras[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RAS_0_LO,
		A6XX_RBBM_PERFCTR_RAS_0_HI, 55, A6XX_GRAS_PERFCTR_RAS_SEL_0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RAS_1_LO,
		A6XX_RBBM_PERFCTR_RAS_1_HI, 56, A6XX_GRAS_PERFCTR_RAS_SEL_1 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RAS_2_LO,
		A6XX_RBBM_PERFCTR_RAS_2_HI, 57, A6XX_GRAS_PERFCTR_RAS_SEL_2 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RAS_3_LO,
		A6XX_RBBM_PERFCTR_RAS_3_HI, 58, A6XX_GRAS_PERFCTR_RAS_SEL_3 },
};

static struct adreno_perfcount_register a6xx_perfcounters_uche[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_0_LO,
		A6XX_RBBM_PERFCTR_UCHE_0_HI, 59, A6XX_UCHE_PERFCTR_UCHE_SEL_0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_1_LO,
		A6XX_RBBM_PERFCTR_UCHE_1_HI, 60, A6XX_UCHE_PERFCTR_UCHE_SEL_1 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_2_LO,
		A6XX_RBBM_PERFCTR_UCHE_2_HI, 61, A6XX_UCHE_PERFCTR_UCHE_SEL_2 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_3_LO,
		A6XX_RBBM_PERFCTR_UCHE_3_HI, 62, A6XX_UCHE_PERFCTR_UCHE_SEL_3 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_4_LO,
		A6XX_RBBM_PERFCTR_UCHE_4_HI, 63, A6XX_UCHE_PERFCTR_UCHE_SEL_4 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_5_LO,
		A6XX_RBBM_PERFCTR_UCHE_5_HI, 64, A6XX_UCHE_PERFCTR_UCHE_SEL_5 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_6_LO,
		A6XX_RBBM_PERFCTR_UCHE_6_HI, 65, A6XX_UCHE_PERFCTR_UCHE_SEL_6 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_7_LO,
		A6XX_RBBM_PERFCTR_UCHE_7_HI, 66, A6XX_UCHE_PERFCTR_UCHE_SEL_7 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_8_LO,
		A6XX_RBBM_PERFCTR_UCHE_8_HI, 67, A6XX_UCHE_PERFCTR_UCHE_SEL_8 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_9_LO,
		A6XX_RBBM_PERFCTR_UCHE_9_HI, 68, A6XX_UCHE_PERFCTR_UCHE_SEL_9 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_10_LO,
		A6XX_RBBM_PERFCTR_UCHE_10_HI, 69,
		A6XX_UCHE_PERFCTR_UCHE_SEL_10 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_11_LO,
		A6XX_RBBM_PERFCTR_UCHE_11_HI, 70,
		A6XX_UCHE_PERFCTR_UCHE_SEL_11 },
};

static struct adreno_perfcount_register a6xx_perfcounters_tp[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_0_LO,
		A6XX_RBBM_PERFCTR_TP_0_HI, 71, A6XX_TPL1_PERFCTR_TP_SEL_0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_1_LO,
		A6XX_RBBM_PERFCTR_TP_1_HI, 72, A6XX_TPL1_PERFCTR_TP_SEL_1 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_2_LO,
		A6XX_RBBM_PERFCTR_TP_2_HI, 73, A6XX_TPL1_PERFCTR_TP_SEL_2 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_3_LO,
		A6XX_RBBM_PERFCTR_TP_3_HI, 74, A6XX_TPL1_PERFCTR_TP_SEL_3 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_4_LO,
		A6XX_RBBM_PERFCTR_TP_4_HI, 75, A6XX_TPL1_PERFCTR_TP_SEL_4 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_5_LO,
		A6XX_RBBM_PERFCTR_TP_5_HI, 76, A6XX_TPL1_PERFCTR_TP_SEL_5 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_6_LO,
		A6XX_RBBM_PERFCTR_TP_6_HI, 77, A6XX_TPL1_PERFCTR_TP_SEL_6 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_7_LO,
		A6XX_RBBM_PERFCTR_TP_7_HI, 78, A6XX_TPL1_PERFCTR_TP_SEL_7 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_8_LO,
		A6XX_RBBM_PERFCTR_TP_8_HI, 79, A6XX_TPL1_PERFCTR_TP_SEL_8 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_9_LO,
		A6XX_RBBM_PERFCTR_TP_9_HI, 80, A6XX_TPL1_PERFCTR_TP_SEL_9 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_10_LO,
		A6XX_RBBM_PERFCTR_TP_10_HI, 81, A6XX_TPL1_PERFCTR_TP_SEL_10 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_11_LO,
		A6XX_RBBM_PERFCTR_TP_11_HI, 82, A6XX_TPL1_PERFCTR_TP_SEL_11 },
};

static struct adreno_perfcount_register a6xx_perfcounters_sp[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_0_LO,
		A6XX_RBBM_PERFCTR_SP_0_HI, 83, A6XX_SP_PERFCTR_SP_SEL_0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_1_LO,
		A6XX_RBBM_PERFCTR_SP_1_HI, 84, A6XX_SP_PERFCTR_SP_SEL_1 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_2_LO,
		A6XX_RBBM_PERFCTR_SP_2_HI, 85, A6XX_SP_PERFCTR_SP_SEL_2 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_3_LO,
		A6XX_RBBM_PERFCTR_SP_3_HI, 86, A6XX_SP_PERFCTR_SP_SEL_3 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_4_LO,
		A6XX_RBBM_PERFCTR_SP_4_HI, 87, A6XX_SP_PERFCTR_SP_SEL_4 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_5_LO,
		A6XX_RBBM_PERFCTR_SP_5_HI, 88, A6XX_SP_PERFCTR_SP_SEL_5 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_6_LO,
		A6XX_RBBM_PERFCTR_SP_6_HI, 89, A6XX_SP_PERFCTR_SP_SEL_6 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_7_LO,
		A6XX_RBBM_PERFCTR_SP_7_HI, 90, A6XX_SP_PERFCTR_SP_SEL_7 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_8_LO,
		A6XX_RBBM_PERFCTR_SP_8_HI, 91, A6XX_SP_PERFCTR_SP_SEL_8 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_9_LO,
		A6XX_RBBM_PERFCTR_SP_9_HI, 92, A6XX_SP_PERFCTR_SP_SEL_9 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_10_LO,
		A6XX_RBBM_PERFCTR_SP_10_HI, 93, A6XX_SP_PERFCTR_SP_SEL_10 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_11_LO,
		A6XX_RBBM_PERFCTR_SP_11_HI, 94, A6XX_SP_PERFCTR_SP_SEL_11 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_12_LO,
		A6XX_RBBM_PERFCTR_SP_12_HI, 95, A6XX_SP_PERFCTR_SP_SEL_12 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_13_LO,
		A6XX_RBBM_PERFCTR_SP_13_HI, 96, A6XX_SP_PERFCTR_SP_SEL_13 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_14_LO,
		A6XX_RBBM_PERFCTR_SP_14_HI, 97, A6XX_SP_PERFCTR_SP_SEL_14 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_15_LO,
		A6XX_RBBM_PERFCTR_SP_15_HI, 98, A6XX_SP_PERFCTR_SP_SEL_15 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_16_LO,
		A6XX_RBBM_PERFCTR_SP_16_HI, 99, A6XX_SP_PERFCTR_SP_SEL_16 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_17_LO,
		A6XX_RBBM_PERFCTR_SP_17_HI, 100, A6XX_SP_PERFCTR_SP_SEL_17 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_18_LO,
		A6XX_RBBM_PERFCTR_SP_18_HI, 101, A6XX_SP_PERFCTR_SP_SEL_18 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_19_LO,
		A6XX_RBBM_PERFCTR_SP_19_HI, 102, A6XX_SP_PERFCTR_SP_SEL_19 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_20_LO,
		A6XX_RBBM_PERFCTR_SP_20_HI, 103, A6XX_SP_PERFCTR_SP_SEL_20 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_21_LO,
		A6XX_RBBM_PERFCTR_SP_21_HI, 104, A6XX_SP_PERFCTR_SP_SEL_21 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_22_LO,
		A6XX_RBBM_PERFCTR_SP_22_HI, 105, A6XX_SP_PERFCTR_SP_SEL_22 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_23_LO,
		A6XX_RBBM_PERFCTR_SP_23_HI, 106, A6XX_SP_PERFCTR_SP_SEL_23 },
};

static struct adreno_perfcount_register a6xx_perfcounters_rb[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_0_LO,
		A6XX_RBBM_PERFCTR_RB_0_HI, 107, A6XX_RB_PERFCTR_RB_SEL_0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_1_LO,
		A6XX_RBBM_PERFCTR_RB_1_HI, 108, A6XX_RB_PERFCTR_RB_SEL_1 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_2_LO,
		A6XX_RBBM_PERFCTR_RB_2_HI, 109, A6XX_RB_PERFCTR_RB_SEL_2 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_3_LO,
		A6XX_RBBM_PERFCTR_RB_3_HI, 110, A6XX_RB_PERFCTR_RB_SEL_3 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_4_LO,
		A6XX_RBBM_PERFCTR_RB_4_HI, 111, A6XX_RB_PERFCTR_RB_SEL_4 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_5_LO,
		A6XX_RBBM_PERFCTR_RB_5_HI, 112, A6XX_RB_PERFCTR_RB_SEL_5 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_6_LO,
		A6XX_RBBM_PERFCTR_RB_6_HI, 113, A6XX_RB_PERFCTR_RB_SEL_6 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_7_LO,
		A6XX_RBBM_PERFCTR_RB_7_HI, 114, A6XX_RB_PERFCTR_RB_SEL_7 },
};

static struct adreno_perfcount_register a6xx_perfcounters_vsc[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VSC_0_LO,
		A6XX_RBBM_PERFCTR_VSC_0_HI, 115, A6XX_VSC_PERFCTR_VSC_SEL_0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VSC_1_LO,
		A6XX_RBBM_PERFCTR_VSC_1_HI, 116, A6XX_VSC_PERFCTR_VSC_SEL_1 },
};

static struct adreno_perfcount_register a6xx_perfcounters_lrz[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_LRZ_0_LO,
		A6XX_RBBM_PERFCTR_LRZ_0_HI, 117, A6XX_GRAS_PERFCTR_LRZ_SEL_0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_LRZ_1_LO,
		A6XX_RBBM_PERFCTR_LRZ_1_HI, 118, A6XX_GRAS_PERFCTR_LRZ_SEL_1 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_LRZ_2_LO,
		A6XX_RBBM_PERFCTR_LRZ_2_HI, 119, A6XX_GRAS_PERFCTR_LRZ_SEL_2 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_LRZ_3_LO,
		A6XX_RBBM_PERFCTR_LRZ_3_HI, 120, A6XX_GRAS_PERFCTR_LRZ_SEL_3 },
};

static struct adreno_perfcount_register a6xx_perfcounters_cmp[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CMP_0_LO,
		A6XX_RBBM_PERFCTR_CMP_0_HI, 121, A6XX_RB_PERFCTR_CMP_SEL_0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CMP_1_LO,
		A6XX_RBBM_PERFCTR_CMP_1_HI, 122, A6XX_RB_PERFCTR_CMP_SEL_1 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CMP_2_LO,
		A6XX_RBBM_PERFCTR_CMP_2_HI, 123, A6XX_RB_PERFCTR_CMP_SEL_2 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CMP_3_LO,
		A6XX_RBBM_PERFCTR_CMP_3_HI, 124, A6XX_RB_PERFCTR_CMP_SEL_3 },
};

static struct adreno_perfcount_register a6xx_perfcounters_vbif[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_CNT_LOW0,
		A6XX_VBIF_PERF_CNT_HIGH0, -1, A6XX_VBIF_PERF_CNT_SEL0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_CNT_LOW1,
		A6XX_VBIF_PERF_CNT_HIGH1, -1, A6XX_VBIF_PERF_CNT_SEL1 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_CNT_LOW2,
		A6XX_VBIF_PERF_CNT_HIGH2, -1, A6XX_VBIF_PERF_CNT_SEL2 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_CNT_LOW3,
		A6XX_VBIF_PERF_CNT_HIGH3, -1, A6XX_VBIF_PERF_CNT_SEL3 },
};

static struct adreno_perfcount_register a6xx_perfcounters_vbif_pwr[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_PWR_CNT_LOW0,
		A6XX_VBIF_PERF_PWR_CNT_HIGH0, -1, A6XX_VBIF_PERF_PWR_CNT_EN0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_PWR_CNT_LOW1,
		A6XX_VBIF_PERF_PWR_CNT_HIGH1, -1, A6XX_VBIF_PERF_PWR_CNT_EN1 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_PWR_CNT_LOW2,
		A6XX_VBIF_PERF_PWR_CNT_HIGH2, -1, A6XX_VBIF_PERF_PWR_CNT_EN2 },
};

static struct adreno_perfcount_register a6xx_perfcounters_alwayson[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_CP_ALWAYS_ON_COUNTER_LO,
		A6XX_CP_ALWAYS_ON_COUNTER_HI, -1 },
};

#define A6XX_PERFCOUNTER_GROUP(offset, name) \
	ADRENO_PERFCOUNTER_GROUP(a6xx, offset, name)

#define A6XX_PERFCOUNTER_GROUP_FLAGS(offset, name, flags) \
	ADRENO_PERFCOUNTER_GROUP_FLAGS(a6xx, offset, name, flags)
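
/*
 * Sketch of what the wrappers above expand to (illustrative; the real macro
 * bodies are assumed to live in adreno.h): A6XX_PERFCOUNTER_GROUP(CP, cp)
 * binds the KGSL_PERFCOUNTER_GROUP_CP slot to the a6xx_perfcounters_cp[]
 * table defined earlier in this file.
 */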

static struct adreno_perfcount_group a6xx_perfcounter_groups
				[KGSL_PERFCOUNTER_GROUP_MAX] = {
	A6XX_PERFCOUNTER_GROUP(CP, cp),
	A6XX_PERFCOUNTER_GROUP(RBBM, rbbm),
	A6XX_PERFCOUNTER_GROUP(PC, pc),
	A6XX_PERFCOUNTER_GROUP(VFD, vfd),
	A6XX_PERFCOUNTER_GROUP(HLSQ, hlsq),
	A6XX_PERFCOUNTER_GROUP(VPC, vpc),
	A6XX_PERFCOUNTER_GROUP(CCU, ccu),
	A6XX_PERFCOUNTER_GROUP(CMP, cmp),
	A6XX_PERFCOUNTER_GROUP(TSE, tse),
	A6XX_PERFCOUNTER_GROUP(RAS, ras),
	A6XX_PERFCOUNTER_GROUP(LRZ, lrz),
	A6XX_PERFCOUNTER_GROUP(UCHE, uche),
	A6XX_PERFCOUNTER_GROUP(TP, tp),
	A6XX_PERFCOUNTER_GROUP(SP, sp),
	A6XX_PERFCOUNTER_GROUP(RB, rb),
	A6XX_PERFCOUNTER_GROUP(VSC, vsc),
	A6XX_PERFCOUNTER_GROUP(VBIF, vbif),
	A6XX_PERFCOUNTER_GROUP_FLAGS(VBIF_PWR, vbif_pwr,
		ADRENO_PERFCOUNTER_GROUP_FIXED),
	A6XX_PERFCOUNTER_GROUP_FLAGS(ALWAYSON, alwayson,
		ADRENO_PERFCOUNTER_GROUP_FIXED),
};

static struct adreno_perfcounters a6xx_perfcounters = {
	a6xx_perfcounter_groups,
	ARRAY_SIZE(a6xx_perfcounter_groups),
};

/* Register offset defines for A6XX, in order of enum adreno_regs */
static unsigned int a6xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {

	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_BASE, A6XX_CP_RB_BASE),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_BASE_HI, A6XX_CP_RB_BASE_HI),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR_ADDR_LO,
			A6XX_CP_RB_RPTR_ADDR_LO),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR_ADDR_HI,
			A6XX_CP_RB_RPTR_ADDR_HI),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR, A6XX_CP_RB_RPTR),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_WPTR, A6XX_CP_RB_WPTR),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_CNTL, A6XX_CP_RB_CNTL),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_ME_CNTL, A6XX_CP_SQE_CNTL),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_CNTL, A6XX_CP_MISC_CNTL),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_HW_FAULT, A6XX_CP_HW_FAULT),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_IB1_BASE, A6XX_CP_IB1_BASE),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_IB1_BASE_HI, A6XX_CP_IB1_BASE_HI),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_IB1_BUFSZ, A6XX_CP_IB1_REM_SIZE),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_IB2_BASE, A6XX_CP_IB2_BASE),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_IB2_BASE_HI, A6XX_CP_IB2_BASE_HI),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_IB2_BUFSZ, A6XX_CP_IB2_REM_SIZE),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_ROQ_ADDR, A6XX_CP_ROQ_DBG_ADDR),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_ROQ_DATA, A6XX_CP_ROQ_DBG_DATA),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_STATUS, A6XX_RBBM_STATUS),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_STATUS3, A6XX_RBBM_STATUS3),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_CTL, A6XX_RBBM_PERFCTR_CNTL),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD0,
			A6XX_RBBM_PERFCTR_LOAD_CMD0),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD1,
			A6XX_RBBM_PERFCTR_LOAD_CMD1),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD2,
			A6XX_RBBM_PERFCTR_LOAD_CMD2),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD3,
			A6XX_RBBM_PERFCTR_LOAD_CMD3),

	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_INT_0_MASK, A6XX_RBBM_INT_0_MASK),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_INT_0_STATUS, A6XX_RBBM_INT_0_STATUS),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_CLOCK_CTL, A6XX_RBBM_CLOCK_CNTL),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_INT_CLEAR_CMD,
			A6XX_RBBM_INT_CLEAR_CMD),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SW_RESET_CMD, A6XX_RBBM_SW_RESET_CMD),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_BLOCK_SW_RESET_CMD,
			A6XX_RBBM_BLOCK_SW_RESET_CMD),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_BLOCK_SW_RESET_CMD2,
			A6XX_RBBM_BLOCK_SW_RESET_CMD2),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_LO,
			A6XX_RBBM_PERFCTR_LOAD_VALUE_LO),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_HI,
			A6XX_RBBM_PERFCTR_LOAD_VALUE_HI),
	ADRENO_REG_DEFINE(ADRENO_REG_VBIF_VERSION, A6XX_VBIF_VERSION),
	ADRENO_REG_DEFINE(ADRENO_REG_VBIF_XIN_HALT_CTRL0,
			A6XX_VBIF_XIN_HALT_CTRL0),
	ADRENO_REG_DEFINE(ADRENO_REG_VBIF_XIN_HALT_CTRL1,
			A6XX_VBIF_XIN_HALT_CTRL1),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO,
			A6XX_GMU_ALWAYS_ON_COUNTER_L),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI,
			A6XX_GMU_ALWAYS_ON_COUNTER_H),
	ADRENO_REG_DEFINE(ADRENO_REG_GMU_AO_INTERRUPT_EN,
			A6XX_GMU_AO_INTERRUPT_EN),
	ADRENO_REG_DEFINE(ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR,
			A6XX_GMU_AO_HOST_INTERRUPT_CLR),
	ADRENO_REG_DEFINE(ADRENO_REG_GMU_AO_HOST_INTERRUPT_STATUS,
			A6XX_GMU_AO_HOST_INTERRUPT_STATUS),
	ADRENO_REG_DEFINE(ADRENO_REG_GMU_AO_HOST_INTERRUPT_MASK,
			A6XX_GMU_AO_HOST_INTERRUPT_MASK),
	ADRENO_REG_DEFINE(ADRENO_REG_GMU_PWR_COL_KEEPALIVE,
			A6XX_GMU_GMU_PWR_COL_KEEPALIVE),
	ADRENO_REG_DEFINE(ADRENO_REG_GMU_AHB_FENCE_STATUS,
			A6XX_GMU_AHB_FENCE_STATUS),
	ADRENO_REG_DEFINE(ADRENO_REG_GMU_HFI_CTRL_STATUS,
			A6XX_GMU_HFI_CTRL_STATUS),
	ADRENO_REG_DEFINE(ADRENO_REG_GMU_HFI_VERSION_INFO,
			A6XX_GMU_HFI_VERSION_INFO),
	ADRENO_REG_DEFINE(ADRENO_REG_GMU_HFI_SFR_ADDR,
			A6XX_GMU_HFI_SFR_ADDR),
	ADRENO_REG_DEFINE(ADRENO_REG_GMU_RPMH_POWER_STATE,
			A6XX_GMU_RPMH_POWER_STATE),
	ADRENO_REG_DEFINE(ADRENO_REG_GMU_GMU2HOST_INTR_CLR,
			A6XX_GMU_GMU2HOST_INTR_CLR),
	ADRENO_REG_DEFINE(ADRENO_REG_GMU_GMU2HOST_INTR_INFO,
			A6XX_GMU_GMU2HOST_INTR_INFO),
	ADRENO_REG_DEFINE(ADRENO_REG_GMU_GMU2HOST_INTR_MASK,
			A6XX_GMU_GMU2HOST_INTR_MASK),
	ADRENO_REG_DEFINE(ADRENO_REG_GMU_HOST2GMU_INTR_SET,
			A6XX_GMU_HOST2GMU_INTR_SET),
	ADRENO_REG_DEFINE(ADRENO_REG_GMU_HOST2GMU_INTR_CLR,
			A6XX_GMU_HOST2GMU_INTR_CLR),
	ADRENO_REG_DEFINE(ADRENO_REG_GMU_HOST2GMU_INTR_RAW_INFO,
			A6XX_GMU_HOST2GMU_INTR_RAW_INFO),

	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TRUST_CONTROL,
			A6XX_RBBM_SECVID_TRUST_CNTL),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE,
			A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE_HI,
			A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_SIZE,
			A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TSB_CONTROL,
			A6XX_RBBM_SECVID_TSB_CNTL),
};

static const struct adreno_reg_offsets a6xx_reg_offsets = {
	.offsets = a6xx_register_offsets,
	.offset_0 = ADRENO_REG_REGISTER_MAX,
};

struct adreno_gpudev adreno_a6xx_gpudev = {
	.reg_offsets = &a6xx_reg_offsets,
	.start = a6xx_start,
	.snapshot = a6xx_snapshot,
	.irq = &a6xx_irq,
	.snapshot_data = &a6xx_snapshot_data,
	.irq_trace = trace_kgsl_a5xx_irq_status,
	.num_prio_levels = KGSL_PRIORITY_MAX_RB_LEVELS,
	.platform_setup = a6xx_platform_setup,
	.init = a6xx_init,
	.rb_start = a6xx_rb_start,
	.regulator_enable = a6xx_sptprac_enable,
	.regulator_disable = a6xx_sptprac_disable,
	.perfcounters = &a6xx_perfcounters,
	.microcode_read = a6xx_microcode_read,
	.enable_64bit = a6xx_enable_64bit,
	.llc_configure_gpu_scid = a6xx_llc_configure_gpu_scid,
	.llc_configure_gpuhtw_scid = a6xx_llc_configure_gpuhtw_scid,
	.llc_enable_overrides = a6xx_llc_enable_overrides,
	.oob_set = a6xx_oob_set,
	.oob_clear = a6xx_oob_clear,
	.rpmh_gpu_pwrctrl = a6xx_rpmh_gpu_pwrctrl,
	.hw_isidle = a6xx_hw_isidle, /* Replaced by NULL if GMU is disabled */
	.wait_for_gmu_idle = a6xx_wait_for_gmu_idle,
	.iommu_fault_block = a6xx_iommu_fault_block,
};