/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/msm_kgsl.h>

#include "adreno.h"
#include "kgsl_sharedmem.h"
#include "a4xx_reg.h"
#include "adreno_a3xx.h"
#include "adreno_a4xx.h"
#include "adreno_cp_parser.h"
#include "adreno_trace.h"
#include "adreno_pm4types.h"
#include "adreno_perfcounter.h"

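/* A4XX_RBBM_POWER_STATUS: set when the SP/TP power rail is on */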
#define SP_TP_PWR_ON BIT(20)
/* A4XX_RBBM_CLOCK_CTL_IP */
#define CNTL_IP_SW_COLLAPSE BIT(0)
31
32/*
33 * Define registers for a4xx that contain addresses used by the
34 * cp parser logic
35 */
36const unsigned int a4xx_cp_addr_regs[ADRENO_CP_ADDR_MAX] = {
37 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VSC_PIPE_DATA_ADDRESS_0,
38 A4XX_VSC_PIPE_DATA_ADDRESS_0),
39 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VSC_PIPE_DATA_LENGTH_0,
40 A4XX_VSC_PIPE_DATA_LENGTH_0),
41 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VSC_PIPE_DATA_ADDRESS_1,
42 A4XX_VSC_PIPE_DATA_ADDRESS_1),
43 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VSC_PIPE_DATA_LENGTH_1,
44 A4XX_VSC_PIPE_DATA_LENGTH_1),
45 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VSC_PIPE_DATA_ADDRESS_2,
46 A4XX_VSC_PIPE_DATA_ADDRESS_2),
47 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VSC_PIPE_DATA_LENGTH_2,
48 A4XX_VSC_PIPE_DATA_LENGTH_2),
49 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VSC_PIPE_DATA_ADDRESS_3,
50 A4XX_VSC_PIPE_DATA_ADDRESS_3),
51 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VSC_PIPE_DATA_LENGTH_3,
52 A4XX_VSC_PIPE_DATA_LENGTH_3),
53 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VSC_PIPE_DATA_ADDRESS_4,
54 A4XX_VSC_PIPE_DATA_ADDRESS_4),
55 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VSC_PIPE_DATA_LENGTH_4,
56 A4XX_VSC_PIPE_DATA_LENGTH_4),
57 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VSC_PIPE_DATA_ADDRESS_5,
58 A4XX_VSC_PIPE_DATA_ADDRESS_5),
59 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VSC_PIPE_DATA_LENGTH_5,
60 A4XX_VSC_PIPE_DATA_LENGTH_5),
61 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VSC_PIPE_DATA_ADDRESS_6,
62 A4XX_VSC_PIPE_DATA_ADDRESS_6),
63 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VSC_PIPE_DATA_LENGTH_6,
64 A4XX_VSC_PIPE_DATA_LENGTH_6),
65 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VSC_PIPE_DATA_ADDRESS_7,
66 A4XX_VSC_PIPE_DATA_ADDRESS_7),
67 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VSC_PIPE_DATA_LENGTH_7,
68 A4XX_VSC_PIPE_DATA_LENGTH_7),
69 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VFD_FETCH_INSTR_1_0,
70 A4XX_VFD_FETCH_INSTR_1_0),
71 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VFD_FETCH_INSTR_1_1,
72 A4XX_VFD_FETCH_INSTR_1_1),
73 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VFD_FETCH_INSTR_1_2,
74 A4XX_VFD_FETCH_INSTR_1_2),
75 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VFD_FETCH_INSTR_1_3,
76 A4XX_VFD_FETCH_INSTR_1_3),
77 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VFD_FETCH_INSTR_1_4,
78 A4XX_VFD_FETCH_INSTR_1_4),
79 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VFD_FETCH_INSTR_1_5,
80 A4XX_VFD_FETCH_INSTR_1_5),
81 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VFD_FETCH_INSTR_1_6,
82 A4XX_VFD_FETCH_INSTR_1_6),
83 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VFD_FETCH_INSTR_1_7,
84 A4XX_VFD_FETCH_INSTR_1_7),
85 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VFD_FETCH_INSTR_1_8,
86 A4XX_VFD_FETCH_INSTR_1_8),
87 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VFD_FETCH_INSTR_1_9,
88 A4XX_VFD_FETCH_INSTR_1_9),
89 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VFD_FETCH_INSTR_1_10,
90 A4XX_VFD_FETCH_INSTR_1_10),
91 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VFD_FETCH_INSTR_1_11,
92 A4XX_VFD_FETCH_INSTR_1_11),
93 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VFD_FETCH_INSTR_1_12,
94 A4XX_VFD_FETCH_INSTR_1_12),
95 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VFD_FETCH_INSTR_1_13,
96 A4XX_VFD_FETCH_INSTR_1_13),
97 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VFD_FETCH_INSTR_1_14,
98 A4XX_VFD_FETCH_INSTR_1_14),
99 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VFD_FETCH_INSTR_1_15,
100 A4XX_VFD_FETCH_INSTR_1_15),
101 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VFD_FETCH_INSTR_1_16,
102 A4XX_VFD_FETCH_INSTR_1_16),
103 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VFD_FETCH_INSTR_1_17,
104 A4XX_VFD_FETCH_INSTR_1_17),
105 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VFD_FETCH_INSTR_1_18,
106 A4XX_VFD_FETCH_INSTR_1_18),
107 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VFD_FETCH_INSTR_1_19,
108 A4XX_VFD_FETCH_INSTR_1_19),
109 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VFD_FETCH_INSTR_1_20,
110 A4XX_VFD_FETCH_INSTR_1_20),
111 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VFD_FETCH_INSTR_1_21,
112 A4XX_VFD_FETCH_INSTR_1_21),
113 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VFD_FETCH_INSTR_1_22,
114 A4XX_VFD_FETCH_INSTR_1_22),
115 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VFD_FETCH_INSTR_1_23,
116 A4XX_VFD_FETCH_INSTR_1_23),
117 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VFD_FETCH_INSTR_1_24,
118 A4XX_VFD_FETCH_INSTR_1_24),
119 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VFD_FETCH_INSTR_1_25,
120 A4XX_VFD_FETCH_INSTR_1_25),
121 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VFD_FETCH_INSTR_1_26,
122 A4XX_VFD_FETCH_INSTR_1_26),
123 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VFD_FETCH_INSTR_1_27,
124 A4XX_VFD_FETCH_INSTR_1_27),
125 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VFD_FETCH_INSTR_1_28,
126 A4XX_VFD_FETCH_INSTR_1_28),
127 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VFD_FETCH_INSTR_1_29,
128 A4XX_VFD_FETCH_INSTR_1_29),
129 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VFD_FETCH_INSTR_1_30,
130 A4XX_VFD_FETCH_INSTR_1_30),
131 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VFD_FETCH_INSTR_1_31,
132 A4XX_VFD_FETCH_INSTR_1_31),
133 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VSC_SIZE_ADDRESS,
134 A4XX_VSC_SIZE_ADDRESS),
135 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_SP_VS_PVT_MEM_ADDR,
136 A4XX_SP_VS_PVT_MEM_ADDR),
137 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_SP_FS_PVT_MEM_ADDR,
138 A4XX_SP_FS_PVT_MEM_ADDR),
139 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_SP_VS_OBJ_START_REG,
140 A4XX_SP_VS_OBJ_START),
141 ADRENO_REG_DEFINE(ADRENO_CP_ADDR_SP_FS_OBJ_START_REG,
142 A4XX_SP_FS_OBJ_START),
143 ADRENO_REG_DEFINE(ADRENO_CP_UCHE_INVALIDATE0,
144 A4XX_UCHE_INVALIDATE0),
145 ADRENO_REG_DEFINE(ADRENO_CP_UCHE_INVALIDATE1,
146 A4XX_UCHE_INVALIDATE1),
147};
148
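/* VBIF setup tables: register/value pairs terminated by a zero entry */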
149static const struct adreno_vbif_data a405_vbif[] = {
150 { A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003 },
151 {0, 0},
152};
153
154static const struct adreno_vbif_data a420_vbif[] = {
155 { A4XX_VBIF_ABIT_SORT, 0x0001001F },
156 { A4XX_VBIF_ABIT_SORT_CONF, 0x000000A4 },
157 { A4XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000001 },
158 { A4XX_VBIF_IN_RD_LIM_CONF0, 0x18181818 },
159 { A4XX_VBIF_IN_RD_LIM_CONF1, 0x00000018 },
160 { A4XX_VBIF_IN_WR_LIM_CONF0, 0x18181818 },
161 { A4XX_VBIF_IN_WR_LIM_CONF1, 0x00000018 },
162 { A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003 },
163 {0, 0},
164};
165
166static const struct adreno_vbif_data a430_vbif[] = {
167 { A4XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000001 },
168 { A4XX_VBIF_IN_RD_LIM_CONF0, 0x18181818 },
169 { A4XX_VBIF_IN_RD_LIM_CONF1, 0x00000018 },
170 { A4XX_VBIF_IN_WR_LIM_CONF0, 0x18181818 },
171 { A4XX_VBIF_IN_WR_LIM_CONF1, 0x00000018 },
172 { A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003 },
173 {0, 0},
174};
175
176static const struct adreno_vbif_platform a4xx_vbif_platforms[] = {
177 { adreno_is_a405, a405_vbif },
178 { adreno_is_a420, a420_vbif },
179 { adreno_is_a430, a430_vbif },
180 { adreno_is_a418, a430_vbif },
181};
182
/*
 * a4xx_is_sptp_idle() - On A430 the SP/TP blocks must be powered off for
 * the GPU to be considered idle
 * @adreno_dev: The adreno device pointer
 */
187static bool a4xx_is_sptp_idle(struct adreno_device *adreno_dev)
188{
189 unsigned int reg;
190 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
191
192 if (!ADRENO_FEATURE(adreno_dev, ADRENO_SPTP_PC))
193 return true;
194
195 /* If SP/TP pc isn't enabled, don't worry about power */
196 kgsl_regread(device, A4XX_CP_POWER_COLLAPSE_CNTL, &reg);
197 if (!(reg & 0x10))
198 return true;
199
200 /* Check that SP/TP is off */
201 kgsl_regread(device, A4XX_RBBM_POWER_STATUS, &reg);
202 return !(reg & SP_TP_PWR_ON);
203}
204
/*
 * a4xx_enable_hwcg() - Program the clock control registers
 * @device: Pointer to the KGSL device
 */
209static void a4xx_enable_hwcg(struct kgsl_device *device)
210{
211 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
212
213 kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL_TP0, 0x02222202);
214 kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL_TP1, 0x02222202);
215 kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL_TP2, 0x02222202);
216 kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL_TP3, 0x02222202);
217 kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_TP0, 0x00002222);
218 kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_TP1, 0x00002222);
219 kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_TP2, 0x00002222);
220 kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_TP3, 0x00002222);
221 kgsl_regwrite(device, A4XX_RBBM_CLOCK_HYST_TP0, 0x0E739CE7);
222 kgsl_regwrite(device, A4XX_RBBM_CLOCK_HYST_TP1, 0x0E739CE7);
223 kgsl_regwrite(device, A4XX_RBBM_CLOCK_HYST_TP2, 0x0E739CE7);
224 kgsl_regwrite(device, A4XX_RBBM_CLOCK_HYST_TP3, 0x0E739CE7);
225 kgsl_regwrite(device, A4XX_RBBM_CLOCK_DELAY_TP0, 0x00111111);
226 kgsl_regwrite(device, A4XX_RBBM_CLOCK_DELAY_TP1, 0x00111111);
227 kgsl_regwrite(device, A4XX_RBBM_CLOCK_DELAY_TP2, 0x00111111);
228 kgsl_regwrite(device, A4XX_RBBM_CLOCK_DELAY_TP3, 0x00111111);
229 kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL_SP0, 0x22222222);
230 kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL_SP1, 0x22222222);
231 kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL_SP2, 0x22222222);
232 kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL_SP3, 0x22222222);
233 kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_SP0, 0x00222222);
234 kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_SP1, 0x00222222);
235 kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_SP2, 0x00222222);
236 kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_SP3, 0x00222222);
237 kgsl_regwrite(device, A4XX_RBBM_CLOCK_HYST_SP0, 0x00000104);
238 kgsl_regwrite(device, A4XX_RBBM_CLOCK_HYST_SP1, 0x00000104);
239 kgsl_regwrite(device, A4XX_RBBM_CLOCK_HYST_SP2, 0x00000104);
240 kgsl_regwrite(device, A4XX_RBBM_CLOCK_HYST_SP3, 0x00000104);
241 kgsl_regwrite(device, A4XX_RBBM_CLOCK_DELAY_SP0, 0x00000081);
242 kgsl_regwrite(device, A4XX_RBBM_CLOCK_DELAY_SP1, 0x00000081);
243 kgsl_regwrite(device, A4XX_RBBM_CLOCK_DELAY_SP2, 0x00000081);
244 kgsl_regwrite(device, A4XX_RBBM_CLOCK_DELAY_SP3, 0x00000081);
245 kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL_UCHE, 0x22222222);
246 kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_UCHE, 0x02222222);
247 kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL3_UCHE, 0x00000000);
248 kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL4_UCHE, 0x00000000);
249 kgsl_regwrite(device, A4XX_RBBM_CLOCK_HYST_UCHE, 0x00004444);
250 kgsl_regwrite(device, A4XX_RBBM_CLOCK_DELAY_UCHE, 0x00001112);
251 kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL_RB0, 0x22222222);
252 kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL_RB1, 0x22222222);
253 kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL_RB2, 0x22222222);
254 kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL_RB3, 0x22222222);
255 /* Disable L1 clocking in A420 due to CCU issues with it */
256 if (adreno_is_a420(adreno_dev)) {
257 kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_RB0, 0x00002020);
258 kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_RB1, 0x00002020);
259 kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_RB2, 0x00002020);
260 kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_RB3, 0x00002020);
261 } else {
262 kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_RB0, 0x00022020);
263 kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_RB1, 0x00022020);
264 kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_RB2, 0x00022020);
265 kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_RB3, 0x00022020);
266 }
267 /* No CCU for A405 */
268 if (!adreno_is_a405(adreno_dev)) {
269 kgsl_regwrite(device,
270 A4XX_RBBM_CLOCK_CTL_MARB_CCU0, 0x00000922);
271 kgsl_regwrite(device,
272 A4XX_RBBM_CLOCK_CTL_MARB_CCU1, 0x00000922);
273 kgsl_regwrite(device,
274 A4XX_RBBM_CLOCK_CTL_MARB_CCU2, 0x00000922);
275 kgsl_regwrite(device,
276 A4XX_RBBM_CLOCK_CTL_MARB_CCU3, 0x00000922);
277 kgsl_regwrite(device,
278 A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU0, 0x00000000);
279 kgsl_regwrite(device,
280 A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU1, 0x00000000);
281 kgsl_regwrite(device,
282 A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU2, 0x00000000);
283 kgsl_regwrite(device,
284 A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU3, 0x00000000);
285 kgsl_regwrite(device,
286 A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_0,
287 0x00000001);
288 kgsl_regwrite(device,
289 A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_1,
290 0x00000001);
291 kgsl_regwrite(device,
292 A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_2,
293 0x00000001);
294 kgsl_regwrite(device,
295 A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_3,
296 0x00000001);
297 }
298 kgsl_regwrite(device, A4XX_RBBM_CLOCK_MODE_GPC, 0x02222222);
299 kgsl_regwrite(device, A4XX_RBBM_CLOCK_HYST_GPC, 0x04100104);
300 kgsl_regwrite(device, A4XX_RBBM_CLOCK_DELAY_GPC, 0x00022222);
301 kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL_COM_DCOM, 0x00000022);
302 kgsl_regwrite(device, A4XX_RBBM_CLOCK_HYST_COM_DCOM, 0x0000010F);
303 kgsl_regwrite(device, A4XX_RBBM_CLOCK_DELAY_COM_DCOM, 0x00000022);
304 kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL_TSE_RAS_RBBM, 0x00222222);
305 kgsl_regwrite(device, A4XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00004104);
306 kgsl_regwrite(device, A4XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00000222);
307 kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL_HLSQ, 0x00000000);
308 kgsl_regwrite(device, A4XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000);
309 kgsl_regwrite(device, A4XX_RBBM_CLOCK_DELAY_HLSQ, 0x00220000);
	/*
	 * Due to a HW timing issue, top-level HW clock gating causes
	 * register reads/writes to be dropped on A430. The issue
	 * appeared with the introduction of SP/TP power collapse;
	 * targets without SP/TP PC are not affected. It could be fixed by
	 * a) disabling SP/TP power collapse, or
	 * b) disabling HW clock gating.
	 * Disabling HW clock gating with NAP enabled has minimal power
	 * impact, so that option is chosen over disabling SP/TP power
	 * collapse.
	 * A430 revisions with patch ID 2 and above do not have the issue.
	 */
323 if (adreno_is_a430(adreno_dev) &&
324 (ADRENO_CHIPID_PATCH(adreno_dev->chipid) < 2))
325 kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL, 0);
326 else
327 kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL, 0xAAAAAAAA);
328 kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2, 0);
329}
330/*
331 * a4xx_regulator_enable() - Enable any necessary HW regulators
332 * @adreno_dev: The adreno device pointer
333 *
334 * Some HW blocks may need their regulators explicitly enabled
335 * on a restart. Clocks must be on during this call.
336 */
337static int a4xx_regulator_enable(struct adreno_device *adreno_dev)
338{
339 unsigned int reg;
340 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
341
342 if (!(adreno_is_a430(adreno_dev) || adreno_is_a418(adreno_dev))) {
343 /* Halt the sp_input_clk at HM level */
344 kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL, 0x00000055);
345 a4xx_enable_hwcg(device);
346 return 0;
347 }
348
349 /* Set the default register values; set SW_COLLAPSE to 0 */
350 kgsl_regwrite(device, A4XX_RBBM_POWER_CNTL_IP, 0x778000);
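	/* Wait for the SP/TP power rail to report on before toggling the SP clock */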
351 do {
352 udelay(5);
353 kgsl_regread(device, A4XX_RBBM_POWER_STATUS, &reg);
354 } while (!(reg & SP_TP_PWR_ON));
355
356 /* Disable SP clock */
357 kgsl_regrmw(device, A4XX_RBBM_CLOCK_CTL_IP, CNTL_IP_SW_COLLAPSE, 0);
358 /* Enable hardware clockgating */
359 a4xx_enable_hwcg(device);
360 /* Enable SP clock */
361 kgsl_regrmw(device, A4XX_RBBM_CLOCK_CTL_IP, CNTL_IP_SW_COLLAPSE, 1);
362 return 0;
363}
364
365/*
366 * a4xx_regulator_disable() - Disable any necessary HW regulators
367 * @adreno_dev: The adreno device pointer
368 *
369 * Some HW blocks may need their regulators explicitly disabled
370 * on a power down to prevent current spikes. Clocks must be on
371 * during this call.
372 */
373static void a4xx_regulator_disable(struct adreno_device *adreno_dev)
374{
375 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
376
377 if (!(adreno_is_a430(adreno_dev) || adreno_is_a418(adreno_dev)))
378 return;
379
380 /* Set the default register values; set SW_COLLAPSE to 1 */
381 kgsl_regwrite(device, A4XX_RBBM_POWER_CNTL_IP, 0x778001);
382}
383
384/*
385 * a4xx_enable_pc() - Enable the SP/TP block power collapse
386 * @adreno_dev: The adreno device pointer
387 */
388static void a4xx_enable_pc(struct adreno_device *adreno_dev)
389{
390 if (!ADRENO_FEATURE(adreno_dev, ADRENO_SPTP_PC) ||
391 !test_bit(ADRENO_SPTP_PC_CTRL, &adreno_dev->pwrctrl_flag))
392 return;
393
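	/*
	 * 0x10 (bit 4) marks SP/TP power collapse as enabled;
	 * a4xx_is_sptp_idle() checks the same bit
	 */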
394 kgsl_regwrite(KGSL_DEVICE(adreno_dev), A4XX_CP_POWER_COLLAPSE_CNTL,
395 0x00400010);
396 trace_adreno_sp_tp((unsigned long) __builtin_return_address(0));
}
398
/*
 * a4xx_enable_ppd() - Enable the peak power detect logic in the hardware
 * @adreno_dev: The adreno device pointer
 *
 * A430 can detect peak-current conditions in hardware and throttle
 * the ALU workload to mitigate them.
 */
406static void a4xx_enable_ppd(struct adreno_device *adreno_dev)
407{
408 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
409
410 if (!ADRENO_FEATURE(adreno_dev, ADRENO_PPD) ||
411 !test_bit(ADRENO_PPD_CTRL, &adreno_dev->pwrctrl_flag) ||
412 !adreno_is_a430v2(adreno_dev))
413 return;
414
415 /* Program thresholds */
416 kgsl_regwrite(device, A4XX_RBBM_PPD_EPOCH_INTER_TH_HIGH_CLEAR_THR,
417 0x003F0101);
418 kgsl_regwrite(device, A4XX_RBBM_PPD_EPOCH_INTER_TH_LOW, 0x00000101);
419 kgsl_regwrite(device, A4XX_RBBM_PPD_V2_SP_PWR_WEIGHTS, 0x00085014);
420 kgsl_regwrite(device, A4XX_RBBM_PPD_V2_SP_RB_EPOCH_TH, 0x00000B46);
421 kgsl_regwrite(device, A4XX_RBBM_PPD_V2_TP_CONFIG, 0xE4525111);
422 kgsl_regwrite(device, A4XX_RBBM_PPD_RAMP_V2_CONTROL, 0x0000000B);
423
424 /* Enable PPD*/
425 kgsl_regwrite(device, A4XX_RBBM_PPD_CTRL, 0x1002E40C);
}
427
428/*
429 * a4xx_pwrlevel_change_settings() - Program the hardware during power level
430 * transitions
431 * @adreno_dev: The adreno device pointer
432 * @prelevel: The previous power level
433 * @postlevel: The new power level
434 * @post: True if called after the clock change has taken effect
435 */
436static void a4xx_pwrlevel_change_settings(struct adreno_device *adreno_dev,
437 unsigned int prelevel, unsigned int postlevel,
438 bool post)
439{
440 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
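	/* Tracks whether the pre-change (post == false) call has been seen */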
441 static int pre;
442
443 /* PPD programming only for A430v2 */
444 if (!ADRENO_FEATURE(adreno_dev, ADRENO_PPD) ||
445 !test_bit(ADRENO_PPD_CTRL, &adreno_dev->pwrctrl_flag) ||
446 !adreno_is_a430v2(adreno_dev))
447 return;
448
449 /* if this is a real pre, or a post without a previous pre, set pre */
450 if ((post == 0) || (pre == 0 && post == 1))
451 pre = 1;
452 else if (post == 1)
453 pre = 0;
454
455 if ((prelevel == 0) && pre) {
456 /* Going to Non-Turbo mode - mask the throttle and reset */
457 kgsl_regwrite(device, A4XX_RBBM_PPD_CTRL, 0x1002E40E);
458 kgsl_regwrite(device, A4XX_RBBM_PPD_CTRL, 0x1002E40C);
459 } else if ((postlevel == 0) && post) {
460 /* Going to Turbo mode - unmask the throttle and reset */
461 kgsl_regwrite(device, A4XX_RBBM_PPD_CTRL, 0x1002E40A);
462 kgsl_regwrite(device, A4XX_RBBM_PPD_CTRL, 0x1002E408);
463 }
464
465 if (post)
466 pre = 0;
467}
468
469/**
470 * a4xx_protect_init() - Initializes register protection on a4xx
471 * @adreno_dev: Pointer to the device structure
472 * Performs register writes to enable protected access to sensitive
473 * registers
474 */
475static void a4xx_protect_init(struct adreno_device *adreno_dev)
476{
477 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
478 int index = 0;
479 struct kgsl_protected_registers *iommu_regs;
480
481 /* enable access protection to privileged registers */
482 kgsl_regwrite(device, A4XX_CP_PROTECT_CTRL, 0x00000007);
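	/*
	 * Each call below protects a block of registers; the count argument
	 * is interpreted as a power-of-two range size by
	 * adreno_set_protected_registers()
	 */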
483 /* RBBM registers */
484 adreno_set_protected_registers(adreno_dev, &index, 0x4, 2);
485 adreno_set_protected_registers(adreno_dev, &index, 0x8, 3);
486 adreno_set_protected_registers(adreno_dev, &index, 0x10, 4);
487 adreno_set_protected_registers(adreno_dev, &index, 0x20, 5);
488 adreno_set_protected_registers(adreno_dev, &index, 0x40, 6);
489 adreno_set_protected_registers(adreno_dev, &index, 0x80, 4);
490
491 /* Content protection registers */
492 if (kgsl_mmu_is_secured(&device->mmu)) {
493 adreno_set_protected_registers(adreno_dev, &index,
494 A4XX_RBBM_SECVID_TSB_TRUSTED_BASE, 3);
495 adreno_set_protected_registers(adreno_dev, &index,
496 A4XX_RBBM_SECVID_TRUST_CONTROL, 1);
497 }
498
499 /* CP registers */
500 adreno_set_protected_registers(adreno_dev, &index, 0x200, 7);
501 adreno_set_protected_registers(adreno_dev, &index, 0x580, 4);
502 adreno_set_protected_registers(adreno_dev, &index, A4XX_CP_PREEMPT, 1);
503 /* RB registers */
504 adreno_set_protected_registers(adreno_dev, &index, 0xCC0, 0);
505
506 /* HLSQ registers */
507 adreno_set_protected_registers(adreno_dev, &index, 0xE00, 0);
508
509 /* VPC registers */
510 adreno_set_protected_registers(adreno_dev, &index, 0xE60, 1);
511
512 if (adreno_is_a430(adreno_dev) || adreno_is_a420(adreno_dev) ||
513 adreno_is_a418(adreno_dev)) {
514 /*
515 * Protect registers that might cause XPU violation if
516 * accessed by GPU
517 */
518 adreno_set_protected_registers(adreno_dev, &index, 0x2c00, 10);
519 adreno_set_protected_registers(adreno_dev, &index, 0x3300, 8);
520 adreno_set_protected_registers(adreno_dev, &index, 0x3400, 10);
521 }
522
523 /* SMMU registers */
524 iommu_regs = kgsl_mmu_get_prot_regs(&device->mmu);
525 if (iommu_regs)
526 adreno_set_protected_registers(adreno_dev, &index,
527 iommu_regs->base, iommu_regs->range);
528}
529
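/* Snapshot section sizes; cp_pfp is enlarged for A430-class parts in a4xx_start() */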
530static struct adreno_snapshot_sizes a4xx_snap_sizes = {
531 .cp_pfp = 0x14,
532 .vpc_mem = 2048,
533 .cp_meq = 64,
534 .shader_mem = 0x4000,
535 .cp_merciu = 64,
536 .roq = 512,
537};
538
539
540static void a4xx_start(struct adreno_device *adreno_dev)
541{
542 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
543 struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
544 unsigned int cp_debug = A4XX_CP_DEBUG_DEFAULT;
545
546 adreno_vbif_start(adreno_dev, a4xx_vbif_platforms,
547 ARRAY_SIZE(a4xx_vbif_platforms));
548 /* Make all blocks contribute to the GPU BUSY perf counter */
549 kgsl_regwrite(device, A4XX_RBBM_GPU_BUSY_MASKED, 0xFFFFFFFF);
550
	/* Tune the hysteresis counters for SP and CP idle detection */
552 kgsl_regwrite(device, A4XX_RBBM_SP_HYST_CNT, 0x10);
553 kgsl_regwrite(device, A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL, 0x10);
554 if (adreno_is_a430(adreno_dev))
555 kgsl_regwrite(device, A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL2, 0x30);
556
557 /*
558 * Enable the RBBM error reporting bits. This lets us get
559 * useful information on failure
560 */
561
562 kgsl_regwrite(device, A4XX_RBBM_AHB_CTL0, 0x00000001);
563
564 /* Enable AHB error reporting */
565 kgsl_regwrite(device, A4XX_RBBM_AHB_CTL1, 0xA6FFFFFF);
566
567 /* Turn on the power counters */
568 kgsl_regwrite(device, A4XX_RBBM_RBBM_CTL, 0x00000030);
569
570 /*
571 * Turn on hang detection - this spews a lot of useful information
572 * into the RBBM registers on a hang
573 */
574 set_bit(ADRENO_DEVICE_HANG_INTR, &adreno_dev->priv);
575 gpudev->irq->mask |= (1 << A4XX_INT_MISC_HANG_DETECT);
576 kgsl_regwrite(device, A4XX_RBBM_INTERFACE_HANG_INT_CTL,
577 (1 << 30) | 0xFFFF);
578
579 /* Set the GMEM/OCMEM base address for A4XX */
580 kgsl_regwrite(device, A4XX_RB_GMEM_BASE_ADDR,
581 (unsigned int)(adreno_dev->gmem_base >> 14));
582
583 /* Turn on performance counters */
584 kgsl_regwrite(device, A4XX_RBBM_PERFCTR_CTL, 0x01);
585
586 /* Enable VFD to access most of the UCHE (7 ways out of 8) */
587 kgsl_regwrite(device, A4XX_UCHE_CACHE_WAYS_VFD, 0x07);
588
589 /* Disable L2 bypass to avoid UCHE out of bounds errors */
590 kgsl_regwrite(device, UCHE_TRAP_BASE_LO, 0xffff0000);
591 kgsl_regwrite(device, UCHE_TRAP_BASE_HI, 0xffff0000);
592
	/* On A420 cores turn on SKIP_IB2_DISABLE in addition to the default */
	if (adreno_is_a420(adreno_dev))
		cp_debug |= (1 << 29);
	else
		/*
		 * Set the chicken bit to disable the bootstrap speed-up on
		 * A430 and its derivatives
		 */
		cp_debug |= (1 << 14);
602
603 kgsl_regwrite(device, A4XX_CP_DEBUG, cp_debug);
604
605 /* On A430 enable SP regfile sleep for power savings */
606 if (!adreno_is_a420(adreno_dev)) {
607 kgsl_regwrite(device, A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_0,
608 0x00000441);
609 kgsl_regwrite(device, A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_1,
610 0x00000441);
611 }
612
613 /*
614 * For A420 set RBBM_CLOCK_DELAY_HLSQ.CGC_HLSQ_TP_EARLY_CYC >= 2
615 * due to timing issue with HLSQ_TP_CLK_EN
616 */
617 if (adreno_is_a420(adreno_dev)) {
618 unsigned int val;
619
620 kgsl_regread(device, A4XX_RBBM_CLOCK_DELAY_HLSQ, &val);
621 val &= ~A4XX_CGC_HLSQ_TP_EARLY_CYC_MASK;
622 val |= 2 << A4XX_CGC_HLSQ_TP_EARLY_CYC_SHIFT;
623 kgsl_regwrite(device, A4XX_RBBM_CLOCK_DELAY_HLSQ, val);
624 }
625
	/* A430 and its derivatives offer a bigger chunk of CP_STATE_DEBUG regs */
	if (!adreno_is_a420(adreno_dev))
		a4xx_snap_sizes.cp_pfp = 0x34;
629
630 if (adreno_is_a405(adreno_dev))
631 gpudev->vbif_xin_halt_ctrl0_mask =
632 A405_VBIF_XIN_HALT_CTRL0_MASK;
633
634 adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE);
635
636 a4xx_protect_init(adreno_dev);
637}
638
639/*
640 * a4xx_err_callback() - Callback for a4xx error interrupts
641 * @adreno_dev: Pointer to device
642 * @bit: Interrupt bit
643 */
644static void a4xx_err_callback(struct adreno_device *adreno_dev, int bit)
645{
646 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
647 unsigned int reg;
648
649 switch (bit) {
650 case A4XX_INT_RBBM_AHB_ERROR: {
651 kgsl_regread(device, A4XX_RBBM_AHB_ERROR_STATUS, &reg);
652
653 /*
654 * Return the word address of the erroring register so that it
655 * matches the register specification
656 */
657 KGSL_DRV_CRIT_RATELIMIT(device,
658 "RBBM | AHB bus error | %s | addr=%x | ports=%x:%x\n",
659 reg & (1 << 28) ? "WRITE" : "READ",
660 (reg & 0xFFFFF) >> 2, (reg >> 20) & 0x3,
661 (reg >> 24) & 0xF);
662
663 /* Clear the error */
664 kgsl_regwrite(device, A4XX_RBBM_AHB_CMD, (1 << 4));
665 break;
666 }
667 case A4XX_INT_RBBM_REG_TIMEOUT:
668 KGSL_DRV_CRIT_RATELIMIT(device, "RBBM: AHB register timeout\n");
669 break;
670 case A4XX_INT_RBBM_ME_MS_TIMEOUT:
671 kgsl_regread(device, A4XX_RBBM_AHB_ME_SPLIT_STATUS, &reg);
672 KGSL_DRV_CRIT_RATELIMIT(device,
673 "RBBM | ME master split timeout | status=%x\n", reg);
674 break;
675 case A4XX_INT_RBBM_PFP_MS_TIMEOUT:
676 kgsl_regread(device, A4XX_RBBM_AHB_PFP_SPLIT_STATUS, &reg);
677 KGSL_DRV_CRIT_RATELIMIT(device,
678 "RBBM | PFP master split timeout | status=%x\n", reg);
679 break;
	case A4XX_INT_RBBM_ETS_MS_TIMEOUT:
		KGSL_DRV_CRIT_RATELIMIT(device,
			"RBBM: ETS master split timeout\n");
		break;
684 case A4XX_INT_RBBM_ASYNC_OVERFLOW:
685 KGSL_DRV_CRIT_RATELIMIT(device, "RBBM: ASYNC overflow\n");
686 break;
687 case A4XX_INT_CP_OPCODE_ERROR:
688 KGSL_DRV_CRIT_RATELIMIT(device,
689 "ringbuffer opcode error interrupt\n");
690 break;
691 case A4XX_INT_CP_RESERVED_BIT_ERROR:
692 KGSL_DRV_CRIT_RATELIMIT(device,
693 "ringbuffer reserved bit error interrupt\n");
694 break;
695 case A4XX_INT_CP_HW_FAULT:
696 {
697 struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
698
699 kgsl_regread(device, A4XX_CP_HW_FAULT, &reg);
700 KGSL_DRV_CRIT_RATELIMIT(device,
701 "CP | Ringbuffer HW fault | status=%x\n", reg);
		/*
		 * Mask off this interrupt since it can spam; it will be
		 * turned on again when the device resets
		 */
706 adreno_writereg(adreno_dev, ADRENO_REG_RBBM_INT_0_MASK,
707 gpudev->irq->mask & ~(1 << A4XX_INT_CP_HW_FAULT));
708 break;
709 }
710 case A4XX_INT_CP_REG_PROTECT_FAULT:
711 kgsl_regread(device, A4XX_CP_PROTECT_STATUS, &reg);
		KGSL_DRV_CRIT_RATELIMIT(device,
			"CP | Protected mode error | %s | addr=%x\n",
			reg & (1 << 24) ? "WRITE" : "READ",
			(reg & 0xFFFFF) >> 2);
716 break;
717 case A4XX_INT_CP_AHB_ERROR_HALT:
718 KGSL_DRV_CRIT_RATELIMIT(device,
719 "ringbuffer AHB error interrupt\n");
720 break;
721 case A4XX_INT_RBBM_ATB_BUS_OVERFLOW:
722 KGSL_DRV_CRIT_RATELIMIT(device, "RBBM: ATB bus overflow\n");
723 break;
724 case A4XX_INT_UCHE_OOB_ACCESS:
725 KGSL_DRV_CRIT_RATELIMIT(device, "UCHE: Out of bounds access\n");
726 break;
727 case A4XX_INT_RBBM_DPM_CALC_ERR:
728 KGSL_DRV_CRIT_RATELIMIT(device, "RBBM: dpm calc error\n");
729 break;
730 case A4XX_INT_RBBM_DPM_EPOCH_ERR:
731 KGSL_DRV_CRIT_RATELIMIT(device, "RBBM: dpm epoch error\n");
732 break;
733 case A4XX_INT_RBBM_DPM_THERMAL_YELLOW_ERR:
734 KGSL_DRV_CRIT_RATELIMIT(device, "RBBM: dpm thermal yellow\n");
735 break;
736 case A4XX_INT_RBBM_DPM_THERMAL_RED_ERR:
737 KGSL_DRV_CRIT_RATELIMIT(device, "RBBM: dpm thermal red\n");
738 break;
739 default:
740 KGSL_DRV_CRIT_RATELIMIT(device, "Unknown interrupt\n");
741 }
742}
743
744static unsigned int a4xx_int_bits[ADRENO_INT_BITS_MAX] = {
745 ADRENO_INT_DEFINE(ADRENO_INT_RBBM_AHB_ERROR, A4XX_INT_RBBM_AHB_ERROR),
746};
747
748/* Register offset defines for A4XX, in order of enum adreno_regs */
749static unsigned int a4xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
750 ADRENO_REG_DEFINE(ADRENO_REG_CP_ME_RAM_WADDR, A4XX_CP_ME_RAM_WADDR),
751 ADRENO_REG_DEFINE(ADRENO_REG_CP_ME_RAM_DATA, A4XX_CP_ME_RAM_DATA),
752 ADRENO_REG_DEFINE(ADRENO_REG_CP_PFP_UCODE_DATA, A4XX_CP_PFP_UCODE_DATA),
753 ADRENO_REG_DEFINE(ADRENO_REG_CP_PFP_UCODE_ADDR, A4XX_CP_PFP_UCODE_ADDR),
754 ADRENO_REG_DEFINE(ADRENO_REG_CP_WFI_PEND_CTR, A4XX_CP_WFI_PEND_CTR),
755 ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_BASE, A4XX_CP_RB_BASE),
756 ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_BASE_HI, ADRENO_REG_SKIP),
757 ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR_ADDR_LO, A4XX_CP_RB_RPTR_ADDR),
758 ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR, A4XX_CP_RB_RPTR),
759 ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_WPTR, A4XX_CP_RB_WPTR),
760 ADRENO_REG_DEFINE(ADRENO_REG_CP_CNTL, A4XX_CP_CNTL),
761 ADRENO_REG_DEFINE(ADRENO_REG_CP_ME_CNTL, A4XX_CP_ME_CNTL),
762 ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_CNTL, A4XX_CP_RB_CNTL),
763 ADRENO_REG_DEFINE(ADRENO_REG_CP_IB1_BASE, A4XX_CP_IB1_BASE),
764 ADRENO_REG_DEFINE(ADRENO_REG_CP_IB1_BASE_HI, ADRENO_REG_SKIP),
765 ADRENO_REG_DEFINE(ADRENO_REG_CP_IB1_BUFSZ, A4XX_CP_IB1_BUFSZ),
766 ADRENO_REG_DEFINE(ADRENO_REG_CP_IB2_BASE, A4XX_CP_IB2_BASE),
767 ADRENO_REG_DEFINE(ADRENO_REG_CP_IB2_BASE_HI, ADRENO_REG_SKIP),
768 ADRENO_REG_DEFINE(ADRENO_REG_CP_IB2_BUFSZ, A4XX_CP_IB2_BUFSZ),
769 ADRENO_REG_DEFINE(ADRENO_REG_CP_ME_RAM_RADDR, A4XX_CP_ME_RAM_RADDR),
770 ADRENO_REG_DEFINE(ADRENO_REG_CP_ROQ_ADDR, A4XX_CP_ROQ_ADDR),
771 ADRENO_REG_DEFINE(ADRENO_REG_CP_ROQ_DATA, A4XX_CP_ROQ_DATA),
772 ADRENO_REG_DEFINE(ADRENO_REG_CP_MERCIU_ADDR, A4XX_CP_MERCIU_ADDR),
773 ADRENO_REG_DEFINE(ADRENO_REG_CP_MERCIU_DATA, A4XX_CP_MERCIU_DATA),
774 ADRENO_REG_DEFINE(ADRENO_REG_CP_MERCIU_DATA2, A4XX_CP_MERCIU_DATA2),
775 ADRENO_REG_DEFINE(ADRENO_REG_CP_MEQ_ADDR, A4XX_CP_MEQ_ADDR),
776 ADRENO_REG_DEFINE(ADRENO_REG_CP_MEQ_DATA, A4XX_CP_MEQ_DATA),
777 ADRENO_REG_DEFINE(ADRENO_REG_CP_HW_FAULT, A4XX_CP_HW_FAULT),
778 ADRENO_REG_DEFINE(ADRENO_REG_CP_PROTECT_STATUS, A4XX_CP_PROTECT_STATUS),
779 ADRENO_REG_DEFINE(ADRENO_REG_CP_SCRATCH_REG6, A4XX_CP_SCRATCH_REG6),
780 ADRENO_REG_DEFINE(ADRENO_REG_CP_SCRATCH_REG7, A4XX_CP_SCRATCH_REG7),
781 ADRENO_REG_DEFINE(ADRENO_REG_CP_PREEMPT, A4XX_CP_PREEMPT),
782 ADRENO_REG_DEFINE(ADRENO_REG_CP_PREEMPT_DEBUG, A4XX_CP_PREEMPT_DEBUG),
783 ADRENO_REG_DEFINE(ADRENO_REG_CP_PREEMPT_DISABLE,
784 A4XX_CP_PREEMPT_DISABLE),
785 ADRENO_REG_DEFINE(ADRENO_REG_CP_PROTECT_REG_0, A4XX_CP_PROTECT_REG_0),
786 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_STATUS, A4XX_RBBM_STATUS),
787 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_CTL, A4XX_RBBM_PERFCTR_CTL),
788 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD0,
789 A4XX_RBBM_PERFCTR_LOAD_CMD0),
790 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD1,
791 A4XX_RBBM_PERFCTR_LOAD_CMD1),
792 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD2,
793 A4XX_RBBM_PERFCTR_LOAD_CMD2),
794 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD3,
795 ADRENO_REG_SKIP),
796 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_PWR_1_LO,
797 A4XX_RBBM_PERFCTR_PWR_1_LO),
798 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_INT_0_MASK, A4XX_RBBM_INT_0_MASK),
799 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_INT_0_STATUS, A4XX_RBBM_INT_0_STATUS),
800 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_CLOCK_CTL, A4XX_RBBM_CLOCK_CTL),
801 ADRENO_REG_DEFINE(ADRENO_REG_VPC_DEBUG_RAM_SEL,
802 A4XX_VPC_DEBUG_RAM_SEL),
803 ADRENO_REG_DEFINE(ADRENO_REG_VPC_DEBUG_RAM_READ,
804 A4XX_VPC_DEBUG_RAM_READ),
805 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_INT_CLEAR_CMD,
806 A4XX_RBBM_INT_CLEAR_CMD),
807 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_RBBM_CTL, A4XX_RBBM_RBBM_CTL),
808 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SW_RESET_CMD, A4XX_RBBM_SW_RESET_CMD),
809 ADRENO_REG_DEFINE(ADRENO_REG_UCHE_INVALIDATE0, A4XX_UCHE_INVALIDATE0),
810 ADRENO_REG_DEFINE(ADRENO_REG_UCHE_INVALIDATE1, A4XX_UCHE_INVALIDATE1),
811 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_LO,
812 A4XX_RBBM_PERFCTR_LOAD_VALUE_LO),
813 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_HI,
814 A4XX_RBBM_PERFCTR_LOAD_VALUE_HI),
815 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TRUST_CONTROL,
816 A4XX_RBBM_SECVID_TRUST_CONTROL),
817 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO,
818 A4XX_RBBM_ALWAYSON_COUNTER_LO),
819 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI,
820 A4XX_RBBM_ALWAYSON_COUNTER_HI),
821 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TRUST_CONFIG,
822 A4XX_RBBM_SECVID_TRUST_CONFIG),
823 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TSB_CONTROL,
824 A4XX_RBBM_SECVID_TSB_CONTROL),
825 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE,
826 A4XX_RBBM_SECVID_TSB_TRUSTED_BASE),
827 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_SIZE,
828 A4XX_RBBM_SECVID_TSB_TRUSTED_SIZE),
829 ADRENO_REG_DEFINE(ADRENO_REG_VBIF_XIN_HALT_CTRL0,
830 A4XX_VBIF_XIN_HALT_CTRL0),
831 ADRENO_REG_DEFINE(ADRENO_REG_VBIF_XIN_HALT_CTRL1,
832 A4XX_VBIF_XIN_HALT_CTRL1),
833 ADRENO_REG_DEFINE(ADRENO_REG_VBIF_VERSION,
834 A4XX_VBIF_VERSION),
835};
836
837static const struct adreno_reg_offsets a4xx_reg_offsets = {
838 .offsets = a4xx_register_offsets,
839 .offset_0 = ADRENO_REG_REGISTER_MAX,
840};
841
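/*
 * Physical perfcounter descriptors: each entry maps a LO/HI counter
 * register pair to the select register used to choose its countable
 */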
842static struct adreno_perfcount_register a4xx_perfcounters_cp[] = {
843 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_CP_0_LO,
844 A4XX_RBBM_PERFCTR_CP_0_HI, 0, A4XX_CP_PERFCTR_CP_SEL_0 },
845 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_CP_1_LO,
846 A4XX_RBBM_PERFCTR_CP_1_HI, 1, A4XX_CP_PERFCTR_CP_SEL_1 },
847 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_CP_2_LO,
848 A4XX_RBBM_PERFCTR_CP_2_HI, 2, A4XX_CP_PERFCTR_CP_SEL_2 },
849 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_CP_3_LO,
850 A4XX_RBBM_PERFCTR_CP_3_HI, 3, A4XX_CP_PERFCTR_CP_SEL_3 },
851 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_CP_4_LO,
852 A4XX_RBBM_PERFCTR_CP_4_HI, 4, A4XX_CP_PERFCTR_CP_SEL_4 },
853 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_CP_5_LO,
854 A4XX_RBBM_PERFCTR_CP_5_HI, 5, A4XX_CP_PERFCTR_CP_SEL_5 },
855 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_CP_6_LO,
856 A4XX_RBBM_PERFCTR_CP_6_HI, 6, A4XX_CP_PERFCTR_CP_SEL_6 },
857 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_CP_7_LO,
858 A4XX_RBBM_PERFCTR_CP_7_HI, 7, A4XX_CP_PERFCTR_CP_SEL_7 },
859};
860
/*
 * Special list of CP perfcounter registers for the A420 to account for
 * hardware flaws. This array is swapped into the tables during perfcounter
 * init
 */
865static struct adreno_perfcount_register a420_perfcounters_cp[] = {
866 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_CP_0_LO,
867 A4XX_RBBM_PERFCTR_CP_0_HI, 0, A4XX_CP_PERFCTR_CP_SEL_0 },
868 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_CP_1_LO,
869 A4XX_RBBM_PERFCTR_CP_1_HI, 1, A4XX_CP_PERFCTR_CP_SEL_1 },
	/*
	 * The selector registers for 3, 5, and 7 are swizzled on the hardware.
	 * CP_4 and CP_6 are duplicated to SEL_2 and SEL_3, so we don't enable
	 * them here
	 */
875 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_CP_3_LO,
876 A4XX_RBBM_PERFCTR_CP_3_HI, 3, A4XX_CP_PERFCTR_CP_SEL_2 },
877 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_CP_5_LO,
878 A4XX_RBBM_PERFCTR_CP_5_HI, 5, A4XX_CP_PERFCTR_CP_SEL_3 },
879 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_CP_7_LO,
880 A4XX_RBBM_PERFCTR_CP_7_HI, 7, A4XX_CP_PERFCTR_CP_SEL_4 },
881};
882
883static struct adreno_perfcount_register a4xx_perfcounters_rbbm[] = {
884 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_RBBM_0_LO,
885 A4XX_RBBM_PERFCTR_RBBM_0_HI, 8, A4XX_RBBM_PERFCTR_RBBM_SEL_0 },
886 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_RBBM_1_LO,
887 A4XX_RBBM_PERFCTR_RBBM_1_HI, 9, A4XX_RBBM_PERFCTR_RBBM_SEL_1 },
888 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_RBBM_2_LO,
889 A4XX_RBBM_PERFCTR_RBBM_2_HI, 10, A4XX_RBBM_PERFCTR_RBBM_SEL_2 },
890 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_RBBM_3_LO,
891 A4XX_RBBM_PERFCTR_RBBM_3_HI, 11, A4XX_RBBM_PERFCTR_RBBM_SEL_3 },
892};
893
894static struct adreno_perfcount_register a4xx_perfcounters_pc[] = {
895 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_PC_0_LO,
896 A4XX_RBBM_PERFCTR_PC_0_HI, 12, A4XX_PC_PERFCTR_PC_SEL_0 },
897 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_PC_1_LO,
898 A4XX_RBBM_PERFCTR_PC_1_HI, 13, A4XX_PC_PERFCTR_PC_SEL_1 },
899 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_PC_2_LO,
900 A4XX_RBBM_PERFCTR_PC_2_HI, 14, A4XX_PC_PERFCTR_PC_SEL_2 },
901 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_PC_3_LO,
902 A4XX_RBBM_PERFCTR_PC_3_HI, 15, A4XX_PC_PERFCTR_PC_SEL_3 },
903 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_PC_4_LO,
904 A4XX_RBBM_PERFCTR_PC_4_HI, 16, A4XX_PC_PERFCTR_PC_SEL_4 },
905 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_PC_5_LO,
906 A4XX_RBBM_PERFCTR_PC_5_HI, 17, A4XX_PC_PERFCTR_PC_SEL_5 },
907 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_PC_6_LO,
908 A4XX_RBBM_PERFCTR_PC_6_HI, 18, A4XX_PC_PERFCTR_PC_SEL_6 },
909 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_PC_7_LO,
910 A4XX_RBBM_PERFCTR_PC_7_HI, 19, A4XX_PC_PERFCTR_PC_SEL_7 },
911};
912
913static struct adreno_perfcount_register a4xx_perfcounters_vfd[] = {
914 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_VFD_0_LO,
915 A4XX_RBBM_PERFCTR_VFD_0_HI, 20, A4XX_VFD_PERFCTR_VFD_SEL_0 },
916 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_VFD_1_LO,
917 A4XX_RBBM_PERFCTR_VFD_1_HI, 21, A4XX_VFD_PERFCTR_VFD_SEL_1 },
918 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_VFD_2_LO,
919 A4XX_RBBM_PERFCTR_VFD_2_HI, 22, A4XX_VFD_PERFCTR_VFD_SEL_2 },
920 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_VFD_3_LO,
921 A4XX_RBBM_PERFCTR_VFD_3_HI, 23, A4XX_VFD_PERFCTR_VFD_SEL_3 },
922 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_VFD_4_LO,
923 A4XX_RBBM_PERFCTR_VFD_4_HI, 24, A4XX_VFD_PERFCTR_VFD_SEL_4 },
924 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_VFD_5_LO,
925 A4XX_RBBM_PERFCTR_VFD_5_HI, 25, A4XX_VFD_PERFCTR_VFD_SEL_5 },
926 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_VFD_6_LO,
927 A4XX_RBBM_PERFCTR_VFD_6_HI, 26, A4XX_VFD_PERFCTR_VFD_SEL_6 },
928 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_VFD_7_LO,
929 A4XX_RBBM_PERFCTR_VFD_7_HI, 27, A4XX_VFD_PERFCTR_VFD_SEL_7 },
930};
931
932static struct adreno_perfcount_register a4xx_perfcounters_hlsq[] = {
933 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_HLSQ_0_LO,
934 A4XX_RBBM_PERFCTR_HLSQ_0_HI, 28, A4XX_HLSQ_PERFCTR_HLSQ_SEL_0 },
935 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_HLSQ_1_LO,
936 A4XX_RBBM_PERFCTR_HLSQ_1_HI, 29, A4XX_HLSQ_PERFCTR_HLSQ_SEL_1 },
937 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_HLSQ_2_LO,
938 A4XX_RBBM_PERFCTR_HLSQ_2_HI, 30, A4XX_HLSQ_PERFCTR_HLSQ_SEL_2 },
939 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_HLSQ_3_LO,
940 A4XX_RBBM_PERFCTR_HLSQ_3_HI, 31, A4XX_HLSQ_PERFCTR_HLSQ_SEL_3 },
941 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_HLSQ_4_LO,
942 A4XX_RBBM_PERFCTR_HLSQ_4_HI, 32, A4XX_HLSQ_PERFCTR_HLSQ_SEL_4 },
943 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_HLSQ_5_LO,
944 A4XX_RBBM_PERFCTR_HLSQ_5_HI, 33, A4XX_HLSQ_PERFCTR_HLSQ_SEL_5 },
945 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_HLSQ_6_LO,
946 A4XX_RBBM_PERFCTR_HLSQ_6_HI, 34, A4XX_HLSQ_PERFCTR_HLSQ_SEL_6 },
947 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_HLSQ_7_LO,
948 A4XX_RBBM_PERFCTR_HLSQ_7_HI, 35, A4XX_HLSQ_PERFCTR_HLSQ_SEL_7 },
949};
950
951static struct adreno_perfcount_register a4xx_perfcounters_vpc[] = {
952 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_VPC_0_LO,
953 A4XX_RBBM_PERFCTR_VPC_0_HI, 36, A4XX_VPC_PERFCTR_VPC_SEL_0 },
954 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_VPC_1_LO,
955 A4XX_RBBM_PERFCTR_VPC_1_HI, 37, A4XX_VPC_PERFCTR_VPC_SEL_1 },
956 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_VPC_2_LO,
957 A4XX_RBBM_PERFCTR_VPC_2_HI, 38, A4XX_VPC_PERFCTR_VPC_SEL_2 },
958 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_VPC_3_LO,
959 A4XX_RBBM_PERFCTR_VPC_3_HI, 39, A4XX_VPC_PERFCTR_VPC_SEL_3 },
960};
961
962static struct adreno_perfcount_register a4xx_perfcounters_ccu[] = {
963 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_CCU_0_LO,
964 A4XX_RBBM_PERFCTR_CCU_0_HI, 40, A4XX_RB_PERFCTR_CCU_SEL_0 },
965 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_CCU_1_LO,
966 A4XX_RBBM_PERFCTR_CCU_1_HI, 41, A4XX_RB_PERFCTR_CCU_SEL_1 },
967 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_CCU_2_LO,
968 A4XX_RBBM_PERFCTR_CCU_2_HI, 42, A4XX_RB_PERFCTR_CCU_SEL_2 },
969 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_CCU_3_LO,
970 A4XX_RBBM_PERFCTR_CCU_3_HI, 43, A4XX_RB_PERFCTR_CCU_SEL_3 },
971};
972
973static struct adreno_perfcount_register a4xx_perfcounters_tse[] = {
974 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_TSE_0_LO,
975 A4XX_RBBM_PERFCTR_TSE_0_HI, 44, A4XX_GRAS_PERFCTR_TSE_SEL_0 },
976 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_TSE_1_LO,
977 A4XX_RBBM_PERFCTR_TSE_1_HI, 45, A4XX_GRAS_PERFCTR_TSE_SEL_1 },
978 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_TSE_2_LO,
979 A4XX_RBBM_PERFCTR_TSE_2_HI, 46, A4XX_GRAS_PERFCTR_TSE_SEL_2 },
980 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_TSE_3_LO,
981 A4XX_RBBM_PERFCTR_TSE_3_HI, 47, A4XX_GRAS_PERFCTR_TSE_SEL_3 },
982};
983
984
985static struct adreno_perfcount_register a4xx_perfcounters_ras[] = {
986 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_RAS_0_LO,
987 A4XX_RBBM_PERFCTR_RAS_0_HI, 48, A4XX_GRAS_PERFCTR_RAS_SEL_0 },
988 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_RAS_1_LO,
989 A4XX_RBBM_PERFCTR_RAS_1_HI, 49, A4XX_GRAS_PERFCTR_RAS_SEL_1 },
990 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_RAS_2_LO,
991 A4XX_RBBM_PERFCTR_RAS_2_HI, 50, A4XX_GRAS_PERFCTR_RAS_SEL_2 },
992 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_RAS_3_LO,
993 A4XX_RBBM_PERFCTR_RAS_3_HI, 51, A4XX_GRAS_PERFCTR_RAS_SEL_3 },
994};
995
996static struct adreno_perfcount_register a4xx_perfcounters_uche[] = {
997 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_UCHE_0_LO,
998 A4XX_RBBM_PERFCTR_UCHE_0_HI, 52, A4XX_UCHE_PERFCTR_UCHE_SEL_0 },
999 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_UCHE_1_LO,
1000 A4XX_RBBM_PERFCTR_UCHE_1_HI, 53, A4XX_UCHE_PERFCTR_UCHE_SEL_1 },
1001 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_UCHE_2_LO,
1002 A4XX_RBBM_PERFCTR_UCHE_2_HI, 54, A4XX_UCHE_PERFCTR_UCHE_SEL_2 },
1003 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_UCHE_3_LO,
1004 A4XX_RBBM_PERFCTR_UCHE_3_HI, 55, A4XX_UCHE_PERFCTR_UCHE_SEL_3 },
1005 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_UCHE_4_LO,
1006 A4XX_RBBM_PERFCTR_UCHE_4_HI, 56, A4XX_UCHE_PERFCTR_UCHE_SEL_4 },
1007 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_UCHE_5_LO,
1008 A4XX_RBBM_PERFCTR_UCHE_5_HI, 57, A4XX_UCHE_PERFCTR_UCHE_SEL_5 },
1009 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_UCHE_6_LO,
1010 A4XX_RBBM_PERFCTR_UCHE_6_HI, 58, A4XX_UCHE_PERFCTR_UCHE_SEL_6 },
1011 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_UCHE_7_LO,
1012 A4XX_RBBM_PERFCTR_UCHE_7_HI, 59, A4XX_UCHE_PERFCTR_UCHE_SEL_7 },
1013};
1014
1015static struct adreno_perfcount_register a4xx_perfcounters_tp[] = {
1016 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_TP_0_LO,
1017 A4XX_RBBM_PERFCTR_TP_0_HI, 60, A4XX_TPL1_PERFCTR_TP_SEL_0 },
1018 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_TP_1_LO,
1019 A4XX_RBBM_PERFCTR_TP_1_HI, 61, A4XX_TPL1_PERFCTR_TP_SEL_1 },
1020 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_TP_2_LO,
1021 A4XX_RBBM_PERFCTR_TP_2_HI, 62, A4XX_TPL1_PERFCTR_TP_SEL_2 },
1022 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_TP_3_LO,
1023 A4XX_RBBM_PERFCTR_TP_3_HI, 63, A4XX_TPL1_PERFCTR_TP_SEL_3 },
1024 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_TP_4_LO,
1025 A4XX_RBBM_PERFCTR_TP_4_HI, 64, A4XX_TPL1_PERFCTR_TP_SEL_4 },
1026 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_TP_5_LO,
1027 A4XX_RBBM_PERFCTR_TP_5_HI, 65, A4XX_TPL1_PERFCTR_TP_SEL_5 },
1028 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_TP_6_LO,
1029 A4XX_RBBM_PERFCTR_TP_6_HI, 66, A4XX_TPL1_PERFCTR_TP_SEL_6 },
1030 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_TP_7_LO,
1031 A4XX_RBBM_PERFCTR_TP_7_HI, 67, A4XX_TPL1_PERFCTR_TP_SEL_7 },
1032};
1033
1034static struct adreno_perfcount_register a4xx_perfcounters_sp[] = {
1035 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_SP_0_LO,
1036 A4XX_RBBM_PERFCTR_SP_0_HI, 68, A4XX_SP_PERFCTR_SP_SEL_0 },
1037 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_SP_1_LO,
1038 A4XX_RBBM_PERFCTR_SP_1_HI, 69, A4XX_SP_PERFCTR_SP_SEL_1 },
1039 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_SP_2_LO,
1040 A4XX_RBBM_PERFCTR_SP_2_HI, 70, A4XX_SP_PERFCTR_SP_SEL_2 },
1041 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_SP_3_LO,
1042 A4XX_RBBM_PERFCTR_SP_3_HI, 71, A4XX_SP_PERFCTR_SP_SEL_3 },
1043 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_SP_4_LO,
1044 A4XX_RBBM_PERFCTR_SP_4_HI, 72, A4XX_SP_PERFCTR_SP_SEL_4 },
1045 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_SP_5_LO,
1046 A4XX_RBBM_PERFCTR_SP_5_HI, 73, A4XX_SP_PERFCTR_SP_SEL_5 },
1047 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_SP_6_LO,
1048 A4XX_RBBM_PERFCTR_SP_6_HI, 74, A4XX_SP_PERFCTR_SP_SEL_6 },
1049 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_SP_7_LO,
1050 A4XX_RBBM_PERFCTR_SP_7_HI, 75, A4XX_SP_PERFCTR_SP_SEL_7 },
1051 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_SP_8_LO,
1052 A4XX_RBBM_PERFCTR_SP_8_HI, 76, A4XX_SP_PERFCTR_SP_SEL_8 },
1053 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_SP_9_LO,
1054 A4XX_RBBM_PERFCTR_SP_9_HI, 77, A4XX_SP_PERFCTR_SP_SEL_9 },
1055 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_SP_10_LO,
1056 A4XX_RBBM_PERFCTR_SP_10_HI, 78, A4XX_SP_PERFCTR_SP_SEL_10 },
1057 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_SP_11_LO,
1058 A4XX_RBBM_PERFCTR_SP_11_HI, 79, A4XX_SP_PERFCTR_SP_SEL_11 },
1059};
1060
1061static struct adreno_perfcount_register a4xx_perfcounters_rb[] = {
1062 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_RB_0_LO,
1063 A4XX_RBBM_PERFCTR_RB_0_HI, 80, A4XX_RB_PERFCTR_RB_SEL_0 },
1064 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_RB_1_LO,
1065 A4XX_RBBM_PERFCTR_RB_1_HI, 81, A4XX_RB_PERFCTR_RB_SEL_1 },
1066 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_RB_2_LO,
1067 A4XX_RBBM_PERFCTR_RB_2_HI, 82, A4XX_RB_PERFCTR_RB_SEL_2 },
1068 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_RB_3_LO,
1069 A4XX_RBBM_PERFCTR_RB_3_HI, 83, A4XX_RB_PERFCTR_RB_SEL_3 },
1070 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_RB_4_LO,
1071 A4XX_RBBM_PERFCTR_RB_4_HI, 84, A4XX_RB_PERFCTR_RB_SEL_4 },
1072 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_RB_5_LO,
1073 A4XX_RBBM_PERFCTR_RB_5_HI, 85, A4XX_RB_PERFCTR_RB_SEL_5 },
1074 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_RB_6_LO,
1075 A4XX_RBBM_PERFCTR_RB_6_HI, 86, A4XX_RB_PERFCTR_RB_SEL_6 },
1076 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_RB_7_LO,
1077 A4XX_RBBM_PERFCTR_RB_7_HI, 87, A4XX_RB_PERFCTR_RB_SEL_7 },
1078};
1079
1080static struct adreno_perfcount_register a4xx_perfcounters_vsc[] = {
1081 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_VSC_0_LO,
1082 A4XX_RBBM_PERFCTR_VSC_0_HI, 88, A4XX_VSC_PERFCTR_VSC_SEL_0 },
1083 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_VSC_1_LO,
1084 A4XX_RBBM_PERFCTR_VSC_1_HI, 89, A4XX_VSC_PERFCTR_VSC_SEL_1 },
1085};
1086
1087static struct adreno_perfcount_register a4xx_perfcounters_pwr[] = {
1088 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_PWR_0_LO,
1089 A4XX_RBBM_PERFCTR_PWR_0_HI, -1, 0 },
1090 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_PERFCTR_PWR_1_LO,
1091 A4XX_RBBM_PERFCTR_PWR_1_HI, -1, 0},
1092};
1093
1094static struct adreno_perfcount_register a4xx_perfcounters_vbif[] = {
1095 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_VBIF_PERF_CNT_LOW0,
1096 A4XX_VBIF_PERF_CNT_HIGH0, -1, A4XX_VBIF_PERF_CNT_SEL0 },
1097 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_VBIF_PERF_CNT_LOW1,
1098 A4XX_VBIF_PERF_CNT_HIGH1, -1, A4XX_VBIF_PERF_CNT_SEL1 },
1099 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_VBIF_PERF_CNT_LOW2,
1100 A4XX_VBIF_PERF_CNT_HIGH2, -1, A4XX_VBIF_PERF_CNT_SEL2 },
1101 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_VBIF_PERF_CNT_LOW3,
1102 A4XX_VBIF_PERF_CNT_HIGH3, -1, A4XX_VBIF_PERF_CNT_SEL3 },
1103};
1104
1105static struct adreno_perfcount_register a4xx_perfcounters_vbif_pwr[] = {
1106 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_VBIF_PERF_PWR_CNT_LOW0,
1107 A4XX_VBIF_PERF_PWR_CNT_HIGH0, -1, A4XX_VBIF_PERF_PWR_CNT_EN0 },
1108 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_VBIF_PERF_PWR_CNT_LOW1,
1109 A4XX_VBIF_PERF_PWR_CNT_HIGH1, -1, A4XX_VBIF_PERF_PWR_CNT_EN1 },
1110 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_VBIF_PERF_PWR_CNT_LOW2,
1111 A4XX_VBIF_PERF_PWR_CNT_HIGH2, -1, A4XX_VBIF_PERF_PWR_CNT_EN2 },
1112 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_VBIF_PERF_PWR_CNT_LOW3,
1113 A4XX_VBIF_PERF_PWR_CNT_HIGH3, -1, A4XX_VBIF_PERF_PWR_CNT_EN3 },
1114};
1115
1116static struct adreno_perfcount_register a4xx_perfcounters_alwayson[] = {
1117 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A4XX_RBBM_ALWAYSON_COUNTER_LO,
1118 A4XX_RBBM_ALWAYSON_COUNTER_HI, -1 },
1119};
1120
1121#define A4XX_PERFCOUNTER_GROUP(offset, name) \
1122 ADRENO_PERFCOUNTER_GROUP(a4xx, offset, name)
1123
1124#define A4XX_PERFCOUNTER_GROUP_FLAGS(offset, name, flags) \
1125 ADRENO_PERFCOUNTER_GROUP_FLAGS(a4xx, offset, name, flags)
1126
1127static struct adreno_perfcount_group a4xx_perfcounter_groups
1128 [KGSL_PERFCOUNTER_GROUP_MAX] = {
1129 A4XX_PERFCOUNTER_GROUP(CP, cp),
1130 A4XX_PERFCOUNTER_GROUP(RBBM, rbbm),
1131 A4XX_PERFCOUNTER_GROUP(PC, pc),
1132 A4XX_PERFCOUNTER_GROUP(VFD, vfd),
1133 A4XX_PERFCOUNTER_GROUP(HLSQ, hlsq),
1134 A4XX_PERFCOUNTER_GROUP(VPC, vpc),
1135 A4XX_PERFCOUNTER_GROUP(CCU, ccu),
1136 A4XX_PERFCOUNTER_GROUP(TSE, tse),
1137 A4XX_PERFCOUNTER_GROUP(RAS, ras),
1138 A4XX_PERFCOUNTER_GROUP(UCHE, uche),
1139 A4XX_PERFCOUNTER_GROUP(TP, tp),
1140 A4XX_PERFCOUNTER_GROUP(SP, sp),
1141 A4XX_PERFCOUNTER_GROUP(RB, rb),
1142 A4XX_PERFCOUNTER_GROUP(VSC, vsc),
1143 A4XX_PERFCOUNTER_GROUP_FLAGS(PWR, pwr,
1144 ADRENO_PERFCOUNTER_GROUP_FIXED),
1145 A4XX_PERFCOUNTER_GROUP(VBIF, vbif),
1146 A4XX_PERFCOUNTER_GROUP_FLAGS(VBIF_PWR, vbif_pwr,
1147 ADRENO_PERFCOUNTER_GROUP_FIXED),
1148 A4XX_PERFCOUNTER_GROUP_FLAGS(ALWAYSON, alwayson,
1149 ADRENO_PERFCOUNTER_GROUP_FIXED),
1150};
1151
1152static struct adreno_perfcounters a4xx_perfcounters = {
1153 a4xx_perfcounter_groups,
1154 ARRAY_SIZE(a4xx_perfcounter_groups),
1155};
1156
1157static struct adreno_ft_perf_counters a4xx_ft_perf_counters[] = {
1158 {KGSL_PERFCOUNTER_GROUP_SP, A4XX_SP_ALU_ACTIVE_CYCLES},
1159 {KGSL_PERFCOUNTER_GROUP_SP, A4XX_SP0_ICL1_MISSES},
1160 {KGSL_PERFCOUNTER_GROUP_SP, A4XX_SP_FS_CFLOW_INSTRUCTIONS},
1161 {KGSL_PERFCOUNTER_GROUP_TSE, A4XX_TSE_INPUT_PRIM_NUM},
1162};
1163
/*
 * On A420 a number of perfcounter countables are unusable. The following
 * arrays list the countables that do not work and should not be used
 */
1168static const unsigned int a420_pc_invalid_countables[] = {
1169 PC_INSTANCES, PC_VERTEX_HITS, PC_GENERATED_FIBERS, PC_GENERATED_WAVES,
1170};
1171
1172static const unsigned int a420_vfd_invalid_countables[] = {
1173 VFD_VPC_BYPASS_TRANS, VFD_UPPER_SHADER_FIBERS, VFD_LOWER_SHADER_FIBERS,
1174};
1175
1176static const unsigned int a420_hlsq_invalid_countables[] = {
1177 HLSQ_SP_VS_STAGE_CONSTANT, HLSQ_SP_VS_STAGE_INSTRUCTIONS,
1178 HLSQ_SP_FS_STAGE_CONSTANT, HLSQ_SP_FS_STAGE_INSTRUCTIONS,
1179 HLSQ_FS_STAGE_16_WAVES, HLSQ_FS_STAGE_32_WAVES, HLSQ_FS_STAGE_64_WAVES,
1180 HLSQ_VS_STAGE_16_WAVES, HLSQ_VS_STAGE_32_WAVES,
1181};
1182
1183static const unsigned int a420_uche_invalid_countables[] = {
1184 UCHE_READ_REQUESTS_MARB, UCHE_READ_REQUESTS_SP,
1185 UCHE_WRITE_REQUESTS_MARB, UCHE_WRITE_REQUESTS_SP,
1186 UCHE_WRITE_REQUESTS_VPC
1187};
1188
1189static const unsigned int a420_tp_invalid_countables[] = {
1190 TP_OUTPUT_TEXELS_POINT, TP_OUTPUT_TEXELS_BILINEAR, TP_OUTPUT_TEXELS_MIP,
1191 TP_OUTPUT_TEXELS_ANISO, TP_OUTPUT_TEXELS_OPS16, TP_OUTPUT_TEXELS_OPS32,
1192 TP_ZERO_LOD, TP_LATENCY, TP_LATENCY_TRANS,
1193};
1194
1195static const unsigned int a420_sp_invalid_countables[] = {
1196 SP_FS_STAGE_BARY_INSTRUCTIONS,
1197};
1198
1199static const unsigned int a420_rb_invalid_countables[] = {
1200 RB_VALID_SAMPLES, RB_Z_FAIL, RB_S_FAIL,
1201};
1202
1203static const unsigned int a420_ccu_invalid_countables[] = {
1204 CCU_VBIF_STALL, CCU_VBIF_LATENCY_CYCLES, CCU_VBIF_LATENCY_SAMPLES,
1205 CCU_Z_READ, CCU_Z_WRITE, CCU_C_READ, CCU_C_WRITE,
1206};
1207
1208static const struct adreno_invalid_countables
1209 a420_perfctr_invalid_countables[KGSL_PERFCOUNTER_GROUP_MAX] = {
1210 ADRENO_PERFCOUNTER_INVALID_COUNTABLE(a420_pc, PC),
1211 ADRENO_PERFCOUNTER_INVALID_COUNTABLE(a420_vfd, VFD),
1212 ADRENO_PERFCOUNTER_INVALID_COUNTABLE(a420_hlsq, HLSQ),
1213 ADRENO_PERFCOUNTER_INVALID_COUNTABLE(a420_tp, TP),
1214 ADRENO_PERFCOUNTER_INVALID_COUNTABLE(a420_sp, SP),
1215 ADRENO_PERFCOUNTER_INVALID_COUNTABLE(a420_rb, RB),
1216 ADRENO_PERFCOUNTER_INVALID_COUNTABLE(a420_ccu, CCU),
1217 ADRENO_PERFCOUNTER_INVALID_COUNTABLE(a420_uche, UCHE),
1218};
1219
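/* RBBM debug-bus configuration registers exposed through the coresight interface */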
1220static struct adreno_coresight_register a4xx_coresight_registers[] = {
1221 { A4XX_RBBM_CFG_DEBBUS_CTLT },
1222 { A4XX_RBBM_CFG_DEBBUS_SEL_A },
1223 { A4XX_RBBM_CFG_DEBBUS_SEL_B },
1224 { A4XX_RBBM_CFG_DEBBUS_SEL_C },
1225 { A4XX_RBBM_CFG_DEBBUS_SEL_D },
1226 { A4XX_RBBM_CFG_DEBBUS_OPL },
1227 { A4XX_RBBM_CFG_DEBBUS_OPE },
1228 { A4XX_RBBM_CFG_DEBBUS_IVTL_0 },
1229 { A4XX_RBBM_CFG_DEBBUS_IVTL_1 },
1230 { A4XX_RBBM_CFG_DEBBUS_IVTL_2 },
1231 { A4XX_RBBM_CFG_DEBBUS_IVTL_3 },
1232 { A4XX_RBBM_CFG_DEBBUS_MASKL_0 },
1233 { A4XX_RBBM_CFG_DEBBUS_MASKL_1 },
1234 { A4XX_RBBM_CFG_DEBBUS_MASKL_2 },
1235 { A4XX_RBBM_CFG_DEBBUS_MASKL_3 },
1236 { A4XX_RBBM_CFG_DEBBUS_BYTEL_0 },
1237 { A4XX_RBBM_CFG_DEBBUS_BYTEL_1 },
1238 { A4XX_RBBM_CFG_DEBBUS_IVTE_0 },
1239 { A4XX_RBBM_CFG_DEBBUS_IVTE_1 },
1240 { A4XX_RBBM_CFG_DEBBUS_IVTE_2 },
1241 { A4XX_RBBM_CFG_DEBBUS_IVTE_3 },
1242 { A4XX_RBBM_CFG_DEBBUS_MASKE_0 },
1243 { A4XX_RBBM_CFG_DEBBUS_MASKE_1 },
1244 { A4XX_RBBM_CFG_DEBBUS_MASKE_2 },
1245 { A4XX_RBBM_CFG_DEBBUS_MASKE_3 },
1246 { A4XX_RBBM_CFG_DEBBUS_NIBBLEE },
1247 { A4XX_RBBM_CFG_DEBBUS_PTRC0 },
1248 { A4XX_RBBM_CFG_DEBBUS_PTRC1 },
1249 { A4XX_RBBM_CFG_DEBBUS_CLRC },
1250 { A4XX_RBBM_CFG_DEBBUS_LOADIVT },
1251 { A4XX_RBBM_CFG_DEBBUS_IDX },
1252 { A4XX_RBBM_CFG_DEBBUS_LOADREG },
1253 { A4XX_RBBM_EXT_TRACE_BUS_CTL },
1254 { A4XX_RBBM_CFG_DEBBUS_CTLM },
1255};
1256
1257static void a4xx_perfcounter_init(struct adreno_device *adreno_dev)
1258{
1259 if (adreno_is_a420(adreno_dev)) {
1260 struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
1261 struct adreno_perfcounters *counters = gpudev->perfcounters;
1262
1263 /*
1264 * The CP counters on A420 are... special. Some of the counters
1265 * are swizzled so only a subset of them are usable
1266 */
1267
1268 if (counters != NULL) {
1269 counters->groups[KGSL_PERFCOUNTER_GROUP_CP].regs =
1270 a420_perfcounters_cp;
1271 counters->groups[KGSL_PERFCOUNTER_GROUP_CP].reg_count =
1272 ARRAY_SIZE(a420_perfcounters_cp);
1273 }
1274
1275 /*
1276 * Also on A420 a number of the countables are not functional so
1277 * we maintain a blacklist of countables to protect the user
1278 */
1279
1280 gpudev->invalid_countables = a420_perfctr_invalid_countables;
1281 }
1282
	/*
	 * Enable the GPU busy counter. This is a fixed counter on
	 * A4XX so we don't need to bother checking the return value
	 */
1287
1288 adreno_perfcounter_get(adreno_dev, KGSL_PERFCOUNTER_GROUP_PWR, 1,
1289 NULL, NULL, PERFCOUNTER_FLAG_KERNEL);
1290}
1291
1292static void a4xx_perfcounter_close(struct adreno_device *adreno_dev)
1293{
1294 adreno_perfcounter_put(adreno_dev, KGSL_PERFCOUNTER_GROUP_PWR, 1,
1295 PERFCOUNTER_FLAG_KERNEL);
1296}
1297
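/* Fragment shader microcode for the power-on fixup IB constructed below */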
1298static const unsigned int _a4xx_pwron_fixup_fs_instructions[] = {
1299 0x00000000, 0x304CC300, 0x00000000, 0x304CC304,
1300 0x00000000, 0x304CC308, 0x00000000, 0x304CC30C,
1301 0x00000000, 0x304CC310, 0x00000000, 0x304CC314,
1302 0x00000000, 0x304CC318, 0x00000000, 0x304CC31C,
1303 0x00000000, 0x304CC320, 0x00000000, 0x304CC324,
1304 0x00000000, 0x304CC328, 0x00000000, 0x304CC32C,
1305 0x00000000, 0x304CC330, 0x00000000, 0x304CC334,
1306 0x00000000, 0x304CC338, 0x00000000, 0x304CC33C,
1307 0x00000000, 0x00000400, 0x00020000, 0x63808003,
1308 0x00060004, 0x63828007, 0x000A0008, 0x6384800B,
1309 0x000E000C, 0x6386800F, 0x00120010, 0x63888013,
1310 0x00160014, 0x638A8017, 0x001A0018, 0x638C801B,
1311 0x001E001C, 0x638E801F, 0x00220020, 0x63908023,
1312 0x00260024, 0x63928027, 0x002A0028, 0x6394802B,
1313 0x002E002C, 0x6396802F, 0x00320030, 0x63988033,
1314 0x00360034, 0x639A8037, 0x003A0038, 0x639C803B,
1315 0x003E003C, 0x639E803F, 0x00000000, 0x00000400,
1316 0x00000003, 0x80D00003, 0x00000007, 0x80D00007,
1317 0x0000000B, 0x80D0000B, 0x0000000F, 0x80D0000F,
1318 0x00000013, 0x80D00013, 0x00000017, 0x80D00017,
1319 0x0000001B, 0x80D0001B, 0x0000001F, 0x80D0001F,
1320 0x00000023, 0x80D00023, 0x00000027, 0x80D00027,
1321 0x0000002B, 0x80D0002B, 0x0000002F, 0x80D0002F,
1322 0x00000033, 0x80D00033, 0x00000037, 0x80D00037,
1323 0x0000003B, 0x80D0003B, 0x0000003F, 0x80D0003F,
1324 0x00000000, 0x00000400, 0xFFFFFFFF, 0x304CC300,
1325 0xFFFFFFFF, 0x304CC304, 0xFFFFFFFF, 0x304CC308,
1326 0xFFFFFFFF, 0x304CC30C, 0xFFFFFFFF, 0x304CC310,
1327 0xFFFFFFFF, 0x304CC314, 0xFFFFFFFF, 0x304CC318,
1328 0xFFFFFFFF, 0x304CC31C, 0xFFFFFFFF, 0x304CC320,
1329 0xFFFFFFFF, 0x304CC324, 0xFFFFFFFF, 0x304CC328,
1330 0xFFFFFFFF, 0x304CC32C, 0xFFFFFFFF, 0x304CC330,
1331 0xFFFFFFFF, 0x304CC334, 0xFFFFFFFF, 0x304CC338,
1332 0xFFFFFFFF, 0x304CC33C, 0x00000000, 0x00000400,
1333 0x00020000, 0x63808003, 0x00060004, 0x63828007,
1334 0x000A0008, 0x6384800B, 0x000E000C, 0x6386800F,
1335 0x00120010, 0x63888013, 0x00160014, 0x638A8017,
1336 0x001A0018, 0x638C801B, 0x001E001C, 0x638E801F,
1337 0x00220020, 0x63908023, 0x00260024, 0x63928027,
1338 0x002A0028, 0x6394802B, 0x002E002C, 0x6396802F,
1339 0x00320030, 0x63988033, 0x00360034, 0x639A8037,
1340 0x003A0038, 0x639C803B, 0x003E003C, 0x639E803F,
1341 0x00000000, 0x00000400, 0x00000003, 0x80D00003,
1342 0x00000007, 0x80D00007, 0x0000000B, 0x80D0000B,
1343 0x0000000F, 0x80D0000F, 0x00000013, 0x80D00013,
1344 0x00000017, 0x80D00017, 0x0000001B, 0x80D0001B,
1345 0x0000001F, 0x80D0001F, 0x00000023, 0x80D00023,
1346 0x00000027, 0x80D00027, 0x0000002B, 0x80D0002B,
1347 0x0000002F, 0x80D0002F, 0x00000033, 0x80D00033,
1348 0x00000037, 0x80D00037, 0x0000003B, 0x80D0003B,
1349 0x0000003F, 0x80D0003F, 0x00000000, 0x03000000,
1350 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1351 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1352 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1353 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1354 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1355};
1356
1357/**
1358 * _a4xx_pwron_fixup() - Initialize a special command buffer to run a
1359 * post-power collapse shader workaround
 * @adreno_dev: Pointer to an adreno_device struct
 *
 * Some targets require a special workaround shader to be executed after
 * power collapse. Construct the IB once at init time and keep it
 * handy.
 *
 * Returns: 0 on success or a negative error code on failure
1367 */
1368static int _a4xx_pwron_fixup(struct adreno_device *adreno_dev)
1369{
1370 unsigned int *cmds;
1371 unsigned int count = ARRAY_SIZE(_a4xx_pwron_fixup_fs_instructions);
1372 unsigned int num_units = count >> 5;
1373 int ret;
1374
1375 /* Return if the fixup is already in place */
1376 if (test_bit(ADRENO_DEVICE_PWRON_FIXUP, &adreno_dev->priv))
1377 return 0;
1378
1379 ret = kgsl_allocate_global(KGSL_DEVICE(adreno_dev),
1380 &adreno_dev->pwron_fixup, PAGE_SIZE,
1381 KGSL_MEMFLAGS_GPUREADONLY, 0, "pwron_fixup");
1382
1383 if (ret)
1384 return ret;
1385
1386 cmds = adreno_dev->pwron_fixup.hostptr;
1387
1388 *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
1389 *cmds++ = 0x00000000;
1390 *cmds++ = cp_type0_packet(A4XX_SP_MODE_CONTROL, 1);
1391 *cmds++ = 0x00000018;
1392 *cmds++ = cp_type0_packet(A4XX_TPL1_TP_MODE_CONTROL, 1);
1393 *cmds++ = 0x00000002;
1394 *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
1395 *cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A4XX_HLSQ_CONTROL_0, 5);
1397 *cmds++ = 0x800001a0;
1398 *cmds++ = 0xfcfc0000;
1399 *cmds++ = 0xcff3f3f0;
1400 *cmds++ = 0xfcfcfcfc;
1401 *cmds++ = 0xccfcfcfc;
1402 *cmds++ = cp_type0_packet(A4XX_SP_FS_CTRL_1, 1);
1403 *cmds++ = 0x80000000;
1404 *cmds++ = cp_type0_packet(A4XX_HLSQ_UPDATE_CONTROL, 1);
1405 *cmds++ = 0x00000038;
1406 *cmds++ = cp_type0_packet(A4XX_HLSQ_MODE_CONTROL, 1);
1407 *cmds++ = 0x00000003;
1408 *cmds++ = cp_type0_packet(A4XX_HLSQ_UPDATE_CONTROL, 1);
1409 *cmds++ = 0x00000000;
1410 *cmds++ = cp_type0_packet(A4XX_TPL1_TP_TEX_TSIZE_1, 1);
1411 *cmds++ = 0x00008000;
	*cmds++ = cp_type0_packet(A4XX_HLSQ_CONTROL_0, 2);
1413 *cmds++ = 0x800001a0;
1414 *cmds++ = 0xfcfc0000;
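	/*
	 * Program HLSQ_CS_CONTROL; the instruction unit count is packed into
	 * the top byte of the value written below.
	 */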
1415 *cmds++ = cp_type0_packet(A4XX_HLSQ_CS_CONTROL, 1);
1416 *cmds++ = 0x00018030 | (num_units << 24);
1417 *cmds++ = cp_type0_packet(A4XX_HLSQ_CL_NDRANGE_0, 7);
1418 *cmds++ = 0x000000fd;
1419 *cmds++ = 0x00000040;
1420 *cmds++ = 0x00000000;
1421 *cmds++ = 0x00000001;
1422 *cmds++ = 0x00000000;
1423 *cmds++ = 0x00000001;
1424 *cmds++ = 0x00000000;
1425 *cmds++ = cp_type0_packet(A4XX_HLSQ_CL_CONTROL_0, 2);
1426 *cmds++ = 0x0001201f;
1427 *cmds++ = 0x0000f003;
1428 *cmds++ = cp_type0_packet(A4XX_HLSQ_CL_KERNEL_CONST, 1);
1429 *cmds++ = 0x0001800b;
1430 *cmds++ = cp_type0_packet(A4XX_HLSQ_CL_KERNEL_GROUP_X, 3);
1431 *cmds++ = 0x00000001;
1432 *cmds++ = 0x00000001;
1433 *cmds++ = 0x00000001;
1434 *cmds++ = cp_type0_packet(A4XX_HLSQ_CL_WG_OFFSET, 1);
1435 *cmds++ = 0x00000022;
1436 *cmds++ = cp_type0_packet(A4XX_UCHE_INVALIDATE0, 2);
1437 *cmds++ = 0x00000000;
1438 *cmds++ = 0x00000012;
1439 *cmds++ = cp_type0_packet(A4XX_HLSQ_MODE_CONTROL, 1);
1440 *cmds++ = 0x00000003;
1441 *cmds++ = cp_type0_packet(A4XX_SP_SP_CTRL, 1);
1442 *cmds++ = 0x00920000;
1443 *cmds++ = cp_type0_packet(A4XX_SP_INSTR_CACHE_CTRL, 1);
1444 *cmds++ = 0x00000260;
1445 *cmds++ = cp_type0_packet(A4XX_SP_CS_CTRL_0, 1);
1446 *cmds++ = 0x00200400;
1447 *cmds++ = cp_type0_packet(A4XX_SP_CS_OBJ_OFFSET, 1);
1448 *cmds++ = 0x00000000;
1449 *cmds++ = cp_type0_packet(A4XX_SP_CS_OBJ_START, 1);
1450 *cmds++ = 0x00000000;
1451 *cmds++ = cp_type0_packet(A4XX_SP_CS_LENGTH, 1);
1452 *cmds++ = num_units;
1453 *cmds++ = cp_type0_packet(A4XX_SP_MODE_CONTROL, 1);
1454 *cmds++ = 0x00000018;
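	/*
	 * Load the workaround shader: a CP_LOAD_STATE packet whose payload is
	 * two setup dwords followed by the raw instruction blob, with the unit
	 * count encoded in the NUMOFUNITS field of the first setup dword.
	 */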
1455 *cmds++ = cp_type3_packet(CP_LOAD_STATE, 2 + count);
1456 *cmds++ = 0x00340000 | (num_units << CP_LOADSTATE_NUMOFUNITS_SHIFT);
1457 *cmds++ = 0x00000000;
1458
1459 memcpy(cmds, _a4xx_pwron_fixup_fs_instructions, count << 2);
1460 cmds += count;
1461
1462 *cmds++ = cp_type3_packet(CP_EXEC_CL, 1);
1463 *cmds++ = 0x00000000;
1464 *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
1465 *cmds++ = 0x00000000;
1466
1467 /*
1468 * Remember the number of dwords in the command buffer for when we
1469 * program the indirect buffer call in the ringbuffer
1470 */
1471 adreno_dev->pwron_fixup_dwords =
1472 (cmds - (unsigned int *) adreno_dev->pwron_fixup.hostptr);
1473
1474 /* Mark the flag in ->priv to show that we have the fix */
1475 set_bit(ADRENO_DEVICE_PWRON_FIXUP, &adreno_dev->priv);
1476 return 0;
1477}
1478
1479/*
 * a4xx_init() - Initialize GPU-specific data
1481 * @adreno_dev: Pointer to adreno device
1482 */
1483static void a4xx_init(struct adreno_device *adreno_dev)
1484{
	if (adreno_is_a405(adreno_dev) || adreno_is_a420(adreno_dev))
1486 _a4xx_pwron_fixup(adreno_dev);
1487}
1488
1489static int a4xx_send_me_init(struct adreno_device *adreno_dev,
1490 struct adreno_ringbuffer *rb)
1491{
1492 unsigned int *cmds;
1493 int ret;
1494
1495 cmds = adreno_ringbuffer_allocspace(rb, 20);
1496 if (IS_ERR(cmds))
1497 return PTR_ERR(cmds);
1498 if (cmds == NULL)
1499 return -ENOSPC;
1500
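	/*
	 * 20 dwords were reserved above: the CP_ME_INIT header, its 17
	 * payload dwords, and a two-dword CP_PREEMPT_ENABLE packet.
	 */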
1501 *cmds++ = cp_type3_packet(CP_ME_INIT, 17);
1502
1503 /*
	 * Ordinal 2 of the ME_INIT packet: the bits set here indicate that
	 * ordinals 3, 4, 5-12, 14, 15, 16, 17 and 18 are present and that
	 * Microcode Default Reset Control = 3
1507 */
1508 *cmds++ = 0x000003f7;
1509 *cmds++ = 0x00000000;
1510 *cmds++ = 0x00000000;
1511 *cmds++ = 0x00000000;
1512 *cmds++ = 0x00000080;
1513 *cmds++ = 0x00000100;
1514 *cmds++ = 0x00000180;
1515 *cmds++ = 0x00006600;
1516 *cmds++ = 0x00000150;
1517 *cmds++ = 0x0000014e;
1518 *cmds++ = 0x00000154;
1519 /* MAX Context */
1520 *cmds++ = 0x00000001;
1521 *cmds++ = 0x00000000;
1522 *cmds++ = 0x00000000;
1523
1524 /* Enable protected mode registers for A3XX/A4XX */
1525 *cmds++ = 0x20000000;
1526
1527 *cmds++ = 0x00000000;
1528 *cmds++ = 0x00000000;
1529
1530 *cmds++ = cp_type3_packet(CP_PREEMPT_ENABLE, 1);
1531 *cmds++ = 1;
1532
1533 ret = adreno_ringbuffer_submit_spin(rb, NULL, 2000);
1534 if (ret) {
1535 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
1536
1537 dev_err(device->dev, "CP initialization failed to idle\n");
1538 kgsl_device_snapshot(device, NULL);
1539 }
1540
1541 return ret;
1542}
1543
1544/*
1545 * a4xx_rb_start() - Start the ringbuffer
1546 * @adreno_dev: Pointer to adreno device
1547 * @start_type: Warm or cold start
1548 */
1549static int a4xx_rb_start(struct adreno_device *adreno_dev,
1550 unsigned int start_type)
1551{
1552 struct adreno_ringbuffer *rb = ADRENO_CURRENT_RINGBUFFER(adreno_dev);
1553 struct kgsl_device *device = &adreno_dev->dev;
1554 uint64_t addr;
1555 int ret;
1556
1557 addr = SCRATCH_RPTR_GPU_ADDR(device, rb->id);
1558
1559 adreno_writereg64(adreno_dev, ADRENO_REG_CP_RB_RPTR_ADDR_LO,
1560 ADRENO_REG_CP_RB_RPTR_ADDR_HI, addr);
1561
1562 /*
1563 * The size of the ringbuffer in the hardware is the log2
1564 * representation of the size in quadwords (sizedwords / 2).
1565 * Also disable the host RPTR shadow register as it might be unreliable
1566 * in certain circumstances.
1567 */
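	/*
	 * For example, with an illustrative KGSL_RB_DWORDS of 32768 the size
	 * field written below would be ilog2(16384) = 14.
	 */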
1568
1569 adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_CNTL,
1570 ((ilog2(4) << 8) & 0x1F00) |
1571 (ilog2(KGSL_RB_DWORDS >> 1) & 0x3F));
1572
1573 adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_BASE,
1574 rb->buffer_desc.gpuaddr);
1575
1576 ret = a3xx_microcode_load(adreno_dev, start_type);
1577 if (ret)
1578 return ret;
1579
1580 /* clear ME_HALT to start micro engine */
1581 adreno_writereg(adreno_dev, ADRENO_REG_CP_ME_CNTL, 0);
1582
1583 ret = a4xx_send_me_init(adreno_dev, rb);
1584 if (ret == 0) {
1585 a4xx_enable_pc(adreno_dev);
1586 a4xx_enable_ppd(adreno_dev);
1587 }
1588
1589 return ret;
1590}
1591
1592static ADRENO_CORESIGHT_ATTR(cfg_debbus_ctrlt, &a4xx_coresight_registers[0]);
1593static ADRENO_CORESIGHT_ATTR(cfg_debbus_sela, &a4xx_coresight_registers[1]);
1594static ADRENO_CORESIGHT_ATTR(cfg_debbus_selb, &a4xx_coresight_registers[2]);
1595static ADRENO_CORESIGHT_ATTR(cfg_debbus_selc, &a4xx_coresight_registers[3]);
1596static ADRENO_CORESIGHT_ATTR(cfg_debbus_seld, &a4xx_coresight_registers[4]);
1597static ADRENO_CORESIGHT_ATTR(cfg_debbus_opl, &a4xx_coresight_registers[5]);
1598static ADRENO_CORESIGHT_ATTR(cfg_debbus_ope, &a4xx_coresight_registers[6]);
1599static ADRENO_CORESIGHT_ATTR(cfg_debbus_ivtl0, &a4xx_coresight_registers[7]);
1600static ADRENO_CORESIGHT_ATTR(cfg_debbus_ivtl1, &a4xx_coresight_registers[8]);
1601static ADRENO_CORESIGHT_ATTR(cfg_debbus_ivtl2, &a4xx_coresight_registers[9]);
1602static ADRENO_CORESIGHT_ATTR(cfg_debbus_ivtl3, &a4xx_coresight_registers[10]);
1603static ADRENO_CORESIGHT_ATTR(cfg_debbus_maskl0, &a4xx_coresight_registers[11]);
1604static ADRENO_CORESIGHT_ATTR(cfg_debbus_maskl1, &a4xx_coresight_registers[12]);
1605static ADRENO_CORESIGHT_ATTR(cfg_debbus_maskl2, &a4xx_coresight_registers[13]);
1606static ADRENO_CORESIGHT_ATTR(cfg_debbus_maskl3, &a4xx_coresight_registers[14]);
1607static ADRENO_CORESIGHT_ATTR(cfg_debbus_bytel0, &a4xx_coresight_registers[15]);
1608static ADRENO_CORESIGHT_ATTR(cfg_debbus_bytel1, &a4xx_coresight_registers[16]);
1609static ADRENO_CORESIGHT_ATTR(cfg_debbus_ivte0, &a4xx_coresight_registers[17]);
1610static ADRENO_CORESIGHT_ATTR(cfg_debbus_ivte1, &a4xx_coresight_registers[18]);
1611static ADRENO_CORESIGHT_ATTR(cfg_debbus_ivte2, &a4xx_coresight_registers[19]);
1612static ADRENO_CORESIGHT_ATTR(cfg_debbus_ivte3, &a4xx_coresight_registers[20]);
1613static ADRENO_CORESIGHT_ATTR(cfg_debbus_maske0, &a4xx_coresight_registers[21]);
1614static ADRENO_CORESIGHT_ATTR(cfg_debbus_maske1, &a4xx_coresight_registers[22]);
1615static ADRENO_CORESIGHT_ATTR(cfg_debbus_maske2, &a4xx_coresight_registers[23]);
1616static ADRENO_CORESIGHT_ATTR(cfg_debbus_maske3, &a4xx_coresight_registers[24]);
1617static ADRENO_CORESIGHT_ATTR(cfg_debbus_nibblee, &a4xx_coresight_registers[25]);
1618static ADRENO_CORESIGHT_ATTR(cfg_debbus_ptrc0, &a4xx_coresight_registers[26]);
1619static ADRENO_CORESIGHT_ATTR(cfg_debbus_ptrc1, &a4xx_coresight_registers[27]);
1620static ADRENO_CORESIGHT_ATTR(cfg_debbus_clrc, &a4xx_coresight_registers[28]);
1621static ADRENO_CORESIGHT_ATTR(cfg_debbus_loadivt, &a4xx_coresight_registers[29]);
1622static ADRENO_CORESIGHT_ATTR(cfg_debbus_idx, &a4xx_coresight_registers[30]);
1623static ADRENO_CORESIGHT_ATTR(cfg_debbus_loadreg, &a4xx_coresight_registers[31]);
1624static ADRENO_CORESIGHT_ATTR(ext_tracebus_ctl, &a4xx_coresight_registers[32]);
1625static ADRENO_CORESIGHT_ATTR(cfg_debbus_ctrlm, &a4xx_coresight_registers[33]);
1626
1627
1628static struct attribute *a4xx_coresight_attrs[] = {
1629 &coresight_attr_cfg_debbus_ctrlt.attr.attr,
1630 &coresight_attr_cfg_debbus_sela.attr.attr,
1631 &coresight_attr_cfg_debbus_selb.attr.attr,
1632 &coresight_attr_cfg_debbus_selc.attr.attr,
1633 &coresight_attr_cfg_debbus_seld.attr.attr,
1634 &coresight_attr_cfg_debbus_opl.attr.attr,
1635 &coresight_attr_cfg_debbus_ope.attr.attr,
1636 &coresight_attr_cfg_debbus_ivtl0.attr.attr,
1637 &coresight_attr_cfg_debbus_ivtl1.attr.attr,
1638 &coresight_attr_cfg_debbus_ivtl2.attr.attr,
1639 &coresight_attr_cfg_debbus_ivtl3.attr.attr,
1640 &coresight_attr_cfg_debbus_maskl0.attr.attr,
1641 &coresight_attr_cfg_debbus_maskl1.attr.attr,
1642 &coresight_attr_cfg_debbus_maskl2.attr.attr,
1643 &coresight_attr_cfg_debbus_maskl3.attr.attr,
1644 &coresight_attr_cfg_debbus_bytel0.attr.attr,
1645 &coresight_attr_cfg_debbus_bytel1.attr.attr,
1646 &coresight_attr_cfg_debbus_ivte0.attr.attr,
1647 &coresight_attr_cfg_debbus_ivte1.attr.attr,
1648 &coresight_attr_cfg_debbus_ivte2.attr.attr,
1649 &coresight_attr_cfg_debbus_ivte3.attr.attr,
1650 &coresight_attr_cfg_debbus_maske0.attr.attr,
1651 &coresight_attr_cfg_debbus_maske1.attr.attr,
1652 &coresight_attr_cfg_debbus_maske2.attr.attr,
1653 &coresight_attr_cfg_debbus_maske3.attr.attr,
1654 &coresight_attr_cfg_debbus_nibblee.attr.attr,
1655 &coresight_attr_cfg_debbus_ptrc0.attr.attr,
1656 &coresight_attr_cfg_debbus_ptrc1.attr.attr,
1657 &coresight_attr_cfg_debbus_clrc.attr.attr,
1658 &coresight_attr_cfg_debbus_loadivt.attr.attr,
1659 &coresight_attr_cfg_debbus_idx.attr.attr,
1660 &coresight_attr_cfg_debbus_loadreg.attr.attr,
1661 &coresight_attr_ext_tracebus_ctl.attr.attr,
1662 &coresight_attr_cfg_debbus_ctrlm.attr.attr,
1663 NULL,
1664};
1665
1666static const struct attribute_group a4xx_coresight_group = {
1667 .attrs = a4xx_coresight_attrs,
1668};
1669
1670static const struct attribute_group *a4xx_coresight_groups[] = {
1671 &a4xx_coresight_group,
1672 NULL,
1673};
1674
1675static struct adreno_coresight a4xx_coresight = {
1676 .registers = a4xx_coresight_registers,
1677 .count = ARRAY_SIZE(a4xx_coresight_registers),
1678 .groups = a4xx_coresight_groups,
1679};
1680
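/*
 * Handler for the CP_SW interrupt: if a preemption has been triggered, poke
 * the dispatcher so it can finish the ringbuffer switch.
 */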
1681static void a4xx_preempt_callback(struct adreno_device *adreno_dev, int bit)
1682{
1683 if (atomic_read(&adreno_dev->preempt.state) != ADRENO_PREEMPT_TRIGGERED)
1684 return;
1685
1686 trace_adreno_hw_preempt_trig_to_comp_int(adreno_dev->cur_rb,
1687 adreno_dev->next_rb,
1688 adreno_get_rptr(adreno_dev->cur_rb),
1689 adreno_get_rptr(adreno_dev->next_rb));
1690
1691 adreno_dispatcher_schedule(KGSL_DEVICE(adreno_dev));
1692}
1693
1694#define A4XX_INT_MASK \
1695 ((1 << A4XX_INT_RBBM_AHB_ERROR) | \
1696 (1 << A4XX_INT_RBBM_REG_TIMEOUT) | \
1697 (1 << A4XX_INT_RBBM_ME_MS_TIMEOUT) | \
1698 (1 << A4XX_INT_RBBM_PFP_MS_TIMEOUT) | \
1699 (1 << A4XX_INT_RBBM_ETS_MS_TIMEOUT) | \
1700 (1 << A4XX_INT_RBBM_ASYNC_OVERFLOW) | \
1701 (1 << A4XX_INT_CP_SW) | \
1702 (1 << A4XX_INT_CP_OPCODE_ERROR) | \
1703 (1 << A4XX_INT_CP_RESERVED_BIT_ERROR) | \
1704 (1 << A4XX_INT_CP_HW_FAULT) | \
1705 (1 << A4XX_INT_CP_IB1_INT) | \
1706 (1 << A4XX_INT_CP_IB2_INT) | \
1707 (1 << A4XX_INT_CP_RB_INT) | \
1708 (1 << A4XX_INT_CACHE_FLUSH_TS) | \
1709 (1 << A4XX_INT_CP_REG_PROTECT_FAULT) | \
1710 (1 << A4XX_INT_CP_AHB_ERROR_HALT) | \
1711 (1 << A4XX_INT_RBBM_ATB_BUS_OVERFLOW) | \
1712 (1 << A4XX_INT_UCHE_OOB_ACCESS) | \
1713 (1 << A4XX_INT_RBBM_DPM_CALC_ERR) | \
1714 (1 << A4XX_INT_RBBM_DPM_EPOCH_ERR) | \
1715 (1 << A4XX_INT_RBBM_DPM_THERMAL_YELLOW_ERR) |\
1716 (1 << A4XX_INT_RBBM_DPM_THERMAL_RED_ERR))
1717
1718
1719static struct adreno_irq_funcs a4xx_irq_funcs[32] = {
1720 ADRENO_IRQ_CALLBACK(NULL), /* 0 - RBBM_GPU_IDLE */
1721 ADRENO_IRQ_CALLBACK(a4xx_err_callback), /* 1 - RBBM_AHB_ERROR */
1722 ADRENO_IRQ_CALLBACK(a4xx_err_callback), /* 2 - RBBM_REG_TIMEOUT */
1723 /* 3 - RBBM_ME_MS_TIMEOUT */
1724 ADRENO_IRQ_CALLBACK(a4xx_err_callback),
1725 /* 4 - RBBM_PFP_MS_TIMEOUT */
1726 ADRENO_IRQ_CALLBACK(a4xx_err_callback),
1727 ADRENO_IRQ_CALLBACK(a4xx_err_callback), /* 5 - RBBM_ETS_MS_TIMEOUT */
1728 /* 6 - RBBM_ATB_ASYNC_OVERFLOW */
1729 ADRENO_IRQ_CALLBACK(a4xx_err_callback),
1730 ADRENO_IRQ_CALLBACK(NULL), /* 7 - RBBM_GPC_ERR */
1731 ADRENO_IRQ_CALLBACK(a4xx_preempt_callback), /* 8 - CP_SW */
1732 ADRENO_IRQ_CALLBACK(a4xx_err_callback), /* 9 - CP_OPCODE_ERROR */
1733 /* 10 - CP_RESERVED_BIT_ERROR */
1734 ADRENO_IRQ_CALLBACK(a4xx_err_callback),
1735 ADRENO_IRQ_CALLBACK(a4xx_err_callback), /* 11 - CP_HW_FAULT */
1736 ADRENO_IRQ_CALLBACK(NULL), /* 12 - CP_DMA */
1737 ADRENO_IRQ_CALLBACK(adreno_cp_callback), /* 13 - CP_IB2_INT */
1738 ADRENO_IRQ_CALLBACK(adreno_cp_callback), /* 14 - CP_IB1_INT */
1739 ADRENO_IRQ_CALLBACK(adreno_cp_callback), /* 15 - CP_RB_INT */
1740 /* 16 - CP_REG_PROTECT_FAULT */
1741 ADRENO_IRQ_CALLBACK(a4xx_err_callback),
1742 ADRENO_IRQ_CALLBACK(NULL), /* 17 - CP_RB_DONE_TS */
1743 ADRENO_IRQ_CALLBACK(NULL), /* 18 - CP_VS_DONE_TS */
1744 ADRENO_IRQ_CALLBACK(NULL), /* 19 - CP_PS_DONE_TS */
1745 ADRENO_IRQ_CALLBACK(adreno_cp_callback), /* 20 - CP_CACHE_FLUSH_TS */
1746 /* 21 - CP_AHB_ERROR_FAULT */
1747 ADRENO_IRQ_CALLBACK(a4xx_err_callback),
1748 ADRENO_IRQ_CALLBACK(a4xx_err_callback), /* 22 - RBBM_ATB_BUS_OVERFLOW */
1749 ADRENO_IRQ_CALLBACK(NULL), /* 23 - Unused */
1750 /* 24 - MISC_HANG_DETECT */
1751 ADRENO_IRQ_CALLBACK(adreno_hang_int_callback),
1752 ADRENO_IRQ_CALLBACK(a4xx_err_callback), /* 25 - UCHE_OOB_ACCESS */
1753 ADRENO_IRQ_CALLBACK(NULL), /* 26 - Unused */
1754 ADRENO_IRQ_CALLBACK(NULL), /* 27 - RBBM_TRACE_MISR */
1755 ADRENO_IRQ_CALLBACK(a4xx_err_callback), /* 28 - RBBM_DPM_CALC_ERR */
1756 ADRENO_IRQ_CALLBACK(a4xx_err_callback), /* 29 - RBBM_DPM_EPOCH_ERR */
1757 /* 30 - RBBM_DPM_THERMAL_YELLOW_ERR */
1758 ADRENO_IRQ_CALLBACK(a4xx_err_callback),
1759 /* 31 - RBBM_DPM_THERMAL_RED_ERR */
1760 ADRENO_IRQ_CALLBACK(a4xx_err_callback),
1761};
1762
1763static struct adreno_irq a4xx_irq = {
1764 .funcs = a4xx_irq_funcs,
1765 .mask = A4XX_INT_MASK,
1766};
1767
1768static struct adreno_snapshot_data a4xx_snapshot_data = {
1769 .sect_sizes = &a4xx_snap_sizes,
1770};
1771
1772struct adreno_gpudev adreno_a4xx_gpudev = {
1773 .reg_offsets = &a4xx_reg_offsets,
1774 .int_bits = a4xx_int_bits,
1775 .ft_perf_counters = a4xx_ft_perf_counters,
1776 .ft_perf_counters_count = ARRAY_SIZE(a4xx_ft_perf_counters),
1777 .perfcounters = &a4xx_perfcounters,
1778 .irq = &a4xx_irq,
1779 .irq_trace = trace_kgsl_a4xx_irq_status,
1780 .snapshot_data = &a4xx_snapshot_data,
1781 .num_prio_levels = KGSL_PRIORITY_MAX_RB_LEVELS,
1782 .vbif_xin_halt_ctrl0_mask = A4XX_VBIF_XIN_HALT_CTRL0_MASK,
1783
1784 .perfcounter_init = a4xx_perfcounter_init,
1785 .perfcounter_close = a4xx_perfcounter_close,
1786 .rb_start = a4xx_rb_start,
1787 .init = a4xx_init,
1788 .microcode_read = a3xx_microcode_read,
1789 .coresight = &a4xx_coresight,
1790 .start = a4xx_start,
1791 .snapshot = a4xx_snapshot,
1792 .is_sptp_idle = a4xx_is_sptp_idle,
1793 .pwrlevel_change_settings = a4xx_pwrlevel_change_settings,
1794 .regulator_enable = a4xx_regulator_enable,
1795 .regulator_disable = a4xx_regulator_disable,
1796 .preemption_pre_ibsubmit = a4xx_preemption_pre_ibsubmit,
1797 .preemption_schedule = a4xx_preemption_schedule,
1798 .preemption_init = a4xx_preemption_init,
1799};