Shrenuj Bansal41665402016-12-16 15:25:54 -08001/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#include <linux/io.h>
15#include "kgsl.h"
16#include "adreno.h"
17#include "kgsl_snapshot.h"
18#include "adreno_snapshot.h"
19#include "a6xx_reg.h"
20#include "adreno_a6xx.h"
Kyle Piefer60733aa2017-03-21 11:24:01 -070021#include "kgsl_gmu.h"
Shrenuj Bansal41665402016-12-16 15:25:54 -080022
23#define A6XX_NUM_CTXTS 2
24
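/*
 * Per-context register clusters, listed as start/end pairs (inclusive).
 * These ranges are captured once per context through the CP aperture.
 */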
25static const unsigned int a6xx_gras_cluster[] = {
26 0x8000, 0x8006, 0x8010, 0x8092, 0x8094, 0x809D, 0x80A0, 0x80A6,
27 0x80AF, 0x80F1, 0x8100, 0x8107, 0x8109, 0x8109, 0x8110, 0x8110,
28 0x8400, 0x840B,
29};
30
31static const unsigned int a6xx_ps_cluster[] = {
32 0x8800, 0x8806, 0x8809, 0x8811, 0x8818, 0x881E, 0x8820, 0x8865,
33 0x8870, 0x8879, 0x8880, 0x8889, 0x8890, 0x8891, 0x8898, 0x8898,
 34	0x88C0, 0x88C1, 0x88D0, 0x88E3, 0x88F0, 0x88F3, 0x8900, 0x891A,
35 0x8927, 0x8928, 0x8C00, 0x8C01, 0x8C17, 0x8C33, 0x9200, 0x9216,
36 0x9218, 0x9236, 0x9300, 0x9306,
37};
38
39static const unsigned int a6xx_fe_cluster[] = {
40 0x9300, 0x9306, 0x9800, 0x9806, 0x9B00, 0x9B07, 0xA000, 0xA009,
41 0xA00E, 0xA0EF, 0xA0F8, 0xA0F8,
42};
43
44static const unsigned int a6xx_pc_vs_cluster[] = {
45 0x9100, 0x9108, 0x9300, 0x9306, 0x9980, 0x9981, 0x9B00, 0x9B07,
46};
47
48static struct a6xx_cluster_registers {
49 unsigned int id;
50 const unsigned int *regs;
51 unsigned int num_sets;
52 unsigned int offset0;
53 unsigned int offset1;
54} a6xx_clusters[] = {
55 { CP_CLUSTER_GRAS, a6xx_gras_cluster, ARRAY_SIZE(a6xx_gras_cluster)/2 },
56 { CP_CLUSTER_PS, a6xx_ps_cluster, ARRAY_SIZE(a6xx_ps_cluster)/2 },
57 { CP_CLUSTER_FE, a6xx_fe_cluster, ARRAY_SIZE(a6xx_fe_cluster)/2 },
58 { CP_CLUSTER_PC_VS, a6xx_pc_vs_cluster,
59 ARRAY_SIZE(a6xx_pc_vs_cluster)/2 },
60};
61
62struct a6xx_cluster_regs_info {
63 struct a6xx_cluster_registers *cluster;
64 unsigned int ctxt_id;
65};
66
Lynus Vaz461e2382017-01-16 19:35:41 +053067static const unsigned int a6xx_sp_vs_hlsq_cluster[] = {
68 0xB800, 0xB803, 0xB820, 0xB822,
69};
70
71static const unsigned int a6xx_sp_vs_sp_cluster[] = {
72 0xA800, 0xA824, 0xA830, 0xA83C, 0xA840, 0xA864, 0xA870, 0xA895,
73 0xA8A0, 0xA8AF, 0xA8C0, 0xA8C3,
74};
75
76static const unsigned int a6xx_hlsq_duplicate_cluster[] = {
77 0xBB10, 0xBB11, 0xBB20, 0xBB29,
78};
79
80static const unsigned int a6xx_hlsq_2d_duplicate_cluster[] = {
81 0xBD80, 0xBD80,
82};
83
84static const unsigned int a6xx_sp_duplicate_cluster[] = {
85 0xAB00, 0xAB00, 0xAB04, 0xAB05, 0xAB10, 0xAB1B, 0xAB20, 0xAB20,
86};
87
88static const unsigned int a6xx_tp_duplicate_cluster[] = {
89 0xB300, 0xB307, 0xB309, 0xB309, 0xB380, 0xB382,
90};
91
92static const unsigned int a6xx_sp_ps_hlsq_cluster[] = {
93 0xB980, 0xB980, 0xB982, 0xB987, 0xB990, 0xB99B, 0xB9A0, 0xB9A2,
94 0xB9C0, 0xB9C9,
95};
96
97static const unsigned int a6xx_sp_ps_hlsq_2d_cluster[] = {
98 0xBD80, 0xBD80,
99};
100
101static const unsigned int a6xx_sp_ps_sp_cluster[] = {
102 0xA980, 0xA9A8, 0xA9B0, 0xA9BC, 0xA9D0, 0xA9D3, 0xA9E0, 0xA9F3,
103 0xAA00, 0xAA00, 0xAA30, 0xAA31,
104};
105
106static const unsigned int a6xx_sp_ps_sp_2d_cluster[] = {
107 0xACC0, 0xACC0,
108};
109
110static const unsigned int a6xx_sp_ps_tp_cluster[] = {
111 0xB180, 0xB183, 0xB190, 0xB191,
112};
113
114static const unsigned int a6xx_sp_ps_tp_2d_cluster[] = {
115 0xB4C0, 0xB4D1,
116};
117
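/*
 * SP/TP/HLSQ context clusters are not reachable over the normal register
 * AHB; they are read through the HLSQ debug AHB aperture using the
 * statetype and register base recorded here.
 */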
118static struct a6xx_cluster_dbgahb_registers {
119 unsigned int id;
120 unsigned int regbase;
121 unsigned int statetype;
122 const unsigned int *regs;
123 unsigned int num_sets;
Lynus Vaz1e258612017-04-27 21:35:22 +0530124 unsigned int offset0;
125 unsigned int offset1;
Lynus Vaz461e2382017-01-16 19:35:41 +0530126} a6xx_dbgahb_ctx_clusters[] = {
127 { CP_CLUSTER_SP_VS, 0x0002E000, 0x41, a6xx_sp_vs_hlsq_cluster,
128 ARRAY_SIZE(a6xx_sp_vs_hlsq_cluster) / 2 },
129 { CP_CLUSTER_SP_VS, 0x0002A000, 0x21, a6xx_sp_vs_sp_cluster,
130 ARRAY_SIZE(a6xx_sp_vs_sp_cluster) / 2 },
Shrenuj Bansalcbdf19b2017-04-13 11:28:51 -0700131 { CP_CLUSTER_SP_VS, 0x0002E000, 0x41, a6xx_hlsq_duplicate_cluster,
Lynus Vaz461e2382017-01-16 19:35:41 +0530132 ARRAY_SIZE(a6xx_hlsq_duplicate_cluster) / 2 },
133 { CP_CLUSTER_SP_VS, 0x0002F000, 0x45, a6xx_hlsq_2d_duplicate_cluster,
134 ARRAY_SIZE(a6xx_hlsq_2d_duplicate_cluster) / 2 },
Shrenuj Bansalcbdf19b2017-04-13 11:28:51 -0700135 { CP_CLUSTER_SP_VS, 0x0002A000, 0x21, a6xx_sp_duplicate_cluster,
Lynus Vaz461e2382017-01-16 19:35:41 +0530136 ARRAY_SIZE(a6xx_sp_duplicate_cluster) / 2 },
Shrenuj Bansalcbdf19b2017-04-13 11:28:51 -0700137 { CP_CLUSTER_SP_VS, 0x0002C000, 0x1, a6xx_tp_duplicate_cluster,
Lynus Vaz461e2382017-01-16 19:35:41 +0530138 ARRAY_SIZE(a6xx_tp_duplicate_cluster) / 2 },
Shrenuj Bansalcbdf19b2017-04-13 11:28:51 -0700139 { CP_CLUSTER_SP_PS, 0x0002E000, 0x42, a6xx_sp_ps_hlsq_cluster,
Lynus Vaz461e2382017-01-16 19:35:41 +0530140 ARRAY_SIZE(a6xx_sp_ps_hlsq_cluster) / 2 },
Shrenuj Bansalcbdf19b2017-04-13 11:28:51 -0700141 { CP_CLUSTER_SP_PS, 0x0002F000, 0x46, a6xx_sp_ps_hlsq_2d_cluster,
Lynus Vaz461e2382017-01-16 19:35:41 +0530142 ARRAY_SIZE(a6xx_sp_ps_hlsq_2d_cluster) / 2 },
Shrenuj Bansalcbdf19b2017-04-13 11:28:51 -0700143 { CP_CLUSTER_SP_PS, 0x0002A000, 0x22, a6xx_sp_ps_sp_cluster,
Lynus Vaz461e2382017-01-16 19:35:41 +0530144 ARRAY_SIZE(a6xx_sp_ps_sp_cluster) / 2 },
Shrenuj Bansalcbdf19b2017-04-13 11:28:51 -0700145 { CP_CLUSTER_SP_PS, 0x0002B000, 0x26, a6xx_sp_ps_sp_2d_cluster,
Lynus Vaz461e2382017-01-16 19:35:41 +0530146 ARRAY_SIZE(a6xx_sp_ps_sp_2d_cluster) / 2 },
Shrenuj Bansalcbdf19b2017-04-13 11:28:51 -0700147 { CP_CLUSTER_SP_PS, 0x0002C000, 0x2, a6xx_sp_ps_tp_cluster,
Lynus Vaz461e2382017-01-16 19:35:41 +0530148 ARRAY_SIZE(a6xx_sp_ps_tp_cluster) / 2 },
Shrenuj Bansalcbdf19b2017-04-13 11:28:51 -0700149 { CP_CLUSTER_SP_PS, 0x0002D000, 0x6, a6xx_sp_ps_tp_2d_cluster,
Lynus Vaz461e2382017-01-16 19:35:41 +0530150 ARRAY_SIZE(a6xx_sp_ps_tp_2d_cluster) / 2 },
Shrenuj Bansalcbdf19b2017-04-13 11:28:51 -0700151 { CP_CLUSTER_SP_PS, 0x0002E000, 0x42, a6xx_hlsq_duplicate_cluster,
Lynus Vaz461e2382017-01-16 19:35:41 +0530152 ARRAY_SIZE(a6xx_hlsq_duplicate_cluster) / 2 },
Shrenuj Bansalcbdf19b2017-04-13 11:28:51 -0700153 { CP_CLUSTER_SP_VS, 0x0002A000, 0x22, a6xx_sp_duplicate_cluster,
Lynus Vaz461e2382017-01-16 19:35:41 +0530154 ARRAY_SIZE(a6xx_sp_duplicate_cluster) / 2 },
Shrenuj Bansalcbdf19b2017-04-13 11:28:51 -0700155 { CP_CLUSTER_SP_VS, 0x0002C000, 0x2, a6xx_tp_duplicate_cluster,
Lynus Vaz461e2382017-01-16 19:35:41 +0530156 ARRAY_SIZE(a6xx_tp_duplicate_cluster) / 2 },
157};
158
159struct a6xx_cluster_dbgahb_regs_info {
160 struct a6xx_cluster_dbgahb_registers *cluster;
161 unsigned int ctxt_id;
162};
163
164static const unsigned int a6xx_hlsq_non_ctx_registers[] = {
165 0xBE00, 0xBE01, 0xBE04, 0xBE05, 0xBE08, 0xBE09, 0xBE10, 0xBE15,
166 0xBE20, 0xBE23,
167};
168
169static const unsigned int a6xx_sp_non_ctx_registers[] = {
170 0xAE00, 0xAE04, 0xAE0C, 0xAE0C, 0xAE0F, 0xAE2B, 0xAE30, 0xAE32,
171 0xAE35, 0xAE35, 0xAE3A, 0xAE3F, 0xAE50, 0xAE52,
172};
173
174static const unsigned int a6xx_tp_non_ctx_registers[] = {
175 0xB600, 0xB601, 0xB604, 0xB605, 0xB610, 0xB61B, 0xB620, 0xB623,
176};
177
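/* Non-context (global) register ranges that are also read through the debug AHB */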
178static struct a6xx_non_ctx_dbgahb_registers {
179 unsigned int regbase;
180 unsigned int statetype;
181 const unsigned int *regs;
182 unsigned int num_sets;
183} a6xx_non_ctx_dbgahb[] = {
184 { 0x0002F800, 0x40, a6xx_hlsq_non_ctx_registers,
185 ARRAY_SIZE(a6xx_hlsq_non_ctx_registers) / 2 },
186 { 0x0002B800, 0x20, a6xx_sp_non_ctx_registers,
187 ARRAY_SIZE(a6xx_sp_non_ctx_registers) / 2 },
188 { 0x0002D800, 0x0, a6xx_tp_non_ctx_registers,
189 ARRAY_SIZE(a6xx_tp_non_ctx_registers) / 2 },
190};
191
Shrenuj Bansal41665402016-12-16 15:25:54 -0800192static const unsigned int a6xx_vbif_ver_20xxxxxx_registers[] = {
193 /* VBIF */
194 0x3000, 0x3007, 0x300C, 0x3014, 0x3018, 0x302D, 0x3030, 0x3031,
195 0x3034, 0x3036, 0x303C, 0x303D, 0x3040, 0x3040, 0x3042, 0x3042,
196 0x3049, 0x3049, 0x3058, 0x3058, 0x305A, 0x3061, 0x3064, 0x3068,
197 0x306C, 0x306D, 0x3080, 0x3088, 0x308B, 0x308C, 0x3090, 0x3094,
198 0x3098, 0x3098, 0x309C, 0x309C, 0x30C0, 0x30C0, 0x30C8, 0x30C8,
199 0x30D0, 0x30D0, 0x30D8, 0x30D8, 0x30E0, 0x30E0, 0x3100, 0x3100,
200 0x3108, 0x3108, 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120,
201 0x3124, 0x3125, 0x3129, 0x3129, 0x3131, 0x3131, 0x3154, 0x3154,
202 0x3156, 0x3156, 0x3158, 0x3158, 0x315A, 0x315A, 0x315C, 0x315C,
203 0x315E, 0x315E, 0x3160, 0x3160, 0x3162, 0x3162, 0x340C, 0x340C,
204 0x3410, 0x3410, 0x3800, 0x3801,
205};
206
Kyle Piefer60733aa2017-03-21 11:24:01 -0700207static const unsigned int a6xx_gmu_registers[] = {
208 /* GMU */
209 0x1B400, 0x1C3FF, 0x1C400, 0x1D3FF,
210};
211
Shrenuj Bansal41665402016-12-16 15:25:54 -0800212static const struct adreno_vbif_snapshot_registers
213a6xx_vbif_snapshot_registers[] = {
214 { 0x20040000, 0xFF000000, a6xx_vbif_ver_20xxxxxx_registers,
215 ARRAY_SIZE(a6xx_vbif_ver_20xxxxxx_registers)/2},
216};
217
218/*
219 * Set of registers to dump for A6XX on snapshot.
220 * Registers in pairs - first value is the start offset, second
221 * is the stop offset (inclusive)
222 */
223
224static const unsigned int a6xx_registers[] = {
225 /* RBBM */
Lynus Vazdb0be0a2017-04-20 18:09:17 +0530226 0x0000, 0x0002, 0x0010, 0x0010, 0x0012, 0x0012, 0x0018, 0x001B,
 227	0x001E, 0x0032, 0x0038, 0x003C, 0x0042, 0x0042, 0x0044, 0x0044,
228 0x0047, 0x0047, 0x0056, 0x0056, 0x00AD, 0x00AE, 0x00B0, 0x00FB,
229 0x0100, 0x011D, 0x0200, 0x020D, 0x0210, 0x0213, 0x0218, 0x023D,
230 0x0400, 0x04F9, 0x0500, 0x0500, 0x0505, 0x050B, 0x050E, 0x0511,
231 0x0533, 0x0533, 0x0540, 0x0555,
Shrenuj Bansal41665402016-12-16 15:25:54 -0800232 /* CP */
233 0x0800, 0x0808, 0x0810, 0x0813, 0x0820, 0x0821, 0x0823, 0x0827,
234 0x0830, 0x0833, 0x0840, 0x0843, 0x084F, 0x086F, 0x0880, 0x088A,
235 0x08A0, 0x08AB, 0x08C0, 0x08C4, 0x08D0, 0x08DD, 0x08F0, 0x08F3,
236 0x0900, 0x0903, 0x0908, 0x0911, 0x0928, 0x093E, 0x0942, 0x094D,
237 0x0980, 0x0984, 0x098D, 0x0996, 0x0998, 0x099E, 0x09A0, 0x09A6,
238 0x09A8, 0x09AE, 0x09B0, 0x09B1, 0x09C2, 0x09C8, 0x0A00, 0x0A03,
239 /* VSC */
240 0x0C00, 0x0C04, 0x0C06, 0x0C06, 0x0C10, 0x0CD9, 0x0E00, 0x0E0E,
241 /* UCHE */
242 0x0E10, 0x0E13, 0x0E17, 0x0E19, 0x0E1C, 0x0E2B, 0x0E30, 0x0E32,
243 0x0E38, 0x0E39,
244 /* GRAS */
Lynus Vazdb0be0a2017-04-20 18:09:17 +0530245 0x8600, 0x8601, 0x8610, 0x861B, 0x8620, 0x8620, 0x8628, 0x862B,
246 0x8630, 0x8637,
Shrenuj Bansal41665402016-12-16 15:25:54 -0800247 /* RB */
248 0x8E01, 0x8E01, 0x8E04, 0x8E05, 0x8E07, 0x8E08, 0x8E0C, 0x8E0C,
249 0x8E10, 0x8E1C, 0x8E20, 0x8E25, 0x8E28, 0x8E28, 0x8E2C, 0x8E2F,
250 0x8E3B, 0x8E3E, 0x8E40, 0x8E43, 0x8E50, 0x8E5E, 0x8E70, 0x8E77,
251 /* VPC */
252 0x9600, 0x9604, 0x9624, 0x9637,
253 /* PC */
254 0x9E00, 0x9E01, 0x9E03, 0x9E0E, 0x9E11, 0x9E16, 0x9E19, 0x9E19,
255 0x9E1C, 0x9E1C, 0x9E20, 0x9E23, 0x9E30, 0x9E31, 0x9E34, 0x9E34,
256 0x9E70, 0x9E72, 0x9E78, 0x9E79, 0x9E80, 0x9FFF,
257 /* VFD */
258 0xA600, 0xA601, 0xA603, 0xA603, 0xA60A, 0xA60A, 0xA610, 0xA617,
Lynus Vazdb0be0a2017-04-20 18:09:17 +0530259 0xA630, 0xA630,
Shrenuj Bansal41665402016-12-16 15:25:54 -0800260};
261
Lynus Vaz20c81272017-02-10 16:22:12 +0530262enum a6xx_debugbus_id {
263 A6XX_DBGBUS_CP = 0x1,
264 A6XX_DBGBUS_RBBM = 0x2,
265 A6XX_DBGBUS_VBIF = 0x3,
266 A6XX_DBGBUS_HLSQ = 0x4,
267 A6XX_DBGBUS_UCHE = 0x5,
268 A6XX_DBGBUS_DPM = 0x6,
269 A6XX_DBGBUS_TESS = 0x7,
270 A6XX_DBGBUS_PC = 0x8,
271 A6XX_DBGBUS_VFDP = 0x9,
272 A6XX_DBGBUS_VPC = 0xa,
273 A6XX_DBGBUS_TSE = 0xb,
274 A6XX_DBGBUS_RAS = 0xc,
275 A6XX_DBGBUS_VSC = 0xd,
276 A6XX_DBGBUS_COM = 0xe,
277 A6XX_DBGBUS_LRZ = 0x10,
278 A6XX_DBGBUS_A2D = 0x11,
279 A6XX_DBGBUS_CCUFCHE = 0x12,
Lynus Vazecd472c2017-04-18 14:15:57 +0530280 A6XX_DBGBUS_GMU_CX = 0x13,
Lynus Vaz20c81272017-02-10 16:22:12 +0530281 A6XX_DBGBUS_RBP = 0x14,
282 A6XX_DBGBUS_DCS = 0x15,
283 A6XX_DBGBUS_RBBM_CFG = 0x16,
284 A6XX_DBGBUS_CX = 0x17,
Lynus Vazecd472c2017-04-18 14:15:57 +0530285 A6XX_DBGBUS_GMU_GX = 0x18,
Lynus Vaz20c81272017-02-10 16:22:12 +0530286 A6XX_DBGBUS_TPFCHE = 0x19,
287 A6XX_DBGBUS_GPC = 0x1d,
288 A6XX_DBGBUS_LARC = 0x1e,
289 A6XX_DBGBUS_HLSQ_SPTP = 0x1f,
290 A6XX_DBGBUS_RB_0 = 0x20,
291 A6XX_DBGBUS_RB_1 = 0x21,
292 A6XX_DBGBUS_UCHE_WRAPPER = 0x24,
293 A6XX_DBGBUS_CCU_0 = 0x28,
294 A6XX_DBGBUS_CCU_1 = 0x29,
295 A6XX_DBGBUS_VFD_0 = 0x38,
296 A6XX_DBGBUS_VFD_1 = 0x39,
297 A6XX_DBGBUS_VFD_2 = 0x3a,
298 A6XX_DBGBUS_VFD_3 = 0x3b,
299 A6XX_DBGBUS_SP_0 = 0x40,
300 A6XX_DBGBUS_SP_1 = 0x41,
301 A6XX_DBGBUS_TPL1_0 = 0x48,
302 A6XX_DBGBUS_TPL1_1 = 0x49,
303 A6XX_DBGBUS_TPL1_2 = 0x4a,
304 A6XX_DBGBUS_TPL1_3 = 0x4b,
305};
306
307static const struct adreno_debugbus_block a6xx_dbgc_debugbus_blocks[] = {
308 { A6XX_DBGBUS_CP, 0x100, },
309 { A6XX_DBGBUS_RBBM, 0x100, },
310 { A6XX_DBGBUS_HLSQ, 0x100, },
311 { A6XX_DBGBUS_UCHE, 0x100, },
312 { A6XX_DBGBUS_DPM, 0x100, },
313 { A6XX_DBGBUS_TESS, 0x100, },
314 { A6XX_DBGBUS_PC, 0x100, },
315 { A6XX_DBGBUS_VFDP, 0x100, },
316 { A6XX_DBGBUS_VPC, 0x100, },
317 { A6XX_DBGBUS_TSE, 0x100, },
318 { A6XX_DBGBUS_RAS, 0x100, },
319 { A6XX_DBGBUS_VSC, 0x100, },
320 { A6XX_DBGBUS_COM, 0x100, },
321 { A6XX_DBGBUS_LRZ, 0x100, },
322 { A6XX_DBGBUS_A2D, 0x100, },
323 { A6XX_DBGBUS_CCUFCHE, 0x100, },
324 { A6XX_DBGBUS_RBP, 0x100, },
325 { A6XX_DBGBUS_DCS, 0x100, },
326 { A6XX_DBGBUS_RBBM_CFG, 0x100, },
Lynus Vazecd472c2017-04-18 14:15:57 +0530327 { A6XX_DBGBUS_GMU_GX, 0x100, },
Lynus Vaz20c81272017-02-10 16:22:12 +0530328 { A6XX_DBGBUS_TPFCHE, 0x100, },
329 { A6XX_DBGBUS_GPC, 0x100, },
330 { A6XX_DBGBUS_LARC, 0x100, },
331 { A6XX_DBGBUS_HLSQ_SPTP, 0x100, },
332 { A6XX_DBGBUS_RB_0, 0x100, },
333 { A6XX_DBGBUS_RB_1, 0x100, },
334 { A6XX_DBGBUS_UCHE_WRAPPER, 0x100, },
335 { A6XX_DBGBUS_CCU_0, 0x100, },
336 { A6XX_DBGBUS_CCU_1, 0x100, },
337 { A6XX_DBGBUS_VFD_0, 0x100, },
338 { A6XX_DBGBUS_VFD_1, 0x100, },
339 { A6XX_DBGBUS_VFD_2, 0x100, },
340 { A6XX_DBGBUS_VFD_3, 0x100, },
341 { A6XX_DBGBUS_SP_0, 0x100, },
342 { A6XX_DBGBUS_SP_1, 0x100, },
343 { A6XX_DBGBUS_TPL1_0, 0x100, },
344 { A6XX_DBGBUS_TPL1_1, 0x100, },
345 { A6XX_DBGBUS_TPL1_2, 0x100, },
346 { A6XX_DBGBUS_TPL1_3, 0x100, },
347};
Shrenuj Bansal41665402016-12-16 15:25:54 -0800348
Lynus Vazff24c972017-03-07 19:27:46 +0530349static void __iomem *a6xx_cx_dbgc;
350static const struct adreno_debugbus_block a6xx_cx_dbgc_debugbus_blocks[] = {
351 { A6XX_DBGBUS_VBIF, 0x100, },
Lynus Vazecd472c2017-04-18 14:15:57 +0530352 { A6XX_DBGBUS_GMU_CX, 0x100, },
Lynus Vazff24c972017-03-07 19:27:46 +0530353 { A6XX_DBGBUS_CX, 0x100, },
354};
355
Lynus Vaz9ad67a32017-03-10 14:55:02 +0530356#define A6XX_NUM_SHADER_BANKS 3
357#define A6XX_SHADER_STATETYPE_SHIFT 8
358
359enum a6xx_shader_obj {
360 A6XX_TP0_TMO_DATA = 0x9,
361 A6XX_TP0_SMO_DATA = 0xa,
362 A6XX_TP0_MIPMAP_BASE_DATA = 0xb,
363 A6XX_TP1_TMO_DATA = 0x19,
364 A6XX_TP1_SMO_DATA = 0x1a,
365 A6XX_TP1_MIPMAP_BASE_DATA = 0x1b,
366 A6XX_SP_INST_DATA = 0x29,
367 A6XX_SP_LB_0_DATA = 0x2a,
368 A6XX_SP_LB_1_DATA = 0x2b,
369 A6XX_SP_LB_2_DATA = 0x2c,
370 A6XX_SP_LB_3_DATA = 0x2d,
371 A6XX_SP_LB_4_DATA = 0x2e,
372 A6XX_SP_LB_5_DATA = 0x2f,
373 A6XX_SP_CB_BINDLESS_DATA = 0x30,
374 A6XX_SP_CB_LEGACY_DATA = 0x31,
375 A6XX_SP_UAV_DATA = 0x32,
376 A6XX_SP_INST_TAG = 0x33,
377 A6XX_SP_CB_BINDLESS_TAG = 0x34,
378 A6XX_SP_TMO_UMO_TAG = 0x35,
379 A6XX_SP_SMO_TAG = 0x36,
380 A6XX_SP_STATE_DATA = 0x37,
381 A6XX_HLSQ_CHUNK_CVS_RAM = 0x49,
382 A6XX_HLSQ_CHUNK_CPS_RAM = 0x4a,
383 A6XX_HLSQ_CHUNK_CVS_RAM_TAG = 0x4b,
384 A6XX_HLSQ_CHUNK_CPS_RAM_TAG = 0x4c,
385 A6XX_HLSQ_ICB_CVS_CB_BASE_TAG = 0x4d,
386 A6XX_HLSQ_ICB_CPS_CB_BASE_TAG = 0x4e,
387 A6XX_HLSQ_CVS_MISC_RAM = 0x50,
388 A6XX_HLSQ_CPS_MISC_RAM = 0x51,
389 A6XX_HLSQ_INST_RAM = 0x52,
390 A6XX_HLSQ_GFX_CVS_CONST_RAM = 0x53,
391 A6XX_HLSQ_GFX_CPS_CONST_RAM = 0x54,
392 A6XX_HLSQ_CVS_MISC_RAM_TAG = 0x55,
393 A6XX_HLSQ_CPS_MISC_RAM_TAG = 0x56,
394 A6XX_HLSQ_INST_RAM_TAG = 0x57,
395 A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG = 0x58,
396 A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG = 0x59,
397 A6XX_HLSQ_PWR_REST_RAM = 0x5a,
398 A6XX_HLSQ_PWR_REST_TAG = 0x5b,
399 A6XX_HLSQ_DATAPATH_META = 0x60,
400 A6XX_HLSQ_FRONTEND_META = 0x61,
401 A6XX_HLSQ_INDIRECT_META = 0x62,
402 A6XX_HLSQ_BACKEND_META = 0x63
403};
404
405struct a6xx_shader_block {
406 unsigned int statetype;
407 unsigned int sz;
408 uint64_t offset;
409};
410
411struct a6xx_shader_block_info {
412 struct a6xx_shader_block *block;
413 unsigned int bank;
414 uint64_t offset;
415};
416
417static struct a6xx_shader_block a6xx_shader_blocks[] = {
418 {A6XX_TP0_TMO_DATA, 0x200},
419 {A6XX_TP0_SMO_DATA, 0x80,},
420 {A6XX_TP0_MIPMAP_BASE_DATA, 0x3C0},
421 {A6XX_TP1_TMO_DATA, 0x200},
422 {A6XX_TP1_SMO_DATA, 0x80,},
423 {A6XX_TP1_MIPMAP_BASE_DATA, 0x3C0},
424 {A6XX_SP_INST_DATA, 0x800},
425 {A6XX_SP_LB_0_DATA, 0x800},
426 {A6XX_SP_LB_1_DATA, 0x800},
427 {A6XX_SP_LB_2_DATA, 0x800},
428 {A6XX_SP_LB_3_DATA, 0x800},
429 {A6XX_SP_LB_4_DATA, 0x800},
430 {A6XX_SP_LB_5_DATA, 0x200},
431 {A6XX_SP_CB_BINDLESS_DATA, 0x2000},
432 {A6XX_SP_CB_LEGACY_DATA, 0x280,},
433 {A6XX_SP_UAV_DATA, 0x80,},
434 {A6XX_SP_INST_TAG, 0x80,},
435 {A6XX_SP_CB_BINDLESS_TAG, 0x80,},
436 {A6XX_SP_TMO_UMO_TAG, 0x80,},
437 {A6XX_SP_SMO_TAG, 0x80},
438 {A6XX_SP_STATE_DATA, 0x3F},
439 {A6XX_HLSQ_CHUNK_CVS_RAM, 0x1C0},
440 {A6XX_HLSQ_CHUNK_CPS_RAM, 0x280},
441 {A6XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40,},
442 {A6XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40,},
443 {A6XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x4,},
444 {A6XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x4,},
445 {A6XX_HLSQ_CVS_MISC_RAM, 0x1C0},
446 {A6XX_HLSQ_CPS_MISC_RAM, 0x580},
447 {A6XX_HLSQ_INST_RAM, 0x800},
448 {A6XX_HLSQ_GFX_CVS_CONST_RAM, 0x800},
449 {A6XX_HLSQ_GFX_CPS_CONST_RAM, 0x800},
450 {A6XX_HLSQ_CVS_MISC_RAM_TAG, 0x8,},
451 {A6XX_HLSQ_CPS_MISC_RAM_TAG, 0x4,},
452 {A6XX_HLSQ_INST_RAM_TAG, 0x80,},
453 {A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0xC,},
454 {A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x10},
455 {A6XX_HLSQ_PWR_REST_RAM, 0x28},
456 {A6XX_HLSQ_PWR_REST_TAG, 0x14},
457 {A6XX_HLSQ_DATAPATH_META, 0x40,},
458 {A6XX_HLSQ_FRONTEND_META, 0x40},
459 {A6XX_HLSQ_INDIRECT_META, 0x40,}
460};
461
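/*
 * Buffers used by the CP crash dumper: the capture script it executes and
 * the buffer it writes register contents into. crash_dump_valid indicates
 * whether the last dump completed and the captured data can be trusted.
 */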
Shrenuj Bansal41665402016-12-16 15:25:54 -0800462static struct kgsl_memdesc a6xx_capturescript;
463static struct kgsl_memdesc a6xx_crashdump_registers;
464static bool crash_dump_valid;
465
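/*
 * Legacy path: read the register list directly over the AHB when crash
 * dumper data is not available.
 */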
466static size_t a6xx_legacy_snapshot_registers(struct kgsl_device *device,
467 u8 *buf, size_t remain)
468{
469 struct kgsl_snapshot_registers regs = {
470 .regs = a6xx_registers,
471 .count = ARRAY_SIZE(a6xx_registers) / 2,
472 };
473
474 return kgsl_snapshot_dump_registers(device, buf, remain, &regs);
475}
476
477static struct cdregs {
478 const unsigned int *regs;
479 unsigned int size;
480} _a6xx_cd_registers[] = {
481 { a6xx_registers, ARRAY_SIZE(a6xx_registers) },
482};
483
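/* Number of registers covered by pair _i in range list _a (ranges are inclusive) */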
484#define REG_PAIR_COUNT(_a, _i) \
485 (((_a)[(2 * (_i)) + 1] - (_a)[2 * (_i)]) + 1)
486
487static size_t a6xx_snapshot_registers(struct kgsl_device *device, u8 *buf,
488 size_t remain, void *priv)
489{
490 struct kgsl_snapshot_regs *header = (struct kgsl_snapshot_regs *)buf;
491 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
492 unsigned int *src = (unsigned int *)a6xx_crashdump_registers.hostptr;
493 unsigned int i, j, k;
494 unsigned int count = 0;
495
496 if (crash_dump_valid == false)
497 return a6xx_legacy_snapshot_registers(device, buf, remain);
498
499 if (remain < sizeof(*header)) {
500 SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
501 return 0;
502 }
503
504 remain -= sizeof(*header);
505
506 for (i = 0; i < ARRAY_SIZE(_a6xx_cd_registers); i++) {
507 struct cdregs *regs = &_a6xx_cd_registers[i];
508
509 for (j = 0; j < regs->size / 2; j++) {
510 unsigned int start = regs->regs[2 * j];
511 unsigned int end = regs->regs[(2 * j) + 1];
512
513 if (remain < ((end - start) + 1) * 8) {
514 SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
515 goto out;
516 }
517
518 remain -= ((end - start) + 1) * 8;
519
520 for (k = start; k <= end; k++, count++) {
521 *data++ = k;
522 *data++ = *src++;
523 }
524 }
525 }
526
527out:
528 header->count = count;
529
530 /* Return the size of the section */
531 return (count * 8) + sizeof(*header);
532}
533
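/*
 * Write one shader memory bank, previously read by the crash dumper, into
 * a SHADER snapshot section.
 */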
Lynus Vaz9ad67a32017-03-10 14:55:02 +0530534static size_t a6xx_snapshot_shader_memory(struct kgsl_device *device,
535 u8 *buf, size_t remain, void *priv)
536{
537 struct kgsl_snapshot_shader *header =
538 (struct kgsl_snapshot_shader *) buf;
539 struct a6xx_shader_block_info *info =
540 (struct a6xx_shader_block_info *) priv;
541 struct a6xx_shader_block *block = info->block;
542 unsigned int *data = (unsigned int *) (buf + sizeof(*header));
543
544 if (remain < SHADER_SECTION_SZ(block->sz)) {
545 SNAPSHOT_ERR_NOMEM(device, "SHADER MEMORY");
546 return 0;
547 }
548
549 header->type = block->statetype;
550 header->index = info->bank;
551 header->size = block->sz;
552
553 memcpy(data, a6xx_crashdump_registers.hostptr + info->offset,
554 block->sz);
555
556 return SHADER_SECTION_SZ(block->sz);
557}
558
559static void a6xx_snapshot_shader(struct kgsl_device *device,
560 struct kgsl_snapshot *snapshot)
561{
562 unsigned int i, j;
563 struct a6xx_shader_block_info info;
564
565 /* Shader blocks can only be read by the crash dumper */
566 if (crash_dump_valid == false)
567 return;
568
569 for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
570 for (j = 0; j < A6XX_NUM_SHADER_BANKS; j++) {
571 info.block = &a6xx_shader_blocks[i];
572 info.bank = j;
573 info.offset = a6xx_shader_blocks[i].offset +
574 (j * a6xx_shader_blocks[i].sz);
575
576 /* Shader working/shadow memory */
577 kgsl_snapshot_add_section(device,
578 KGSL_SNAPSHOT_SECTION_SHADER,
579 snapshot, a6xx_snapshot_shader_memory, &info);
580 }
581 }
582}
583
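/*
 * Dump the CP mempool through its indexed debug registers. The pool size
 * is zeroed while reading to keep the pool stable and then patched back
 * into the dumped data afterwards.
 */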
Lynus Vaza5922742017-03-14 18:50:54 +0530584static void a6xx_snapshot_mempool(struct kgsl_device *device,
585 struct kgsl_snapshot *snapshot)
586{
587 unsigned int pool_size;
Lynus Vazb8e43d52017-04-20 14:47:37 +0530588 u8 *buf = snapshot->ptr;
Lynus Vaza5922742017-03-14 18:50:54 +0530589
Lynus Vazb8e43d52017-04-20 14:47:37 +0530590 /* Set the mempool size to 0 to stabilize it while dumping */
Lynus Vaza5922742017-03-14 18:50:54 +0530591 kgsl_regread(device, A6XX_CP_MEM_POOL_SIZE, &pool_size);
592 kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, 0);
593
594 kgsl_snapshot_indexed_registers(device, snapshot,
595 A6XX_CP_MEM_POOL_DBG_ADDR, A6XX_CP_MEM_POOL_DBG_DATA,
596 0, 0x2060);
597
Lynus Vazb8e43d52017-04-20 14:47:37 +0530598 /*
599 * Data at offset 0x2000 in the mempool section is the mempool size.
600 * Since we set it to 0, patch in the original size so that the data
601 * is consistent.
602 */
603 if (buf < snapshot->ptr) {
604 unsigned int *data;
605
606 /* Skip over the headers */
607 buf += sizeof(struct kgsl_snapshot_section_header) +
608 sizeof(struct kgsl_snapshot_indexed_regs);
609
610 data = (unsigned int *)buf + 0x2000;
611 *data = pool_size;
612 }
613
Lynus Vaza5922742017-03-14 18:50:54 +0530614 /* Restore the saved mempool size */
615 kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, pool_size);
616}
617
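/*
 * Read a single register through the HLSQ debug AHB read aperture.
 * regbase is a byte address, hence the divide by 4 to get a dword offset.
 */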
Lynus Vaz461e2382017-01-16 19:35:41 +0530618static inline unsigned int a6xx_read_dbgahb(struct kgsl_device *device,
619 unsigned int regbase, unsigned int reg)
620{
621 unsigned int read_reg = A6XX_HLSQ_DBG_AHB_READ_APERTURE +
622 reg - regbase / 4;
623 unsigned int val;
624
625 kgsl_regread(device, read_reg, &val);
626 return val;
627}
628
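/*
 * Legacy path: read a context cluster register by register through the
 * debug AHB aperture when crash dumper data is not available.
 */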
Lynus Vaz1e258612017-04-27 21:35:22 +0530629static size_t a6xx_legacy_snapshot_cluster_dbgahb(struct kgsl_device *device,
630 u8 *buf, size_t remain, void *priv)
Lynus Vaz461e2382017-01-16 19:35:41 +0530631{
632 struct kgsl_snapshot_mvc_regs *header =
633 (struct kgsl_snapshot_mvc_regs *)buf;
634 struct a6xx_cluster_dbgahb_regs_info *info =
635 (struct a6xx_cluster_dbgahb_regs_info *)priv;
636 struct a6xx_cluster_dbgahb_registers *cur_cluster = info->cluster;
637 unsigned int read_sel;
638 unsigned int data_size = 0;
639 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
640 int i, j;
641
642 if (remain < sizeof(*header)) {
643 SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
644 return 0;
645 }
646
647 remain -= sizeof(*header);
648
649 header->ctxt_id = info->ctxt_id;
650 header->cluster_id = cur_cluster->id;
651
652 read_sel = ((cur_cluster->statetype + info->ctxt_id * 2) & 0xff) << 8;
653 kgsl_regwrite(device, A6XX_HLSQ_DBG_READ_SEL, read_sel);
654
655 for (i = 0; i < cur_cluster->num_sets; i++) {
656 unsigned int start = cur_cluster->regs[2 * i];
657 unsigned int end = cur_cluster->regs[2 * i + 1];
658
659 if (remain < (end - start + 3) * 4) {
660 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
661 goto out;
662 }
663
664 remain -= (end - start + 3) * 4;
665 data_size += (end - start + 3) * 4;
666
667 *data++ = start | (1 << 31);
668 *data++ = end;
669
670 for (j = start; j <= end; j++) {
671 unsigned int val;
672
673 val = a6xx_read_dbgahb(device, cur_cluster->regbase, j);
674 *data++ = val;
675
676 }
677 }
678
679out:
680 return data_size + sizeof(*header);
681}
682
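/*
 * Preferred path: copy a context cluster out of the crash dumper buffer
 * into an MVC snapshot section.
 */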
Lynus Vaz1e258612017-04-27 21:35:22 +0530683static size_t a6xx_snapshot_cluster_dbgahb(struct kgsl_device *device, u8 *buf,
684 size_t remain, void *priv)
685{
686 struct kgsl_snapshot_mvc_regs *header =
687 (struct kgsl_snapshot_mvc_regs *)buf;
688 struct a6xx_cluster_dbgahb_regs_info *info =
689 (struct a6xx_cluster_dbgahb_regs_info *)priv;
690 struct a6xx_cluster_dbgahb_registers *cluster = info->cluster;
691 unsigned int data_size = 0;
692 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
693 int i, j;
694 unsigned int *src;
695
696
697 if (crash_dump_valid == false)
698 return a6xx_legacy_snapshot_cluster_dbgahb(device, buf, remain,
699 info);
700
701 if (remain < sizeof(*header)) {
702 SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
703 return 0;
704 }
705
706 remain -= sizeof(*header);
707
708 header->ctxt_id = info->ctxt_id;
709 header->cluster_id = cluster->id;
710
711 src = (unsigned int *)(a6xx_crashdump_registers.hostptr +
712 (header->ctxt_id ? cluster->offset1 : cluster->offset0));
713
714 for (i = 0; i < cluster->num_sets; i++) {
715 unsigned int start;
716 unsigned int end;
717
718 start = cluster->regs[2 * i];
719 end = cluster->regs[2 * i + 1];
720
721 if (remain < (end - start + 3) * 4) {
722 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
723 goto out;
724 }
725
726 remain -= (end - start + 3) * 4;
727 data_size += (end - start + 3) * 4;
728
729 *data++ = start | (1 << 31);
730 *data++ = end;
731 for (j = start; j <= end; j++)
732 *data++ = *src++;
733 }
734out:
735 return data_size + sizeof(*header);
736}
737
738
739
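/* Dump the non-context debug AHB register ranges as a register section */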
Lynus Vaz461e2382017-01-16 19:35:41 +0530740static size_t a6xx_snapshot_non_ctx_dbgahb(struct kgsl_device *device, u8 *buf,
741 size_t remain, void *priv)
742{
743 struct kgsl_snapshot_regs *header =
744 (struct kgsl_snapshot_regs *)buf;
745 struct a6xx_non_ctx_dbgahb_registers *regs =
746 (struct a6xx_non_ctx_dbgahb_registers *)priv;
747 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
748 int count = 0;
749 unsigned int read_sel;
750 int i, j;
751
752 /* Figure out how many registers we are going to dump */
753 for (i = 0; i < regs->num_sets; i++) {
754 int start = regs->regs[i * 2];
755 int end = regs->regs[i * 2 + 1];
756
757 count += (end - start + 1);
758 }
759
760 if (remain < (count * 8) + sizeof(*header)) {
761 SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
762 return 0;
763 }
764
765 header->count = count;
766
767 read_sel = (regs->statetype & 0xff) << 8;
768 kgsl_regwrite(device, A6XX_HLSQ_DBG_READ_SEL, read_sel);
769
770 for (i = 0; i < regs->num_sets; i++) {
771 unsigned int start = regs->regs[2 * i];
772 unsigned int end = regs->regs[2 * i + 1];
773
774 for (j = start; j <= end; j++) {
775 unsigned int val;
776
777 val = a6xx_read_dbgahb(device, regs->regbase, j);
778 *data++ = j;
779 *data++ = val;
780
781 }
782 }
783 return (count * 8) + sizeof(*header);
784}
785
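/*
 * Add snapshot sections for each debug AHB cluster/context pair and for
 * the non-context debug AHB ranges.
 */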
786static void a6xx_snapshot_dbgahb_regs(struct kgsl_device *device,
787 struct kgsl_snapshot *snapshot)
788{
789 int i, j;
790
791 for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
792 struct a6xx_cluster_dbgahb_registers *cluster =
793 &a6xx_dbgahb_ctx_clusters[i];
794 struct a6xx_cluster_dbgahb_regs_info info;
795
796 info.cluster = cluster;
797 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
798 info.ctxt_id = j;
799
800 kgsl_snapshot_add_section(device,
801 KGSL_SNAPSHOT_SECTION_MVC, snapshot,
802 a6xx_snapshot_cluster_dbgahb, &info);
803 }
804 }
805
806 for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
807 kgsl_snapshot_add_section(device,
808 KGSL_SNAPSHOT_SECTION_REGS, snapshot,
809 a6xx_snapshot_non_ctx_dbgahb, &a6xx_non_ctx_dbgahb[i]);
810 }
811}
812
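/*
 * Legacy path: read a register cluster for the given context directly
 * through the CP aperture when crash dumper data is not available.
 */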
Shrenuj Bansal41665402016-12-16 15:25:54 -0800813static size_t a6xx_legacy_snapshot_mvc(struct kgsl_device *device, u8 *buf,
814 size_t remain, void *priv)
815{
816 struct kgsl_snapshot_mvc_regs *header =
817 (struct kgsl_snapshot_mvc_regs *)buf;
818 struct a6xx_cluster_regs_info *info =
819 (struct a6xx_cluster_regs_info *)priv;
820 struct a6xx_cluster_registers *cur_cluster = info->cluster;
821 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
822 unsigned int ctxt = info->ctxt_id;
823 unsigned int start, end, i, j, aperture_cntl = 0;
824 unsigned int data_size = 0;
825
826 if (remain < sizeof(*header)) {
827 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
828 return 0;
829 }
830
831 remain -= sizeof(*header);
832
833 header->ctxt_id = info->ctxt_id;
834 header->cluster_id = cur_cluster->id;
835
836 /*
837 * Set the AHB control for the Host to read from the
838 * cluster/context for this iteration.
839 */
840 aperture_cntl = ((cur_cluster->id & 0x7) << 8) | (ctxt << 4) | ctxt;
841 kgsl_regwrite(device, A6XX_CP_APERTURE_CNTL_HOST, aperture_cntl);
842
843 for (i = 0; i < cur_cluster->num_sets; i++) {
844 start = cur_cluster->regs[2 * i];
845 end = cur_cluster->regs[2 * i + 1];
846
847 if (remain < (end - start + 3) * 4) {
848 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
849 goto out;
850 }
851
852 remain -= (end - start + 3) * 4;
853 data_size += (end - start + 3) * 4;
854
855 *data++ = start | (1 << 31);
856 *data++ = end;
857 for (j = start; j <= end; j++) {
858 unsigned int val;
859
860 kgsl_regread(device, j, &val);
861 *data++ = val;
862 }
863 }
864out:
865 return data_size + sizeof(*header);
866}
867
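/*
 * Preferred path: copy a register cluster for the given context out of
 * the crash dumper buffer.
 */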
868static size_t a6xx_snapshot_mvc(struct kgsl_device *device, u8 *buf,
869 size_t remain, void *priv)
870{
871 struct kgsl_snapshot_mvc_regs *header =
872 (struct kgsl_snapshot_mvc_regs *)buf;
873 struct a6xx_cluster_regs_info *info =
874 (struct a6xx_cluster_regs_info *)priv;
875 struct a6xx_cluster_registers *cluster = info->cluster;
876 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
877 unsigned int *src;
878 int i, j;
879 unsigned int start, end;
880 size_t data_size = 0;
881
882 if (crash_dump_valid == false)
883 return a6xx_legacy_snapshot_mvc(device, buf, remain, info);
884
885 if (remain < sizeof(*header)) {
886 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
887 return 0;
888 }
889
890 remain -= sizeof(*header);
891
892 header->ctxt_id = info->ctxt_id;
893 header->cluster_id = cluster->id;
894
895 src = (unsigned int *)(a6xx_crashdump_registers.hostptr +
896 (header->ctxt_id ? cluster->offset1 : cluster->offset0));
897
898 for (i = 0; i < cluster->num_sets; i++) {
899 start = cluster->regs[2 * i];
900 end = cluster->regs[2 * i + 1];
901
902 if (remain < (end - start + 3) * 4) {
903 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
904 goto out;
905 }
906
907 remain -= (end - start + 3) * 4;
908 data_size += (end - start + 3) * 4;
909
910 *data++ = start | (1 << 31);
911 *data++ = end;
912 for (j = start; j <= end; j++)
913 *data++ = *src++;
914 }
915
916out:
917 return data_size + sizeof(*header);
918
919}
920
921static void a6xx_snapshot_mvc_regs(struct kgsl_device *device,
922 struct kgsl_snapshot *snapshot)
923{
924 int i, j;
925 struct a6xx_cluster_regs_info info;
926
927 for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
928 struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];
929
930 info.cluster = cluster;
931 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
932 info.ctxt_id = j;
933
934 kgsl_snapshot_add_section(device,
935 KGSL_SNAPSHOT_SECTION_MVC, snapshot,
936 a6xx_snapshot_mvc, &info);
937 }
938 }
939}
940
Lynus Vaz20c81272017-02-10 16:22:12 +0530941/* a6xx_dbgc_debug_bus_read() - Read data from trace bus */
942static void a6xx_dbgc_debug_bus_read(struct kgsl_device *device,
943 unsigned int block_id, unsigned int index, unsigned int *val)
944{
945 unsigned int reg;
946
947 reg = (block_id << A6XX_DBGC_CFG_DBGBUS_SEL_PING_BLK_SEL_SHIFT) |
948 (index << A6XX_DBGC_CFG_DBGBUS_SEL_PING_INDEX_SHIFT);
949
950 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_A, reg);
951 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_B, reg);
952 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_C, reg);
953 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_D, reg);
954
955 kgsl_regread(device, A6XX_DBGC_CFG_DBGBUS_TRACE_BUF2, val);
956 val++;
957 kgsl_regread(device, A6XX_DBGC_CFG_DBGBUS_TRACE_BUF1, val);
958}
959
 960/* a6xx_snapshot_dbgc_debugbus_block() - Capture debug data for a gpu block */
961static size_t a6xx_snapshot_dbgc_debugbus_block(struct kgsl_device *device,
962 u8 *buf, size_t remain, void *priv)
963{
Lynus Vazecd472c2017-04-18 14:15:57 +0530964 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
Lynus Vaz20c81272017-02-10 16:22:12 +0530965 struct kgsl_snapshot_debugbus *header =
966 (struct kgsl_snapshot_debugbus *)buf;
967 struct adreno_debugbus_block *block = priv;
968 int i;
969 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
970 unsigned int dwords;
Lynus Vazecd472c2017-04-18 14:15:57 +0530971 unsigned int block_id;
Lynus Vaz20c81272017-02-10 16:22:12 +0530972 size_t size;
973
974 dwords = block->dwords;
975
976 /* For a6xx each debug bus data unit is 2 DWORDS */
977 size = (dwords * sizeof(unsigned int) * 2) + sizeof(*header);
978
979 if (remain < size) {
980 SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
981 return 0;
982 }
983
984 header->id = block->block_id;
985 header->count = dwords * 2;
986
Lynus Vazecd472c2017-04-18 14:15:57 +0530987 block_id = block->block_id;
988 /* GMU_GX data is read using the GMU_CX block id on A630 */
989 if (adreno_is_a630(adreno_dev) &&
990 (block_id == A6XX_DBGBUS_GMU_GX))
991 block_id = A6XX_DBGBUS_GMU_CX;
992
Lynus Vaz20c81272017-02-10 16:22:12 +0530993 for (i = 0; i < dwords; i++)
Lynus Vazecd472c2017-04-18 14:15:57 +0530994 a6xx_dbgc_debug_bus_read(device, block_id, i, &data[i*2]);
Lynus Vaz20c81272017-02-10 16:22:12 +0530995
996 return size;
997}
998
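/*
 * The CX DBGC block is mapped separately (a6xx_cx_dbgc, set up in
 * a6xx_snapshot_debugbus()) and accessed with raw readl/writel style
 * helpers rather than kgsl_regread/kgsl_regwrite.
 */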
Lynus Vazff24c972017-03-07 19:27:46 +0530999static void _cx_dbgc_regread(unsigned int offsetwords, unsigned int *value)
1000{
1001 void __iomem *reg;
1002
1003 if (WARN((offsetwords < A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) ||
1004 (offsetwords > A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2),
1005 "Read beyond CX_DBGC block: 0x%x\n", offsetwords))
1006 return;
1007
1008 reg = a6xx_cx_dbgc +
1009 ((offsetwords - A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) << 2);
1010 *value = __raw_readl(reg);
1011
1012 /*
1013 * ensure this read finishes before the next one.
1014 * i.e. act like normal readl()
1015 */
1016 rmb();
1017}
1018
1019static void _cx_dbgc_regwrite(unsigned int offsetwords, unsigned int value)
1020{
1021 void __iomem *reg;
1022
1023 if (WARN((offsetwords < A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) ||
1024 (offsetwords > A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2),
1025 "Write beyond CX_DBGC block: 0x%x\n", offsetwords))
1026 return;
1027
1028 reg = a6xx_cx_dbgc +
1029 ((offsetwords - A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) << 2);
1030
1031 /*
1032 * ensure previous writes post before this one,
1033 * i.e. act like normal writel()
1034 */
1035 wmb();
1036 __raw_writel(value, reg);
1037}
1038
 1039/* a6xx_cx_debug_bus_read() - Read data from trace bus */
1040static void a6xx_cx_debug_bus_read(struct kgsl_device *device,
1041 unsigned int block_id, unsigned int index, unsigned int *val)
1042{
1043 unsigned int reg;
1044
1045 reg = (block_id << A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_BLK_SEL_SHIFT) |
1046 (index << A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_INDEX_SHIFT);
1047
1048 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_A, reg);
1049 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_B, reg);
1050 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_C, reg);
1051 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_D, reg);
1052
1053 _cx_dbgc_regread(A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2, val);
1054 val++;
1055 _cx_dbgc_regread(A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1, val);
1056}
1057
1058/*
1059 * a6xx_snapshot_cx_dbgc_debugbus_block() - Capture debug data for a gpu
1060 * block from the CX DBGC block
1061 */
1062static size_t a6xx_snapshot_cx_dbgc_debugbus_block(struct kgsl_device *device,
1063 u8 *buf, size_t remain, void *priv)
1064{
1065 struct kgsl_snapshot_debugbus *header =
1066 (struct kgsl_snapshot_debugbus *)buf;
1067 struct adreno_debugbus_block *block = priv;
1068 int i;
1069 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
1070 unsigned int dwords;
1071 size_t size;
1072
1073 dwords = block->dwords;
1074
 1075	/* For a6xx each debug bus data unit is 2 DWORDS */
1076 size = (dwords * sizeof(unsigned int) * 2) + sizeof(*header);
1077
1078 if (remain < size) {
1079 SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
1080 return 0;
1081 }
1082
1083 header->id = block->block_id;
1084 header->count = dwords * 2;
1085
1086 for (i = 0; i < dwords; i++)
1087 a6xx_cx_debug_bus_read(device, block->block_id, i,
1088 &data[i*2]);
1089
1090 return size;
1091}
1092
Lynus Vaz20c81272017-02-10 16:22:12 +05301093/* a6xx_snapshot_debugbus() - Capture debug bus data */
1094static void a6xx_snapshot_debugbus(struct kgsl_device *device,
1095 struct kgsl_snapshot *snapshot)
1096{
1097 int i;
1098
1099 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_CNTLT,
1100 (0xf << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT_SHIFT) |
1101 (0x4 << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU_SHIFT) |
1102 (0x20 << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN_SHIFT));
1103
1104 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_CNTLM,
1105 0xf << A6XX_DBGC_CFG_DBGBUS_CTLTM_ENABLE_SHIFT);
1106
1107 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_0, 0);
1108 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_1, 0);
1109 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_2, 0);
1110 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_3, 0);
1111
1112 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_BYTEL_0,
1113 (0 << A6XX_DBGC_CFG_DBGBUS_BYTEL0_SHIFT) |
1114 (1 << A6XX_DBGC_CFG_DBGBUS_BYTEL1_SHIFT) |
1115 (2 << A6XX_DBGC_CFG_DBGBUS_BYTEL2_SHIFT) |
1116 (3 << A6XX_DBGC_CFG_DBGBUS_BYTEL3_SHIFT) |
1117 (4 << A6XX_DBGC_CFG_DBGBUS_BYTEL4_SHIFT) |
1118 (5 << A6XX_DBGC_CFG_DBGBUS_BYTEL5_SHIFT) |
1119 (6 << A6XX_DBGC_CFG_DBGBUS_BYTEL6_SHIFT) |
1120 (7 << A6XX_DBGC_CFG_DBGBUS_BYTEL7_SHIFT));
1121 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_BYTEL_1,
1122 (8 << A6XX_DBGC_CFG_DBGBUS_BYTEL8_SHIFT) |
1123 (9 << A6XX_DBGC_CFG_DBGBUS_BYTEL9_SHIFT) |
1124 (10 << A6XX_DBGC_CFG_DBGBUS_BYTEL10_SHIFT) |
1125 (11 << A6XX_DBGC_CFG_DBGBUS_BYTEL11_SHIFT) |
1126 (12 << A6XX_DBGC_CFG_DBGBUS_BYTEL12_SHIFT) |
1127 (13 << A6XX_DBGC_CFG_DBGBUS_BYTEL13_SHIFT) |
1128 (14 << A6XX_DBGC_CFG_DBGBUS_BYTEL14_SHIFT) |
1129 (15 << A6XX_DBGC_CFG_DBGBUS_BYTEL15_SHIFT));
1130
1131 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_0, 0);
1132 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_1, 0);
1133 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_2, 0);
1134 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_3, 0);
1135
Lynus Vazff24c972017-03-07 19:27:46 +05301136 a6xx_cx_dbgc = ioremap(device->reg_phys +
1137 (A6XX_CX_DBGC_CFG_DBGBUS_SEL_A << 2),
1138 (A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2 -
1139 A6XX_CX_DBGC_CFG_DBGBUS_SEL_A + 1) << 2);
1140
1141 if (a6xx_cx_dbgc) {
1142 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_CNTLT,
1143 (0xf << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT_SHIFT) |
1144 (0x4 << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU_SHIFT) |
1145 (0x20 << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN_SHIFT));
1146
1147 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_CNTLM,
1148 0xf << A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE_SHIFT);
1149
1150 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0, 0);
1151 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1, 0);
1152 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2, 0);
1153 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3, 0);
1154
1155 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0,
1156 (0 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL0_SHIFT) |
1157 (1 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL1_SHIFT) |
1158 (2 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL2_SHIFT) |
1159 (3 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL3_SHIFT) |
1160 (4 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL4_SHIFT) |
1161 (5 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL5_SHIFT) |
1162 (6 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL6_SHIFT) |
1163 (7 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL7_SHIFT));
1164 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1,
1165 (8 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL8_SHIFT) |
1166 (9 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL9_SHIFT) |
1167 (10 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL10_SHIFT) |
1168 (11 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL11_SHIFT) |
1169 (12 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL12_SHIFT) |
1170 (13 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL13_SHIFT) |
1171 (14 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL14_SHIFT) |
1172 (15 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL15_SHIFT));
1173
1174 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0, 0);
1175 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1, 0);
1176 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2, 0);
1177 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3, 0);
1178 } else
1179 KGSL_DRV_ERR(device, "Unable to ioremap CX_DBGC_CFG block\n");
1180
Lynus Vaz20c81272017-02-10 16:22:12 +05301181 for (i = 0; i < ARRAY_SIZE(a6xx_dbgc_debugbus_blocks); i++) {
1182 kgsl_snapshot_add_section(device,
1183 KGSL_SNAPSHOT_SECTION_DEBUGBUS,
1184 snapshot, a6xx_snapshot_dbgc_debugbus_block,
1185 (void *) &a6xx_dbgc_debugbus_blocks[i]);
1186 }
Lynus Vazff24c972017-03-07 19:27:46 +05301187
1188 if (a6xx_cx_dbgc) {
1189 for (i = 0; i < ARRAY_SIZE(a6xx_cx_dbgc_debugbus_blocks); i++) {
1190 kgsl_snapshot_add_section(device,
1191 KGSL_SNAPSHOT_SECTION_DEBUGBUS,
1192 snapshot, a6xx_snapshot_cx_dbgc_debugbus_block,
1193 (void *) &a6xx_cx_dbgc_debugbus_blocks[i]);
1194 }
1195 iounmap(a6xx_cx_dbgc);
1196 }
Lynus Vaz20c81272017-02-10 16:22:12 +05301197}
1198
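/* Dump the GMU register ranges; these are read with kgsl_gmu_regread() */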
Kyle Piefer60733aa2017-03-21 11:24:01 -07001199static size_t a6xx_snapshot_dump_gmu_registers(struct kgsl_device *device,
1200 u8 *buf, size_t remain, void *priv)
1201{
1202 struct kgsl_snapshot_regs *header = (struct kgsl_snapshot_regs *)buf;
1203 struct kgsl_snapshot_registers *regs = priv;
1204 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
1205 int count = 0, j, k;
1206
1207 /* Figure out how many registers we are going to dump */
1208 for (j = 0; j < regs->count; j++) {
1209 int start = regs->regs[j * 2];
1210 int end = regs->regs[j * 2 + 1];
1211
1212 count += (end - start + 1);
1213 }
1214
1215 if (remain < (count * 8) + sizeof(*header)) {
1216 SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
1217 return 0;
1218 }
1219
1220 for (j = 0; j < regs->count; j++) {
1221 unsigned int start = regs->regs[j * 2];
1222 unsigned int end = regs->regs[j * 2 + 1];
1223
1224 for (k = start; k <= end; k++) {
1225 unsigned int val;
1226
1227 kgsl_gmu_regread(device, k, &val);
1228 *data++ = k;
1229 *data++ = val;
1230 }
1231 }
1232
1233 header->count = count;
1234
1235 /* Return the size of the section */
1236 return (count * 8) + sizeof(*header);
1237}
1238
1239static void a6xx_snapshot_gmu(struct kgsl_device *device,
1240 struct kgsl_snapshot *snapshot)
1241{
1242 struct kgsl_snapshot_registers gmu_regs = {
1243 .regs = a6xx_gmu_registers,
1244 .count = ARRAY_SIZE(a6xx_gmu_registers) / 2,
1245 };
1246
1247 if (!kgsl_gmu_isenabled(device))
1248 return;
1249
1250 kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
1251 snapshot, a6xx_snapshot_dump_gmu_registers, &gmu_regs);
1252}
1253
Lynus Vaz85150052017-02-21 17:57:48 +05301254/* a6xx_snapshot_sqe() - Dump SQE data in snapshot */
1255static size_t a6xx_snapshot_sqe(struct kgsl_device *device, u8 *buf,
1256 size_t remain, void *priv)
1257{
1258 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
1259 struct kgsl_snapshot_debug *header = (struct kgsl_snapshot_debug *)buf;
1260 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
1261 struct adreno_firmware *fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);
1262
1263 if (remain < DEBUG_SECTION_SZ(1)) {
1264 SNAPSHOT_ERR_NOMEM(device, "SQE VERSION DEBUG");
1265 return 0;
1266 }
1267
1268 /* Dump the SQE firmware version */
1269 header->type = SNAPSHOT_DEBUG_SQE_VERSION;
1270 header->size = 1;
1271 *data = fw->version;
1272
1273 return DEBUG_SECTION_SZ(1);
1274}
1275
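/*
 * Kick off the CP crash dumper: point it at the capture script, start it
 * and poll for completion. crash_dump_valid is only set if the dumper
 * reports that it finished.
 */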
Shrenuj Bansal41665402016-12-16 15:25:54 -08001276static void _a6xx_do_crashdump(struct kgsl_device *device)
1277{
1278 unsigned long wait_time;
1279 unsigned int reg = 0;
1280 unsigned int val;
1281
1282 crash_dump_valid = false;
1283
1284 if (a6xx_capturescript.gpuaddr == 0 ||
1285 a6xx_crashdump_registers.gpuaddr == 0)
1286 return;
1287
 1288	/* If the SMMU is stalled we cannot do a crash dump */
1289 kgsl_regread(device, A6XX_RBBM_STATUS3, &val);
1290 if (val & BIT(24))
1291 return;
1292
1293 /* Turn on APRIV so we can access the buffers */
1294 kgsl_regwrite(device, A6XX_CP_MISC_CNTL, 1);
1295
1296 kgsl_regwrite(device, A6XX_CP_CRASH_SCRIPT_BASE_LO,
1297 lower_32_bits(a6xx_capturescript.gpuaddr));
1298 kgsl_regwrite(device, A6XX_CP_CRASH_SCRIPT_BASE_HI,
1299 upper_32_bits(a6xx_capturescript.gpuaddr));
1300 kgsl_regwrite(device, A6XX_CP_CRASH_DUMP_CNTL, 1);
1301
1302 wait_time = jiffies + msecs_to_jiffies(CP_CRASH_DUMPER_TIMEOUT);
1303 while (!time_after(jiffies, wait_time)) {
1304 kgsl_regread(device, A6XX_CP_CRASH_DUMP_STATUS, &reg);
1305 if (reg & 0x2)
1306 break;
1307 cpu_relax();
1308 }
1309
1310 kgsl_regwrite(device, A6XX_CP_MISC_CNTL, 0);
1311
1312 if (!(reg & 0x2)) {
1313 KGSL_CORE_ERR("Crash dump timed out: 0x%X\n", reg);
1314 return;
1315 }
1316
1317 crash_dump_valid = true;
1318}
1319
1320/*
1321 * a6xx_snapshot() - A6XX GPU snapshot function
1322 * @adreno_dev: Device being snapshotted
1323 * @snapshot: Pointer to the snapshot instance
1324 *
 1325 * This is where all of the A6XX-specific bits and pieces are grabbed
1326 * into the snapshot memory
1327 */
1328void a6xx_snapshot(struct adreno_device *adreno_dev,
1329 struct kgsl_snapshot *snapshot)
1330{
1331 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
1332 struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
1333 struct adreno_snapshot_data *snap_data = gpudev->snapshot_data;
1334
1335 /* Try to run the crash dumper */
1336 _a6xx_do_crashdump(device);
1337
1338 kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
1339 snapshot, a6xx_snapshot_registers, NULL);
1340
1341 adreno_snapshot_vbif_registers(device, snapshot,
1342 a6xx_vbif_snapshot_registers,
1343 ARRAY_SIZE(a6xx_vbif_snapshot_registers));
1344
1345 /* CP_SQE indexed registers */
1346 kgsl_snapshot_indexed_registers(device, snapshot,
1347 A6XX_CP_SQE_STAT_ADDR, A6XX_CP_SQE_STAT_DATA,
1348 0, snap_data->sect_sizes->cp_pfp);
1349
1350 /* CP_DRAW_STATE */
1351 kgsl_snapshot_indexed_registers(device, snapshot,
1352 A6XX_CP_DRAW_STATE_ADDR, A6XX_CP_DRAW_STATE_DATA,
1353 0, 0x100);
1354
1355 /* SQE_UCODE Cache */
1356 kgsl_snapshot_indexed_registers(device, snapshot,
1357 A6XX_CP_SQE_UCODE_DBG_ADDR, A6XX_CP_SQE_UCODE_DBG_DATA,
1358 0, 0x6000);
1359
1360 /* CP ROQ */
1361 kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG,
1362 snapshot, adreno_snapshot_cp_roq,
1363 &snap_data->sect_sizes->roq);
1364
Lynus Vaz85150052017-02-21 17:57:48 +05301365 /* SQE Firmware */
1366 kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG,
1367 snapshot, a6xx_snapshot_sqe, NULL);
1368
Lynus Vaza5922742017-03-14 18:50:54 +05301369 /* Mempool debug data */
1370 a6xx_snapshot_mempool(device, snapshot);
1371
Lynus Vaz9ad67a32017-03-10 14:55:02 +05301372 /* Shader memory */
1373 a6xx_snapshot_shader(device, snapshot);
1374
Shrenuj Bansal41665402016-12-16 15:25:54 -08001375 /* MVC register section */
1376 a6xx_snapshot_mvc_regs(device, snapshot);
1377
Lynus Vaz461e2382017-01-16 19:35:41 +05301378 /* registers dumped through DBG AHB */
1379 a6xx_snapshot_dbgahb_regs(device, snapshot);
1380
Lynus Vaz20c81272017-02-10 16:22:12 +05301381 a6xx_snapshot_debugbus(device, snapshot);
Kyle Piefer60733aa2017-03-21 11:24:01 -07001382
1383 /* GMU TCM data dumped through AHB */
1384 a6xx_snapshot_gmu(device, snapshot);
Shrenuj Bansal41665402016-12-16 15:25:54 -08001385}
1386
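/*
 * Emit capture script entries for each cluster/context pair: one write to
 * program the CP aperture followed by a read command per register range.
 */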
1387static int _a6xx_crashdump_init_mvc(uint64_t *ptr, uint64_t *offset)
1388{
1389 int qwords = 0;
1390 unsigned int i, j, k;
1391 unsigned int count;
1392
1393 for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
1394 struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];
1395
1396 cluster->offset0 = *offset;
1397 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
1398
1399 if (j == 1)
1400 cluster->offset1 = *offset;
1401
1402 ptr[qwords++] = (cluster->id << 8) | (j << 4) | j;
1403 ptr[qwords++] =
1404 ((uint64_t)A6XX_CP_APERTURE_CNTL_HOST << 44) |
1405 (1 << 21) | 1;
1406
1407 for (k = 0; k < cluster->num_sets; k++) {
1408 count = REG_PAIR_COUNT(cluster->regs, k);
1409 ptr[qwords++] =
1410 a6xx_crashdump_registers.gpuaddr + *offset;
1411 ptr[qwords++] =
1412 (((uint64_t)cluster->regs[2 * k]) << 44) |
1413 count;
1414
1415 *offset += count * sizeof(unsigned int);
1416 }
1417 }
1418 }
1419
1420 return qwords;
1421}
1422
Lynus Vaz9ad67a32017-03-10 14:55:02 +05301423static int _a6xx_crashdump_init_shader(struct a6xx_shader_block *block,
1424 uint64_t *ptr, uint64_t *offset)
1425{
1426 int qwords = 0;
1427 unsigned int j;
1428
1429 /* Capture each bank in the block */
1430 for (j = 0; j < A6XX_NUM_SHADER_BANKS; j++) {
1431 /* Program the aperture */
1432 ptr[qwords++] =
1433 (block->statetype << A6XX_SHADER_STATETYPE_SHIFT) | j;
1434 ptr[qwords++] = (((uint64_t) A6XX_HLSQ_DBG_READ_SEL << 44)) |
1435 (1 << 21) | 1;
1436
1437 /* Read all the data in one chunk */
1438 ptr[qwords++] = a6xx_crashdump_registers.gpuaddr + *offset;
1439 ptr[qwords++] =
1440 (((uint64_t) A6XX_HLSQ_DBG_AHB_READ_APERTURE << 44)) |
1441 block->sz;
1442
1443 /* Remember the offset of the first bank for easy access */
1444 if (j == 0)
1445 block->offset = *offset;
1446
1447 *offset += block->sz * sizeof(unsigned int);
1448 }
1449
1450 return qwords;
1451}
1452
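/*
 * Emit capture script entries for the context debug AHB clusters: program
 * A6XX_HLSQ_DBG_READ_SEL for each context, then read each register range
 * through the debug AHB read aperture.
 */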
Lynus Vaz1e258612017-04-27 21:35:22 +05301453static int _a6xx_crashdump_init_ctx_dbgahb(uint64_t *ptr, uint64_t *offset)
1454{
1455 int qwords = 0;
1456 unsigned int i, j, k;
1457 unsigned int count;
1458
1459 for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
1460 struct a6xx_cluster_dbgahb_registers *cluster =
1461 &a6xx_dbgahb_ctx_clusters[i];
1462
1463 cluster->offset0 = *offset;
1464
1465 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
1466 if (j == 1)
1467 cluster->offset1 = *offset;
1468
1469 /* Program the aperture */
1470 ptr[qwords++] =
1471 ((cluster->statetype + j * 2) & 0xff) << 8;
1472 ptr[qwords++] =
1473 (((uint64_t)A6XX_HLSQ_DBG_READ_SEL << 44)) |
1474 (1 << 21) | 1;
1475
1476 for (k = 0; k < cluster->num_sets; k++) {
1477 unsigned int start = cluster->regs[2 * k];
1478
1479 count = REG_PAIR_COUNT(cluster->regs, k);
1480 ptr[qwords++] =
1481 a6xx_crashdump_registers.gpuaddr + *offset;
1482 ptr[qwords++] =
1483 (((uint64_t)(A6XX_HLSQ_DBG_AHB_READ_APERTURE +
1484 start - cluster->regbase / 4) << 44)) |
1485 count;
1486
1487 *offset += count * sizeof(unsigned int);
1488 }
1489 }
1490 }
1491 return qwords;
1492}
1493
Shrenuj Bansal41665402016-12-16 15:25:54 -08001494void a6xx_crashdump_init(struct adreno_device *adreno_dev)
1495{
1496 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
1497 unsigned int script_size = 0;
1498 unsigned int data_size = 0;
1499 unsigned int i, j, k;
1500 uint64_t *ptr;
1501 uint64_t offset = 0;
1502
1503 if (a6xx_capturescript.gpuaddr != 0 &&
1504 a6xx_crashdump_registers.gpuaddr != 0)
1505 return;
1506
1507 /*
1508 * We need to allocate two buffers:
 1509	 * 1 - the buffer to hold the capture script
1510 * 2 - the buffer to hold the data
1511 */
1512
1513 /*
1514 * To save the registers, we need 16 bytes per register pair for the
1515 * script and a dword for each register in the data
1516 */
1517 for (i = 0; i < ARRAY_SIZE(_a6xx_cd_registers); i++) {
1518 struct cdregs *regs = &_a6xx_cd_registers[i];
1519
1520 /* Each pair needs 16 bytes (2 qwords) */
1521 script_size += (regs->size / 2) * 16;
1522
1523 /* Each register needs a dword in the data */
1524 for (j = 0; j < regs->size / 2; j++)
1525 data_size += REG_PAIR_COUNT(regs->regs, j) *
1526 sizeof(unsigned int);
1527
1528 }
1529
Lynus Vaz9ad67a32017-03-10 14:55:02 +05301530 /*
1531 * To save the shader blocks for each block in each type we need 32
1532 * bytes for the script (16 bytes to program the aperture and 16 to
1533 * read the data) and then a block specific number of bytes to hold
1534 * the data
1535 */
1536 for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
1537 script_size += 32 * A6XX_NUM_SHADER_BANKS;
1538 data_size += a6xx_shader_blocks[i].sz * sizeof(unsigned int) *
1539 A6XX_NUM_SHADER_BANKS;
1540 }
1541
Shrenuj Bansal41665402016-12-16 15:25:54 -08001542 /* Calculate the script and data size for MVC registers */
1543 for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
1544 struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];
1545
1546 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
1547
1548 /* 16 bytes for programming the aperture */
1549 script_size += 16;
1550
1551 /* Reading each pair of registers takes 16 bytes */
1552 script_size += 16 * cluster->num_sets;
1553
1554 /* A dword per register read from the cluster list */
1555 for (k = 0; k < cluster->num_sets; k++)
1556 data_size += REG_PAIR_COUNT(cluster->regs, k) *
1557 sizeof(unsigned int);
1558 }
1559 }
1560
Lynus Vaz1e258612017-04-27 21:35:22 +05301561 /* Calculate the script and data size for debug AHB registers */
1562 for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
1563 struct a6xx_cluster_dbgahb_registers *cluster =
1564 &a6xx_dbgahb_ctx_clusters[i];
1565
1566 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
1567
1568 /* 16 bytes for programming the aperture */
1569 script_size += 16;
1570
1571 /* Reading each pair of registers takes 16 bytes */
1572 script_size += 16 * cluster->num_sets;
1573
1574 /* A dword per register read from the cluster list */
1575 for (k = 0; k < cluster->num_sets; k++)
1576 data_size += REG_PAIR_COUNT(cluster->regs, k) *
1577 sizeof(unsigned int);
1578 }
1579 }
1580
Shrenuj Bansal41665402016-12-16 15:25:54 -08001581 /* Now allocate the script and data buffers */
1582
 1583	/* The script buffer needs 2 extra qwords on the end */
1584 if (kgsl_allocate_global(device, &a6xx_capturescript,
1585 script_size + 16, KGSL_MEMFLAGS_GPUREADONLY,
1586 KGSL_MEMDESC_PRIVILEGED, "capturescript"))
1587 return;
1588
1589 if (kgsl_allocate_global(device, &a6xx_crashdump_registers, data_size,
1590 0, KGSL_MEMDESC_PRIVILEGED, "capturescript_regs")) {
1591 kgsl_free_global(KGSL_DEVICE(adreno_dev), &a6xx_capturescript);
1592 return;
1593 }
1594
1595 /* Build the crash script */
1596
1597 ptr = (uint64_t *)a6xx_capturescript.hostptr;
1598
1599 /* For the registers, program a read command for each pair */
1600 for (i = 0; i < ARRAY_SIZE(_a6xx_cd_registers); i++) {
1601 struct cdregs *regs = &_a6xx_cd_registers[i];
1602
1603 for (j = 0; j < regs->size / 2; j++) {
1604 unsigned int r = REG_PAIR_COUNT(regs->regs, j);
1605 *ptr++ = a6xx_crashdump_registers.gpuaddr + offset;
1606 *ptr++ = (((uint64_t) regs->regs[2 * j]) << 44) | r;
1607 offset += r * sizeof(unsigned int);
1608 }
1609 }
1610
Lynus Vaz9ad67a32017-03-10 14:55:02 +05301611 /* Program each shader block */
1612 for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
1613 ptr += _a6xx_crashdump_init_shader(&a6xx_shader_blocks[i], ptr,
1614 &offset);
1615 }
1616
Shrenuj Bansal41665402016-12-16 15:25:54 -08001617	/* Program the capturescript for the MVC registers */
1618 ptr += _a6xx_crashdump_init_mvc(ptr, &offset);
1619
Lynus Vaz1e258612017-04-27 21:35:22 +05301620 ptr += _a6xx_crashdump_init_ctx_dbgahb(ptr, &offset);
1621
Shrenuj Bansal41665402016-12-16 15:25:54 -08001622 *ptr++ = 0;
1623 *ptr++ = 0;
1624}