/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#include <linux/io.h>
15#include "kgsl.h"
16#include "adreno.h"
17#include "kgsl_snapshot.h"
18#include "adreno_snapshot.h"
19#include "a6xx_reg.h"
20#include "adreno_a6xx.h"
#include "kgsl_gmu.h"

#define A6XX_NUM_CTXTS 2
24
25static const unsigned int a6xx_gras_cluster[] = {
26 0x8000, 0x8006, 0x8010, 0x8092, 0x8094, 0x809D, 0x80A0, 0x80A6,
27 0x80AF, 0x80F1, 0x8100, 0x8107, 0x8109, 0x8109, 0x8110, 0x8110,
28 0x8400, 0x840B,
29};
30
31static const unsigned int a6xx_ps_cluster[] = {
32 0x8800, 0x8806, 0x8809, 0x8811, 0x8818, 0x881E, 0x8820, 0x8865,
33 0x8870, 0x8879, 0x8880, 0x8889, 0x8890, 0x8891, 0x8898, 0x8898,
34 0x88C0, 0x88c1, 0x88D0, 0x88E3, 0x88F0, 0x88F3, 0x8900, 0x891A,
35 0x8927, 0x8928, 0x8C00, 0x8C01, 0x8C17, 0x8C33, 0x9200, 0x9216,
36 0x9218, 0x9236, 0x9300, 0x9306,
37};
38
39static const unsigned int a6xx_fe_cluster[] = {
40 0x9300, 0x9306, 0x9800, 0x9806, 0x9B00, 0x9B07, 0xA000, 0xA009,
41 0xA00E, 0xA0EF, 0xA0F8, 0xA0F8,
42};
43
44static const unsigned int a6xx_pc_vs_cluster[] = {
45 0x9100, 0x9108, 0x9300, 0x9306, 0x9980, 0x9981, 0x9B00, 0x9B07,
46};
47
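/*
 * Each cluster names a group of context-banked register ranges; offset0 and
 * offset1 record where the crash dumper places the context 0 and context 1
 * copies of those registers inside a6xx_crashdump_registers.
 */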
48static struct a6xx_cluster_registers {
49 unsigned int id;
50 const unsigned int *regs;
51 unsigned int num_sets;
52 unsigned int offset0;
53 unsigned int offset1;
54} a6xx_clusters[] = {
55 { CP_CLUSTER_GRAS, a6xx_gras_cluster, ARRAY_SIZE(a6xx_gras_cluster)/2 },
56 { CP_CLUSTER_PS, a6xx_ps_cluster, ARRAY_SIZE(a6xx_ps_cluster)/2 },
57 { CP_CLUSTER_FE, a6xx_fe_cluster, ARRAY_SIZE(a6xx_fe_cluster)/2 },
58 { CP_CLUSTER_PC_VS, a6xx_pc_vs_cluster,
59 ARRAY_SIZE(a6xx_pc_vs_cluster)/2 },
60};
61
62struct a6xx_cluster_regs_info {
63 struct a6xx_cluster_registers *cluster;
64 unsigned int ctxt_id;
65};
66
static const unsigned int a6xx_sp_vs_hlsq_cluster[] = {
68 0xB800, 0xB803, 0xB820, 0xB822,
69};
70
71static const unsigned int a6xx_sp_vs_sp_cluster[] = {
72 0xA800, 0xA824, 0xA830, 0xA83C, 0xA840, 0xA864, 0xA870, 0xA895,
73 0xA8A0, 0xA8AF, 0xA8C0, 0xA8C3,
74};
75
76static const unsigned int a6xx_hlsq_duplicate_cluster[] = {
77 0xBB10, 0xBB11, 0xBB20, 0xBB29,
78};
79
80static const unsigned int a6xx_hlsq_2d_duplicate_cluster[] = {
81 0xBD80, 0xBD80,
82};
83
84static const unsigned int a6xx_sp_duplicate_cluster[] = {
85 0xAB00, 0xAB00, 0xAB04, 0xAB05, 0xAB10, 0xAB1B, 0xAB20, 0xAB20,
86};
87
88static const unsigned int a6xx_tp_duplicate_cluster[] = {
89 0xB300, 0xB307, 0xB309, 0xB309, 0xB380, 0xB382,
90};
91
92static const unsigned int a6xx_sp_ps_hlsq_cluster[] = {
93 0xB980, 0xB980, 0xB982, 0xB987, 0xB990, 0xB99B, 0xB9A0, 0xB9A2,
94 0xB9C0, 0xB9C9,
95};
96
97static const unsigned int a6xx_sp_ps_hlsq_2d_cluster[] = {
98 0xBD80, 0xBD80,
99};
100
101static const unsigned int a6xx_sp_ps_sp_cluster[] = {
102 0xA980, 0xA9A8, 0xA9B0, 0xA9BC, 0xA9D0, 0xA9D3, 0xA9E0, 0xA9F3,
103 0xAA00, 0xAA00, 0xAA30, 0xAA31,
104};
105
106static const unsigned int a6xx_sp_ps_sp_2d_cluster[] = {
107 0xACC0, 0xACC0,
108};
109
110static const unsigned int a6xx_sp_ps_tp_cluster[] = {
111 0xB180, 0xB183, 0xB190, 0xB191,
112};
113
114static const unsigned int a6xx_sp_ps_tp_2d_cluster[] = {
115 0xB4C0, 0xB4D1,
116};
117
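/*
 * These clusters are read through the HLSQ debug AHB aperture rather than
 * directly: statetype selects the block via A6XX_HLSQ_DBG_READ_SEL and
 * regbase is the byte offset of the block behind the aperture.
 */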
118static struct a6xx_cluster_dbgahb_registers {
119 unsigned int id;
120 unsigned int regbase;
121 unsigned int statetype;
122 const unsigned int *regs;
123 unsigned int num_sets;
	unsigned int offset0;
	unsigned int offset1;
} a6xx_dbgahb_ctx_clusters[] = {
	{ CP_CLUSTER_SP_VS, 0x0002E000, 0x41, a6xx_sp_vs_hlsq_cluster,
		ARRAY_SIZE(a6xx_sp_vs_hlsq_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002A000, 0x21, a6xx_sp_vs_sp_cluster,
		ARRAY_SIZE(a6xx_sp_vs_sp_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002E000, 0x41, a6xx_hlsq_duplicate_cluster,
		ARRAY_SIZE(a6xx_hlsq_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002F000, 0x45, a6xx_hlsq_2d_duplicate_cluster,
		ARRAY_SIZE(a6xx_hlsq_2d_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002A000, 0x21, a6xx_sp_duplicate_cluster,
		ARRAY_SIZE(a6xx_sp_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002C000, 0x1, a6xx_tp_duplicate_cluster,
		ARRAY_SIZE(a6xx_tp_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002E000, 0x42, a6xx_sp_ps_hlsq_cluster,
		ARRAY_SIZE(a6xx_sp_ps_hlsq_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002F000, 0x46, a6xx_sp_ps_hlsq_2d_cluster,
		ARRAY_SIZE(a6xx_sp_ps_hlsq_2d_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002A000, 0x22, a6xx_sp_ps_sp_cluster,
		ARRAY_SIZE(a6xx_sp_ps_sp_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002B000, 0x26, a6xx_sp_ps_sp_2d_cluster,
		ARRAY_SIZE(a6xx_sp_ps_sp_2d_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002C000, 0x2, a6xx_sp_ps_tp_cluster,
		ARRAY_SIZE(a6xx_sp_ps_tp_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002D000, 0x6, a6xx_sp_ps_tp_2d_cluster,
		ARRAY_SIZE(a6xx_sp_ps_tp_2d_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002E000, 0x42, a6xx_hlsq_duplicate_cluster,
		ARRAY_SIZE(a6xx_hlsq_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002A000, 0x22, a6xx_sp_duplicate_cluster,
		ARRAY_SIZE(a6xx_sp_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002C000, 0x2, a6xx_tp_duplicate_cluster,
		ARRAY_SIZE(a6xx_tp_duplicate_cluster) / 2 },
};
158
159struct a6xx_cluster_dbgahb_regs_info {
160 struct a6xx_cluster_dbgahb_registers *cluster;
161 unsigned int ctxt_id;
162};
163
164static const unsigned int a6xx_hlsq_non_ctx_registers[] = {
165 0xBE00, 0xBE01, 0xBE04, 0xBE05, 0xBE08, 0xBE09, 0xBE10, 0xBE15,
166 0xBE20, 0xBE23,
167};
168
169static const unsigned int a6xx_sp_non_ctx_registers[] = {
170 0xAE00, 0xAE04, 0xAE0C, 0xAE0C, 0xAE0F, 0xAE2B, 0xAE30, 0xAE32,
171 0xAE35, 0xAE35, 0xAE3A, 0xAE3F, 0xAE50, 0xAE52,
172};
173
174static const unsigned int a6xx_tp_non_ctx_registers[] = {
175 0xB600, 0xB601, 0xB604, 0xB605, 0xB610, 0xB61B, 0xB620, 0xB623,
176};
177
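/* Non context-banked blocks that are also read through the debug AHB aperture */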
178static struct a6xx_non_ctx_dbgahb_registers {
179 unsigned int regbase;
180 unsigned int statetype;
181 const unsigned int *regs;
182 unsigned int num_sets;
	unsigned int offset;
} a6xx_non_ctx_dbgahb[] = {
185 { 0x0002F800, 0x40, a6xx_hlsq_non_ctx_registers,
186 ARRAY_SIZE(a6xx_hlsq_non_ctx_registers) / 2 },
187 { 0x0002B800, 0x20, a6xx_sp_non_ctx_registers,
188 ARRAY_SIZE(a6xx_sp_non_ctx_registers) / 2 },
189 { 0x0002D800, 0x0, a6xx_tp_non_ctx_registers,
190 ARRAY_SIZE(a6xx_tp_non_ctx_registers) / 2 },
191};
192
static const unsigned int a6xx_vbif_ver_20xxxxxx_registers[] = {
194 /* VBIF */
195 0x3000, 0x3007, 0x300C, 0x3014, 0x3018, 0x302D, 0x3030, 0x3031,
196 0x3034, 0x3036, 0x303C, 0x303D, 0x3040, 0x3040, 0x3042, 0x3042,
197 0x3049, 0x3049, 0x3058, 0x3058, 0x305A, 0x3061, 0x3064, 0x3068,
198 0x306C, 0x306D, 0x3080, 0x3088, 0x308B, 0x308C, 0x3090, 0x3094,
199 0x3098, 0x3098, 0x309C, 0x309C, 0x30C0, 0x30C0, 0x30C8, 0x30C8,
200 0x30D0, 0x30D0, 0x30D8, 0x30D8, 0x30E0, 0x30E0, 0x3100, 0x3100,
201 0x3108, 0x3108, 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120,
202 0x3124, 0x3125, 0x3129, 0x3129, 0x3131, 0x3131, 0x3154, 0x3154,
203 0x3156, 0x3156, 0x3158, 0x3158, 0x315A, 0x315A, 0x315C, 0x315C,
204 0x315E, 0x315E, 0x3160, 0x3160, 0x3162, 0x3162, 0x340C, 0x340C,
205 0x3410, 0x3410, 0x3800, 0x3801,
206};
207
static const unsigned int a6xx_gmu_registers[] = {
209 /* GMU */
210 0x1B400, 0x1C3FF, 0x1C400, 0x1D3FF,
211};
212
static const struct adreno_vbif_snapshot_registers
214a6xx_vbif_snapshot_registers[] = {
215 { 0x20040000, 0xFF000000, a6xx_vbif_ver_20xxxxxx_registers,
216 ARRAY_SIZE(a6xx_vbif_ver_20xxxxxx_registers)/2},
217};
218
219/*
220 * Set of registers to dump for A6XX on snapshot.
221 * Registers in pairs - first value is the start offset, second
222 * is the stop offset (inclusive)
223 */
224
225static const unsigned int a6xx_registers[] = {
226 /* RBBM */
	0x0000, 0x0002, 0x0010, 0x0010, 0x0012, 0x0012, 0x0018, 0x001B,
228 0x001e, 0x0032, 0x0038, 0x003C, 0x0042, 0x0042, 0x0044, 0x0044,
229 0x0047, 0x0047, 0x0056, 0x0056, 0x00AD, 0x00AE, 0x00B0, 0x00FB,
230 0x0100, 0x011D, 0x0200, 0x020D, 0x0210, 0x0213, 0x0218, 0x023D,
231 0x0400, 0x04F9, 0x0500, 0x0500, 0x0505, 0x050B, 0x050E, 0x0511,
232 0x0533, 0x0533, 0x0540, 0x0555,
	/* CP */
234 0x0800, 0x0808, 0x0810, 0x0813, 0x0820, 0x0821, 0x0823, 0x0827,
235 0x0830, 0x0833, 0x0840, 0x0843, 0x084F, 0x086F, 0x0880, 0x088A,
236 0x08A0, 0x08AB, 0x08C0, 0x08C4, 0x08D0, 0x08DD, 0x08F0, 0x08F3,
237 0x0900, 0x0903, 0x0908, 0x0911, 0x0928, 0x093E, 0x0942, 0x094D,
238 0x0980, 0x0984, 0x098D, 0x0996, 0x0998, 0x099E, 0x09A0, 0x09A6,
239 0x09A8, 0x09AE, 0x09B0, 0x09B1, 0x09C2, 0x09C8, 0x0A00, 0x0A03,
240 /* VSC */
241 0x0C00, 0x0C04, 0x0C06, 0x0C06, 0x0C10, 0x0CD9, 0x0E00, 0x0E0E,
242 /* UCHE */
243 0x0E10, 0x0E13, 0x0E17, 0x0E19, 0x0E1C, 0x0E2B, 0x0E30, 0x0E32,
244 0x0E38, 0x0E39,
245 /* GRAS */
	0x8600, 0x8601, 0x8610, 0x861B, 0x8620, 0x8620, 0x8628, 0x862B,
247 0x8630, 0x8637,
	/* RB */
249 0x8E01, 0x8E01, 0x8E04, 0x8E05, 0x8E07, 0x8E08, 0x8E0C, 0x8E0C,
250 0x8E10, 0x8E1C, 0x8E20, 0x8E25, 0x8E28, 0x8E28, 0x8E2C, 0x8E2F,
251 0x8E3B, 0x8E3E, 0x8E40, 0x8E43, 0x8E50, 0x8E5E, 0x8E70, 0x8E77,
252 /* VPC */
253 0x9600, 0x9604, 0x9624, 0x9637,
254 /* PC */
255 0x9E00, 0x9E01, 0x9E03, 0x9E0E, 0x9E11, 0x9E16, 0x9E19, 0x9E19,
256 0x9E1C, 0x9E1C, 0x9E20, 0x9E23, 0x9E30, 0x9E31, 0x9E34, 0x9E34,
257 0x9E70, 0x9E72, 0x9E78, 0x9E79, 0x9E80, 0x9FFF,
258 /* VFD */
259 0xA600, 0xA601, 0xA603, 0xA603, 0xA60A, 0xA60A, 0xA610, 0xA617,
	0xA630, 0xA630,
};
262
enum a6xx_debugbus_id {
264 A6XX_DBGBUS_CP = 0x1,
265 A6XX_DBGBUS_RBBM = 0x2,
266 A6XX_DBGBUS_VBIF = 0x3,
267 A6XX_DBGBUS_HLSQ = 0x4,
268 A6XX_DBGBUS_UCHE = 0x5,
269 A6XX_DBGBUS_DPM = 0x6,
270 A6XX_DBGBUS_TESS = 0x7,
271 A6XX_DBGBUS_PC = 0x8,
272 A6XX_DBGBUS_VFDP = 0x9,
273 A6XX_DBGBUS_VPC = 0xa,
274 A6XX_DBGBUS_TSE = 0xb,
275 A6XX_DBGBUS_RAS = 0xc,
276 A6XX_DBGBUS_VSC = 0xd,
277 A6XX_DBGBUS_COM = 0xe,
278 A6XX_DBGBUS_LRZ = 0x10,
279 A6XX_DBGBUS_A2D = 0x11,
280 A6XX_DBGBUS_CCUFCHE = 0x12,
	A6XX_DBGBUS_GMU_CX = 0x13,
	A6XX_DBGBUS_RBP = 0x14,
283 A6XX_DBGBUS_DCS = 0x15,
284 A6XX_DBGBUS_RBBM_CFG = 0x16,
285 A6XX_DBGBUS_CX = 0x17,
	A6XX_DBGBUS_GMU_GX = 0x18,
	A6XX_DBGBUS_TPFCHE = 0x19,
288 A6XX_DBGBUS_GPC = 0x1d,
289 A6XX_DBGBUS_LARC = 0x1e,
290 A6XX_DBGBUS_HLSQ_SPTP = 0x1f,
291 A6XX_DBGBUS_RB_0 = 0x20,
292 A6XX_DBGBUS_RB_1 = 0x21,
293 A6XX_DBGBUS_UCHE_WRAPPER = 0x24,
294 A6XX_DBGBUS_CCU_0 = 0x28,
295 A6XX_DBGBUS_CCU_1 = 0x29,
296 A6XX_DBGBUS_VFD_0 = 0x38,
297 A6XX_DBGBUS_VFD_1 = 0x39,
298 A6XX_DBGBUS_VFD_2 = 0x3a,
299 A6XX_DBGBUS_VFD_3 = 0x3b,
300 A6XX_DBGBUS_SP_0 = 0x40,
301 A6XX_DBGBUS_SP_1 = 0x41,
302 A6XX_DBGBUS_TPL1_0 = 0x48,
303 A6XX_DBGBUS_TPL1_1 = 0x49,
304 A6XX_DBGBUS_TPL1_2 = 0x4a,
305 A6XX_DBGBUS_TPL1_3 = 0x4b,
306};
307
308static const struct adreno_debugbus_block a6xx_dbgc_debugbus_blocks[] = {
309 { A6XX_DBGBUS_CP, 0x100, },
310 { A6XX_DBGBUS_RBBM, 0x100, },
311 { A6XX_DBGBUS_HLSQ, 0x100, },
312 { A6XX_DBGBUS_UCHE, 0x100, },
313 { A6XX_DBGBUS_DPM, 0x100, },
314 { A6XX_DBGBUS_TESS, 0x100, },
315 { A6XX_DBGBUS_PC, 0x100, },
316 { A6XX_DBGBUS_VFDP, 0x100, },
317 { A6XX_DBGBUS_VPC, 0x100, },
318 { A6XX_DBGBUS_TSE, 0x100, },
319 { A6XX_DBGBUS_RAS, 0x100, },
320 { A6XX_DBGBUS_VSC, 0x100, },
321 { A6XX_DBGBUS_COM, 0x100, },
322 { A6XX_DBGBUS_LRZ, 0x100, },
323 { A6XX_DBGBUS_A2D, 0x100, },
324 { A6XX_DBGBUS_CCUFCHE, 0x100, },
325 { A6XX_DBGBUS_RBP, 0x100, },
326 { A6XX_DBGBUS_DCS, 0x100, },
327 { A6XX_DBGBUS_RBBM_CFG, 0x100, },
	{ A6XX_DBGBUS_GMU_GX, 0x100, },
	{ A6XX_DBGBUS_TPFCHE, 0x100, },
330 { A6XX_DBGBUS_GPC, 0x100, },
331 { A6XX_DBGBUS_LARC, 0x100, },
332 { A6XX_DBGBUS_HLSQ_SPTP, 0x100, },
333 { A6XX_DBGBUS_RB_0, 0x100, },
334 { A6XX_DBGBUS_RB_1, 0x100, },
335 { A6XX_DBGBUS_UCHE_WRAPPER, 0x100, },
336 { A6XX_DBGBUS_CCU_0, 0x100, },
337 { A6XX_DBGBUS_CCU_1, 0x100, },
338 { A6XX_DBGBUS_VFD_0, 0x100, },
339 { A6XX_DBGBUS_VFD_1, 0x100, },
340 { A6XX_DBGBUS_VFD_2, 0x100, },
341 { A6XX_DBGBUS_VFD_3, 0x100, },
342 { A6XX_DBGBUS_SP_0, 0x100, },
343 { A6XX_DBGBUS_SP_1, 0x100, },
344 { A6XX_DBGBUS_TPL1_0, 0x100, },
345 { A6XX_DBGBUS_TPL1_1, 0x100, },
346 { A6XX_DBGBUS_TPL1_2, 0x100, },
347 { A6XX_DBGBUS_TPL1_3, 0x100, },
};

static void __iomem *a6xx_cx_dbgc;
static const struct adreno_debugbus_block a6xx_cx_dbgc_debugbus_blocks[] = {
	{ A6XX_DBGBUS_VBIF, 0x100, },
	{ A6XX_DBGBUS_GMU_CX, 0x100, },
	{ A6XX_DBGBUS_CX, 0x100, },
};
356
#define A6XX_NUM_SHADER_BANKS 3
358#define A6XX_SHADER_STATETYPE_SHIFT 8
359
360enum a6xx_shader_obj {
361 A6XX_TP0_TMO_DATA = 0x9,
362 A6XX_TP0_SMO_DATA = 0xa,
363 A6XX_TP0_MIPMAP_BASE_DATA = 0xb,
364 A6XX_TP1_TMO_DATA = 0x19,
365 A6XX_TP1_SMO_DATA = 0x1a,
366 A6XX_TP1_MIPMAP_BASE_DATA = 0x1b,
367 A6XX_SP_INST_DATA = 0x29,
368 A6XX_SP_LB_0_DATA = 0x2a,
369 A6XX_SP_LB_1_DATA = 0x2b,
370 A6XX_SP_LB_2_DATA = 0x2c,
371 A6XX_SP_LB_3_DATA = 0x2d,
372 A6XX_SP_LB_4_DATA = 0x2e,
373 A6XX_SP_LB_5_DATA = 0x2f,
374 A6XX_SP_CB_BINDLESS_DATA = 0x30,
375 A6XX_SP_CB_LEGACY_DATA = 0x31,
376 A6XX_SP_UAV_DATA = 0x32,
377 A6XX_SP_INST_TAG = 0x33,
378 A6XX_SP_CB_BINDLESS_TAG = 0x34,
379 A6XX_SP_TMO_UMO_TAG = 0x35,
380 A6XX_SP_SMO_TAG = 0x36,
381 A6XX_SP_STATE_DATA = 0x37,
382 A6XX_HLSQ_CHUNK_CVS_RAM = 0x49,
383 A6XX_HLSQ_CHUNK_CPS_RAM = 0x4a,
384 A6XX_HLSQ_CHUNK_CVS_RAM_TAG = 0x4b,
385 A6XX_HLSQ_CHUNK_CPS_RAM_TAG = 0x4c,
386 A6XX_HLSQ_ICB_CVS_CB_BASE_TAG = 0x4d,
387 A6XX_HLSQ_ICB_CPS_CB_BASE_TAG = 0x4e,
388 A6XX_HLSQ_CVS_MISC_RAM = 0x50,
389 A6XX_HLSQ_CPS_MISC_RAM = 0x51,
390 A6XX_HLSQ_INST_RAM = 0x52,
391 A6XX_HLSQ_GFX_CVS_CONST_RAM = 0x53,
392 A6XX_HLSQ_GFX_CPS_CONST_RAM = 0x54,
393 A6XX_HLSQ_CVS_MISC_RAM_TAG = 0x55,
394 A6XX_HLSQ_CPS_MISC_RAM_TAG = 0x56,
395 A6XX_HLSQ_INST_RAM_TAG = 0x57,
396 A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG = 0x58,
397 A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG = 0x59,
398 A6XX_HLSQ_PWR_REST_RAM = 0x5a,
399 A6XX_HLSQ_PWR_REST_TAG = 0x5b,
400 A6XX_HLSQ_DATAPATH_META = 0x60,
401 A6XX_HLSQ_FRONTEND_META = 0x61,
402 A6XX_HLSQ_INDIRECT_META = 0x62,
403 A6XX_HLSQ_BACKEND_META = 0x63
404};
405
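/*
 * sz is the size of one bank of the block in dwords; offset remembers where
 * the first bank of the block lands in the crash dump data buffer.
 */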
406struct a6xx_shader_block {
407 unsigned int statetype;
408 unsigned int sz;
409 uint64_t offset;
410};
411
412struct a6xx_shader_block_info {
413 struct a6xx_shader_block *block;
414 unsigned int bank;
415 uint64_t offset;
416};
417
418static struct a6xx_shader_block a6xx_shader_blocks[] = {
419 {A6XX_TP0_TMO_DATA, 0x200},
420 {A6XX_TP0_SMO_DATA, 0x80,},
421 {A6XX_TP0_MIPMAP_BASE_DATA, 0x3C0},
422 {A6XX_TP1_TMO_DATA, 0x200},
423 {A6XX_TP1_SMO_DATA, 0x80,},
424 {A6XX_TP1_MIPMAP_BASE_DATA, 0x3C0},
425 {A6XX_SP_INST_DATA, 0x800},
426 {A6XX_SP_LB_0_DATA, 0x800},
427 {A6XX_SP_LB_1_DATA, 0x800},
428 {A6XX_SP_LB_2_DATA, 0x800},
429 {A6XX_SP_LB_3_DATA, 0x800},
430 {A6XX_SP_LB_4_DATA, 0x800},
431 {A6XX_SP_LB_5_DATA, 0x200},
432 {A6XX_SP_CB_BINDLESS_DATA, 0x2000},
433 {A6XX_SP_CB_LEGACY_DATA, 0x280,},
434 {A6XX_SP_UAV_DATA, 0x80,},
435 {A6XX_SP_INST_TAG, 0x80,},
436 {A6XX_SP_CB_BINDLESS_TAG, 0x80,},
437 {A6XX_SP_TMO_UMO_TAG, 0x80,},
438 {A6XX_SP_SMO_TAG, 0x80},
439 {A6XX_SP_STATE_DATA, 0x3F},
440 {A6XX_HLSQ_CHUNK_CVS_RAM, 0x1C0},
441 {A6XX_HLSQ_CHUNK_CPS_RAM, 0x280},
442 {A6XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40,},
443 {A6XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40,},
444 {A6XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x4,},
445 {A6XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x4,},
446 {A6XX_HLSQ_CVS_MISC_RAM, 0x1C0},
447 {A6XX_HLSQ_CPS_MISC_RAM, 0x580},
448 {A6XX_HLSQ_INST_RAM, 0x800},
449 {A6XX_HLSQ_GFX_CVS_CONST_RAM, 0x800},
450 {A6XX_HLSQ_GFX_CPS_CONST_RAM, 0x800},
451 {A6XX_HLSQ_CVS_MISC_RAM_TAG, 0x8,},
452 {A6XX_HLSQ_CPS_MISC_RAM_TAG, 0x4,},
453 {A6XX_HLSQ_INST_RAM_TAG, 0x80,},
454 {A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0xC,},
455 {A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x10},
456 {A6XX_HLSQ_PWR_REST_RAM, 0x28},
457 {A6XX_HLSQ_PWR_REST_TAG, 0x14},
458 {A6XX_HLSQ_DATAPATH_META, 0x40,},
459 {A6XX_HLSQ_FRONTEND_META, 0x40},
460 {A6XX_HLSQ_INDIRECT_META, 0x40,}
461};
462
static struct kgsl_memdesc a6xx_capturescript;
464static struct kgsl_memdesc a6xx_crashdump_registers;
465static bool crash_dump_valid;
466
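/*
 * Legacy fallback: read the register list directly over AHB when the crash
 * dumper did not run or did not complete successfully.
 */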
467static size_t a6xx_legacy_snapshot_registers(struct kgsl_device *device,
468 u8 *buf, size_t remain)
469{
470 struct kgsl_snapshot_registers regs = {
471 .regs = a6xx_registers,
472 .count = ARRAY_SIZE(a6xx_registers) / 2,
473 };
474
475 return kgsl_snapshot_dump_registers(device, buf, remain, &regs);
476}
477
478static struct cdregs {
479 const unsigned int *regs;
480 unsigned int size;
481} _a6xx_cd_registers[] = {
482 { a6xx_registers, ARRAY_SIZE(a6xx_registers) },
483};
484
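/* Number of registers in inclusive pair _i of list _a */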
485#define REG_PAIR_COUNT(_a, _i) \
486 (((_a)[(2 * (_i)) + 1] - (_a)[2 * (_i)]) + 1)
487
488static size_t a6xx_snapshot_registers(struct kgsl_device *device, u8 *buf,
489 size_t remain, void *priv)
490{
491 struct kgsl_snapshot_regs *header = (struct kgsl_snapshot_regs *)buf;
492 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
493 unsigned int *src = (unsigned int *)a6xx_crashdump_registers.hostptr;
494 unsigned int i, j, k;
495 unsigned int count = 0;
496
497 if (crash_dump_valid == false)
498 return a6xx_legacy_snapshot_registers(device, buf, remain);
499
500 if (remain < sizeof(*header)) {
501 SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
502 return 0;
503 }
504
505 remain -= sizeof(*header);
506
507 for (i = 0; i < ARRAY_SIZE(_a6xx_cd_registers); i++) {
508 struct cdregs *regs = &_a6xx_cd_registers[i];
509
510 for (j = 0; j < regs->size / 2; j++) {
511 unsigned int start = regs->regs[2 * j];
512 unsigned int end = regs->regs[(2 * j) + 1];
513
514 if (remain < ((end - start) + 1) * 8) {
515 SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
516 goto out;
517 }
518
519 remain -= ((end - start) + 1) * 8;
520
521 for (k = start; k <= end; k++, count++) {
522 *data++ = k;
523 *data++ = *src++;
524 }
525 }
526 }
527
528out:
529 header->count = count;
530
531 /* Return the size of the section */
532 return (count * 8) + sizeof(*header);
533}
534
static size_t a6xx_snapshot_shader_memory(struct kgsl_device *device,
536 u8 *buf, size_t remain, void *priv)
537{
538 struct kgsl_snapshot_shader *header =
539 (struct kgsl_snapshot_shader *) buf;
540 struct a6xx_shader_block_info *info =
541 (struct a6xx_shader_block_info *) priv;
542 struct a6xx_shader_block *block = info->block;
543 unsigned int *data = (unsigned int *) (buf + sizeof(*header));
544
545 if (remain < SHADER_SECTION_SZ(block->sz)) {
546 SNAPSHOT_ERR_NOMEM(device, "SHADER MEMORY");
547 return 0;
548 }
549
550 header->type = block->statetype;
551 header->index = info->bank;
552 header->size = block->sz;
553
554 memcpy(data, a6xx_crashdump_registers.hostptr + info->offset,
555 block->sz);
556
557 return SHADER_SECTION_SZ(block->sz);
558}
559
560static void a6xx_snapshot_shader(struct kgsl_device *device,
561 struct kgsl_snapshot *snapshot)
562{
563 unsigned int i, j;
564 struct a6xx_shader_block_info info;
565
566 /* Shader blocks can only be read by the crash dumper */
567 if (crash_dump_valid == false)
568 return;
569
570 for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
571 for (j = 0; j < A6XX_NUM_SHADER_BANKS; j++) {
572 info.block = &a6xx_shader_blocks[i];
573 info.bank = j;
574 info.offset = a6xx_shader_blocks[i].offset +
575 (j * a6xx_shader_blocks[i].sz);
576
577 /* Shader working/shadow memory */
578 kgsl_snapshot_add_section(device,
579 KGSL_SNAPSHOT_SECTION_SHADER,
580 snapshot, a6xx_snapshot_shader_memory, &info);
581 }
582 }
583}
584
static void a6xx_snapshot_mempool(struct kgsl_device *device,
586 struct kgsl_snapshot *snapshot)
587{
588 unsigned int pool_size;
	u8 *buf = snapshot->ptr;

	/* Set the mempool size to 0 to stabilize it while dumping */
	kgsl_regread(device, A6XX_CP_MEM_POOL_SIZE, &pool_size);
593 kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, 0);
594
595 kgsl_snapshot_indexed_registers(device, snapshot,
596 A6XX_CP_MEM_POOL_DBG_ADDR, A6XX_CP_MEM_POOL_DBG_DATA,
597 0, 0x2060);
598
	/*
600 * Data at offset 0x2000 in the mempool section is the mempool size.
601 * Since we set it to 0, patch in the original size so that the data
602 * is consistent.
603 */
604 if (buf < snapshot->ptr) {
605 unsigned int *data;
606
607 /* Skip over the headers */
608 buf += sizeof(struct kgsl_snapshot_section_header) +
609 sizeof(struct kgsl_snapshot_indexed_regs);
610
611 data = (unsigned int *)buf + 0x2000;
612 *data = pool_size;
613 }
614
	/* Restore the saved mempool size */
616 kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, pool_size);
617}
618
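/*
 * Read a single register through the HLSQ debug AHB aperture. regbase is in
 * bytes while register offsets are in dwords, hence the divide by 4.
 */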
static inline unsigned int a6xx_read_dbgahb(struct kgsl_device *device,
620 unsigned int regbase, unsigned int reg)
621{
622 unsigned int read_reg = A6XX_HLSQ_DBG_AHB_READ_APERTURE +
623 reg - regbase / 4;
624 unsigned int val;
625
626 kgsl_regread(device, read_reg, &val);
627 return val;
628}
629
static size_t a6xx_legacy_snapshot_cluster_dbgahb(struct kgsl_device *device,
	u8 *buf, size_t remain, void *priv)
{
633 struct kgsl_snapshot_mvc_regs *header =
634 (struct kgsl_snapshot_mvc_regs *)buf;
635 struct a6xx_cluster_dbgahb_regs_info *info =
636 (struct a6xx_cluster_dbgahb_regs_info *)priv;
637 struct a6xx_cluster_dbgahb_registers *cur_cluster = info->cluster;
638 unsigned int read_sel;
639 unsigned int data_size = 0;
640 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
641 int i, j;
642
	if (!device->snapshot_legacy)
		return 0;

	if (remain < sizeof(*header)) {
647 SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
648 return 0;
649 }
650
651 remain -= sizeof(*header);
652
653 header->ctxt_id = info->ctxt_id;
654 header->cluster_id = cur_cluster->id;
655
656 read_sel = ((cur_cluster->statetype + info->ctxt_id * 2) & 0xff) << 8;
657 kgsl_regwrite(device, A6XX_HLSQ_DBG_READ_SEL, read_sel);
658
659 for (i = 0; i < cur_cluster->num_sets; i++) {
660 unsigned int start = cur_cluster->regs[2 * i];
661 unsigned int end = cur_cluster->regs[2 * i + 1];
662
663 if (remain < (end - start + 3) * 4) {
664 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
665 goto out;
666 }
667
668 remain -= (end - start + 3) * 4;
669 data_size += (end - start + 3) * 4;
670
671 *data++ = start | (1 << 31);
672 *data++ = end;
673
674 for (j = start; j <= end; j++) {
675 unsigned int val;
676
677 val = a6xx_read_dbgahb(device, cur_cluster->regbase, j);
678 *data++ = val;
679
680 }
681 }
682
683out:
684 return data_size + sizeof(*header);
685}
686
static size_t a6xx_snapshot_cluster_dbgahb(struct kgsl_device *device, u8 *buf,
688 size_t remain, void *priv)
689{
690 struct kgsl_snapshot_mvc_regs *header =
691 (struct kgsl_snapshot_mvc_regs *)buf;
692 struct a6xx_cluster_dbgahb_regs_info *info =
693 (struct a6xx_cluster_dbgahb_regs_info *)priv;
694 struct a6xx_cluster_dbgahb_registers *cluster = info->cluster;
695 unsigned int data_size = 0;
696 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
697 int i, j;
698 unsigned int *src;
699
700
701 if (crash_dump_valid == false)
702 return a6xx_legacy_snapshot_cluster_dbgahb(device, buf, remain,
703 info);
704
705 if (remain < sizeof(*header)) {
706 SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
707 return 0;
708 }
709
710 remain -= sizeof(*header);
711
712 header->ctxt_id = info->ctxt_id;
713 header->cluster_id = cluster->id;
714
715 src = (unsigned int *)(a6xx_crashdump_registers.hostptr +
716 (header->ctxt_id ? cluster->offset1 : cluster->offset0));
717
718 for (i = 0; i < cluster->num_sets; i++) {
719 unsigned int start;
720 unsigned int end;
721
722 start = cluster->regs[2 * i];
723 end = cluster->regs[2 * i + 1];
724
725 if (remain < (end - start + 3) * 4) {
726 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
727 goto out;
728 }
729
730 remain -= (end - start + 3) * 4;
731 data_size += (end - start + 3) * 4;
732
733 *data++ = start | (1 << 31);
734 *data++ = end;
735 for (j = start; j <= end; j++)
736 *data++ = *src++;
737 }
738out:
739 return data_size + sizeof(*header);
740}
741
static size_t a6xx_legacy_snapshot_non_ctx_dbgahb(struct kgsl_device *device,
	u8 *buf, size_t remain, void *priv)
{
745 struct kgsl_snapshot_regs *header =
746 (struct kgsl_snapshot_regs *)buf;
747 struct a6xx_non_ctx_dbgahb_registers *regs =
748 (struct a6xx_non_ctx_dbgahb_registers *)priv;
749 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
750 int count = 0;
751 unsigned int read_sel;
752 int i, j;
753
	if (!device->snapshot_legacy)
		return 0;

	/* Figure out how many registers we are going to dump */
758 for (i = 0; i < regs->num_sets; i++) {
759 int start = regs->regs[i * 2];
760 int end = regs->regs[i * 2 + 1];
761
762 count += (end - start + 1);
763 }
764
765 if (remain < (count * 8) + sizeof(*header)) {
766 SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
767 return 0;
768 }
769
770 header->count = count;
771
772 read_sel = (regs->statetype & 0xff) << 8;
773 kgsl_regwrite(device, A6XX_HLSQ_DBG_READ_SEL, read_sel);
774
775 for (i = 0; i < regs->num_sets; i++) {
776 unsigned int start = regs->regs[2 * i];
777 unsigned int end = regs->regs[2 * i + 1];
778
779 for (j = start; j <= end; j++) {
780 unsigned int val;
781
782 val = a6xx_read_dbgahb(device, regs->regbase, j);
783 *data++ = j;
784 *data++ = val;
785
786 }
787 }
788 return (count * 8) + sizeof(*header);
789}
790
static size_t a6xx_snapshot_non_ctx_dbgahb(struct kgsl_device *device, u8 *buf,
792 size_t remain, void *priv)
793{
794 struct kgsl_snapshot_regs *header =
795 (struct kgsl_snapshot_regs *)buf;
796 struct a6xx_non_ctx_dbgahb_registers *regs =
797 (struct a6xx_non_ctx_dbgahb_registers *)priv;
798 unsigned int count = 0;
799 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
800 unsigned int i, k;
801 unsigned int *src;
802
803 if (crash_dump_valid == false)
804 return a6xx_legacy_snapshot_non_ctx_dbgahb(device, buf, remain,
805 regs);
806
807 if (remain < sizeof(*header)) {
808 SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
809 return 0;
810 }
811
812 remain -= sizeof(*header);
813
814 src = (unsigned int *)(a6xx_crashdump_registers.hostptr + regs->offset);
815
816 for (i = 0; i < regs->num_sets; i++) {
817 unsigned int start;
818 unsigned int end;
819
820 start = regs->regs[2 * i];
821 end = regs->regs[(2 * i) + 1];
822
823 if (remain < (end - start + 1) * 8) {
824 SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
825 goto out;
826 }
827
828 remain -= ((end - start) + 1) * 8;
829
830 for (k = start; k <= end; k++, count++) {
831 *data++ = k;
832 *data++ = *src++;
833 }
834 }
835out:
836 header->count = count;
837
838 /* Return the size of the section */
839 return (count * 8) + sizeof(*header);
840}
841
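/* Dump the context-banked and non-context register blocks behind the debug AHB */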
static void a6xx_snapshot_dbgahb_regs(struct kgsl_device *device,
843 struct kgsl_snapshot *snapshot)
844{
845 int i, j;
846
847 for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
848 struct a6xx_cluster_dbgahb_registers *cluster =
849 &a6xx_dbgahb_ctx_clusters[i];
850 struct a6xx_cluster_dbgahb_regs_info info;
851
852 info.cluster = cluster;
853 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
854 info.ctxt_id = j;
855
856 kgsl_snapshot_add_section(device,
857 KGSL_SNAPSHOT_SECTION_MVC, snapshot,
858 a6xx_snapshot_cluster_dbgahb, &info);
859 }
860 }
861
862 for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
863 kgsl_snapshot_add_section(device,
864 KGSL_SNAPSHOT_SECTION_REGS, snapshot,
865 a6xx_snapshot_non_ctx_dbgahb, &a6xx_non_ctx_dbgahb[i]);
866 }
867}
868
static size_t a6xx_legacy_snapshot_mvc(struct kgsl_device *device, u8 *buf,
870 size_t remain, void *priv)
871{
872 struct kgsl_snapshot_mvc_regs *header =
873 (struct kgsl_snapshot_mvc_regs *)buf;
874 struct a6xx_cluster_regs_info *info =
875 (struct a6xx_cluster_regs_info *)priv;
876 struct a6xx_cluster_registers *cur_cluster = info->cluster;
877 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
878 unsigned int ctxt = info->ctxt_id;
879 unsigned int start, end, i, j, aperture_cntl = 0;
880 unsigned int data_size = 0;
881
882 if (remain < sizeof(*header)) {
883 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
884 return 0;
885 }
886
887 remain -= sizeof(*header);
888
889 header->ctxt_id = info->ctxt_id;
890 header->cluster_id = cur_cluster->id;
891
892 /*
893 * Set the AHB control for the Host to read from the
894 * cluster/context for this iteration.
895 */
896 aperture_cntl = ((cur_cluster->id & 0x7) << 8) | (ctxt << 4) | ctxt;
897 kgsl_regwrite(device, A6XX_CP_APERTURE_CNTL_HOST, aperture_cntl);
898
899 for (i = 0; i < cur_cluster->num_sets; i++) {
900 start = cur_cluster->regs[2 * i];
901 end = cur_cluster->regs[2 * i + 1];
902
903 if (remain < (end - start + 3) * 4) {
904 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
905 goto out;
906 }
907
908 remain -= (end - start + 3) * 4;
909 data_size += (end - start + 3) * 4;
910
911 *data++ = start | (1 << 31);
912 *data++ = end;
913 for (j = start; j <= end; j++) {
914 unsigned int val;
915
916 kgsl_regread(device, j, &val);
917 *data++ = val;
918 }
919 }
920out:
921 return data_size + sizeof(*header);
922}
923
924static size_t a6xx_snapshot_mvc(struct kgsl_device *device, u8 *buf,
925 size_t remain, void *priv)
926{
927 struct kgsl_snapshot_mvc_regs *header =
928 (struct kgsl_snapshot_mvc_regs *)buf;
929 struct a6xx_cluster_regs_info *info =
930 (struct a6xx_cluster_regs_info *)priv;
931 struct a6xx_cluster_registers *cluster = info->cluster;
932 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
933 unsigned int *src;
934 int i, j;
935 unsigned int start, end;
936 size_t data_size = 0;
937
938 if (crash_dump_valid == false)
939 return a6xx_legacy_snapshot_mvc(device, buf, remain, info);
940
941 if (remain < sizeof(*header)) {
942 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
943 return 0;
944 }
945
946 remain -= sizeof(*header);
947
948 header->ctxt_id = info->ctxt_id;
949 header->cluster_id = cluster->id;
950
951 src = (unsigned int *)(a6xx_crashdump_registers.hostptr +
952 (header->ctxt_id ? cluster->offset1 : cluster->offset0));
953
954 for (i = 0; i < cluster->num_sets; i++) {
955 start = cluster->regs[2 * i];
956 end = cluster->regs[2 * i + 1];
957
958 if (remain < (end - start + 3) * 4) {
959 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
960 goto out;
961 }
962
963 remain -= (end - start + 3) * 4;
964 data_size += (end - start + 3) * 4;
965
966 *data++ = start | (1 << 31);
967 *data++ = end;
968 for (j = start; j <= end; j++)
969 *data++ = *src++;
970 }
971
972out:
973 return data_size + sizeof(*header);
974
975}
976
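/* Add an MVC section for every cluster/context combination */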
977static void a6xx_snapshot_mvc_regs(struct kgsl_device *device,
978 struct kgsl_snapshot *snapshot)
979{
980 int i, j;
981 struct a6xx_cluster_regs_info info;
982
983 for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
984 struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];
985
986 info.cluster = cluster;
987 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
988 info.ctxt_id = j;
989
990 kgsl_snapshot_add_section(device,
991 KGSL_SNAPSHOT_SECTION_MVC, snapshot,
992 a6xx_snapshot_mvc, &info);
993 }
994 }
995}
996
/* a6xx_dbgc_debug_bus_read() - Read data from trace bus */
998static void a6xx_dbgc_debug_bus_read(struct kgsl_device *device,
999 unsigned int block_id, unsigned int index, unsigned int *val)
1000{
1001 unsigned int reg;
1002
1003 reg = (block_id << A6XX_DBGC_CFG_DBGBUS_SEL_PING_BLK_SEL_SHIFT) |
1004 (index << A6XX_DBGC_CFG_DBGBUS_SEL_PING_INDEX_SHIFT);
1005
1006 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_A, reg);
1007 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_B, reg);
1008 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_C, reg);
1009 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_D, reg);
1010
	/*
	 * There needs to be a delay of 1 us to ensure enough time for the
	 * correct data to be funneled into the trace buffer
	 */
	udelay(1);
1016
	kgsl_regread(device, A6XX_DBGC_CFG_DBGBUS_TRACE_BUF2, val);
1018 val++;
1019 kgsl_regread(device, A6XX_DBGC_CFG_DBGBUS_TRACE_BUF1, val);
1020}
1021
/* a6xx_snapshot_dbgc_debugbus_block() - Capture debug data for a gpu block */
1023static size_t a6xx_snapshot_dbgc_debugbus_block(struct kgsl_device *device,
1024 u8 *buf, size_t remain, void *priv)
1025{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct kgsl_snapshot_debugbus *header =
1028 (struct kgsl_snapshot_debugbus *)buf;
1029 struct adreno_debugbus_block *block = priv;
1030 int i;
1031 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
1032 unsigned int dwords;
	unsigned int block_id;
	size_t size;
1035
1036 dwords = block->dwords;
1037
1038 /* For a6xx each debug bus data unit is 2 DWORDS */
1039 size = (dwords * sizeof(unsigned int) * 2) + sizeof(*header);
1040
1041 if (remain < size) {
1042 SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
1043 return 0;
1044 }
1045
1046 header->id = block->block_id;
1047 header->count = dwords * 2;
1048
	block_id = block->block_id;
1050 /* GMU_GX data is read using the GMU_CX block id on A630 */
1051 if (adreno_is_a630(adreno_dev) &&
1052 (block_id == A6XX_DBGBUS_GMU_GX))
1053 block_id = A6XX_DBGBUS_GMU_CX;
1054
	for (i = 0; i < dwords; i++)
		a6xx_dbgc_debug_bus_read(device, block_id, i, &data[i*2]);

	return size;
1059}
1060
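/*
 * The CX DBGC registers are accessed through the dedicated mapping set up in
 * a6xx_snapshot_debugbus() instead of the regular kgsl register helpers.
 */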
static void _cx_dbgc_regread(unsigned int offsetwords, unsigned int *value)
1062{
1063 void __iomem *reg;
1064
1065 if (WARN((offsetwords < A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) ||
1066 (offsetwords > A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2),
1067 "Read beyond CX_DBGC block: 0x%x\n", offsetwords))
1068 return;
1069
1070 reg = a6xx_cx_dbgc +
1071 ((offsetwords - A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) << 2);
1072 *value = __raw_readl(reg);
1073
1074 /*
1075 * ensure this read finishes before the next one.
1076 * i.e. act like normal readl()
1077 */
1078 rmb();
1079}
1080
1081static void _cx_dbgc_regwrite(unsigned int offsetwords, unsigned int value)
1082{
1083 void __iomem *reg;
1084
1085 if (WARN((offsetwords < A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) ||
1086 (offsetwords > A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2),
1087 "Write beyond CX_DBGC block: 0x%x\n", offsetwords))
1088 return;
1089
1090 reg = a6xx_cx_dbgc +
1091 ((offsetwords - A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) << 2);
1092
1093 /*
1094 * ensure previous writes post before this one,
1095 * i.e. act like normal writel()
1096 */
1097 wmb();
1098 __raw_writel(value, reg);
1099}
1100
1101/* a6xx_cx_dbgc_debug_bus_read() - Read data from trace bus */
1102static void a6xx_cx_debug_bus_read(struct kgsl_device *device,
1103 unsigned int block_id, unsigned int index, unsigned int *val)
1104{
1105 unsigned int reg;
1106
1107 reg = (block_id << A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_BLK_SEL_SHIFT) |
1108 (index << A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_INDEX_SHIFT);
1109
1110 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_A, reg);
1111 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_B, reg);
1112 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_C, reg);
1113 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_D, reg);
1114
	/*
	 * There needs to be a delay of 1 us to ensure enough time for the
	 * correct data to be funneled into the trace buffer
	 */
	udelay(1);
1120
	_cx_dbgc_regread(A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2, val);
1122 val++;
1123 _cx_dbgc_regread(A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1, val);
1124}
1125
1126/*
1127 * a6xx_snapshot_cx_dbgc_debugbus_block() - Capture debug data for a gpu
1128 * block from the CX DBGC block
1129 */
1130static size_t a6xx_snapshot_cx_dbgc_debugbus_block(struct kgsl_device *device,
1131 u8 *buf, size_t remain, void *priv)
1132{
1133 struct kgsl_snapshot_debugbus *header =
1134 (struct kgsl_snapshot_debugbus *)buf;
1135 struct adreno_debugbus_block *block = priv;
1136 int i;
1137 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
1138 unsigned int dwords;
1139 size_t size;
1140
1141 dwords = block->dwords;
1142
	/* For a6xx each debug bus data unit is 2 DWORDS */
1144 size = (dwords * sizeof(unsigned int) * 2) + sizeof(*header);
1145
1146 if (remain < size) {
1147 SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
1148 return 0;
1149 }
1150
1151 header->id = block->block_id;
1152 header->count = dwords * 2;
1153
1154 for (i = 0; i < dwords; i++)
1155 a6xx_cx_debug_bus_read(device, block->block_id, i,
1156 &data[i*2]);
1157
1158 return size;
1159}
1160
/* a6xx_snapshot_debugbus() - Capture debug bus data */
1162static void a6xx_snapshot_debugbus(struct kgsl_device *device,
1163 struct kgsl_snapshot *snapshot)
1164{
1165 int i;
1166
1167 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_CNTLT,
1168 (0xf << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT_SHIFT) |
		(0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU_SHIFT) |
		(0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN_SHIFT));

	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_CNTLM,
1173 0xf << A6XX_DBGC_CFG_DBGBUS_CTLTM_ENABLE_SHIFT);
1174
1175 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_0, 0);
1176 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_1, 0);
1177 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_2, 0);
1178 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_3, 0);
1179
1180 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_BYTEL_0,
1181 (0 << A6XX_DBGC_CFG_DBGBUS_BYTEL0_SHIFT) |
1182 (1 << A6XX_DBGC_CFG_DBGBUS_BYTEL1_SHIFT) |
1183 (2 << A6XX_DBGC_CFG_DBGBUS_BYTEL2_SHIFT) |
1184 (3 << A6XX_DBGC_CFG_DBGBUS_BYTEL3_SHIFT) |
1185 (4 << A6XX_DBGC_CFG_DBGBUS_BYTEL4_SHIFT) |
1186 (5 << A6XX_DBGC_CFG_DBGBUS_BYTEL5_SHIFT) |
1187 (6 << A6XX_DBGC_CFG_DBGBUS_BYTEL6_SHIFT) |
1188 (7 << A6XX_DBGC_CFG_DBGBUS_BYTEL7_SHIFT));
1189 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_BYTEL_1,
1190 (8 << A6XX_DBGC_CFG_DBGBUS_BYTEL8_SHIFT) |
1191 (9 << A6XX_DBGC_CFG_DBGBUS_BYTEL9_SHIFT) |
1192 (10 << A6XX_DBGC_CFG_DBGBUS_BYTEL10_SHIFT) |
1193 (11 << A6XX_DBGC_CFG_DBGBUS_BYTEL11_SHIFT) |
1194 (12 << A6XX_DBGC_CFG_DBGBUS_BYTEL12_SHIFT) |
1195 (13 << A6XX_DBGC_CFG_DBGBUS_BYTEL13_SHIFT) |
1196 (14 << A6XX_DBGC_CFG_DBGBUS_BYTEL14_SHIFT) |
1197 (15 << A6XX_DBGC_CFG_DBGBUS_BYTEL15_SHIFT));
1198
1199 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_0, 0);
1200 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_1, 0);
1201 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_2, 0);
1202 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_3, 0);
1203
	a6xx_cx_dbgc = ioremap(device->reg_phys +
1205 (A6XX_CX_DBGC_CFG_DBGBUS_SEL_A << 2),
1206 (A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2 -
1207 A6XX_CX_DBGC_CFG_DBGBUS_SEL_A + 1) << 2);
1208
1209 if (a6xx_cx_dbgc) {
1210 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_CNTLT,
1211 (0xf << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT_SHIFT) |
			(0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU_SHIFT) |
			(0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN_SHIFT));

		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_CNTLM,
1216 0xf << A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE_SHIFT);
1217
1218 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0, 0);
1219 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1, 0);
1220 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2, 0);
1221 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3, 0);
1222
1223 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0,
1224 (0 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL0_SHIFT) |
1225 (1 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL1_SHIFT) |
1226 (2 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL2_SHIFT) |
1227 (3 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL3_SHIFT) |
1228 (4 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL4_SHIFT) |
1229 (5 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL5_SHIFT) |
1230 (6 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL6_SHIFT) |
1231 (7 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL7_SHIFT));
1232 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1,
1233 (8 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL8_SHIFT) |
1234 (9 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL9_SHIFT) |
1235 (10 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL10_SHIFT) |
1236 (11 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL11_SHIFT) |
1237 (12 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL12_SHIFT) |
1238 (13 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL13_SHIFT) |
1239 (14 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL14_SHIFT) |
1240 (15 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL15_SHIFT));
1241
1242 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0, 0);
1243 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1, 0);
1244 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2, 0);
1245 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3, 0);
1246 } else
1247 KGSL_DRV_ERR(device, "Unable to ioremap CX_DBGC_CFG block\n");
1248
	for (i = 0; i < ARRAY_SIZE(a6xx_dbgc_debugbus_blocks); i++) {
1250 kgsl_snapshot_add_section(device,
1251 KGSL_SNAPSHOT_SECTION_DEBUGBUS,
1252 snapshot, a6xx_snapshot_dbgc_debugbus_block,
1253 (void *) &a6xx_dbgc_debugbus_blocks[i]);
	}

	if (a6xx_cx_dbgc) {
1257 for (i = 0; i < ARRAY_SIZE(a6xx_cx_dbgc_debugbus_blocks); i++) {
1258 kgsl_snapshot_add_section(device,
1259 KGSL_SNAPSHOT_SECTION_DEBUGBUS,
1260 snapshot, a6xx_snapshot_cx_dbgc_debugbus_block,
1261 (void *) &a6xx_cx_dbgc_debugbus_blocks[i]);
1262 }
1263 iounmap(a6xx_cx_dbgc);
1264 }
}
1266
static size_t a6xx_snapshot_dump_gmu_registers(struct kgsl_device *device,
1268 u8 *buf, size_t remain, void *priv)
1269{
1270 struct kgsl_snapshot_regs *header = (struct kgsl_snapshot_regs *)buf;
1271 struct kgsl_snapshot_registers *regs = priv;
1272 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
1273 int count = 0, j, k;
1274
1275 /* Figure out how many registers we are going to dump */
1276 for (j = 0; j < regs->count; j++) {
1277 int start = regs->regs[j * 2];
1278 int end = regs->regs[j * 2 + 1];
1279
1280 count += (end - start + 1);
1281 }
1282
1283 if (remain < (count * 8) + sizeof(*header)) {
1284 SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
1285 return 0;
1286 }
1287
1288 for (j = 0; j < regs->count; j++) {
1289 unsigned int start = regs->regs[j * 2];
1290 unsigned int end = regs->regs[j * 2 + 1];
1291
1292 for (k = start; k <= end; k++) {
1293 unsigned int val;
1294
1295 kgsl_gmu_regread(device, k, &val);
1296 *data++ = k;
1297 *data++ = val;
1298 }
1299 }
1300
1301 header->count = count;
1302
1303 /* Return the size of the section */
1304 return (count * 8) + sizeof(*header);
1305}
1306
1307static void a6xx_snapshot_gmu(struct kgsl_device *device,
1308 struct kgsl_snapshot *snapshot)
1309{
1310 struct kgsl_snapshot_registers gmu_regs = {
1311 .regs = a6xx_gmu_registers,
1312 .count = ARRAY_SIZE(a6xx_gmu_registers) / 2,
1313 };
1314
1315 if (!kgsl_gmu_isenabled(device))
1316 return;
1317
1318 kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
1319 snapshot, a6xx_snapshot_dump_gmu_registers, &gmu_regs);
1320}
1321
/* a6xx_snapshot_sqe() - Dump SQE data in snapshot */
1323static size_t a6xx_snapshot_sqe(struct kgsl_device *device, u8 *buf,
1324 size_t remain, void *priv)
1325{
1326 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
1327 struct kgsl_snapshot_debug *header = (struct kgsl_snapshot_debug *)buf;
1328 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
1329 struct adreno_firmware *fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);
1330
1331 if (remain < DEBUG_SECTION_SZ(1)) {
1332 SNAPSHOT_ERR_NOMEM(device, "SQE VERSION DEBUG");
1333 return 0;
1334 }
1335
1336 /* Dump the SQE firmware version */
1337 header->type = SNAPSHOT_DEBUG_SQE_VERSION;
1338 header->size = 1;
1339 *data = fw->version;
1340
1341 return DEBUG_SECTION_SZ(1);
1342}
1343
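/*
 * Point the CP at the capture script and wait up to CP_CRASH_DUMPER_TIMEOUT
 * for it to finish; if it does not, crash_dump_valid stays false and the
 * snapshot falls back to the legacy register read paths.
 */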
static void _a6xx_do_crashdump(struct kgsl_device *device)
1345{
1346 unsigned long wait_time;
1347 unsigned int reg = 0;
1348 unsigned int val;
1349
1350 crash_dump_valid = false;
1351
1352 if (a6xx_capturescript.gpuaddr == 0 ||
1353 a6xx_crashdump_registers.gpuaddr == 0)
1354 return;
1355
	/* If the SMMU is stalled we cannot do a crash dump */
1357 kgsl_regread(device, A6XX_RBBM_STATUS3, &val);
1358 if (val & BIT(24))
1359 return;
1360
1361 /* Turn on APRIV so we can access the buffers */
1362 kgsl_regwrite(device, A6XX_CP_MISC_CNTL, 1);
1363
1364 kgsl_regwrite(device, A6XX_CP_CRASH_SCRIPT_BASE_LO,
1365 lower_32_bits(a6xx_capturescript.gpuaddr));
1366 kgsl_regwrite(device, A6XX_CP_CRASH_SCRIPT_BASE_HI,
1367 upper_32_bits(a6xx_capturescript.gpuaddr));
1368 kgsl_regwrite(device, A6XX_CP_CRASH_DUMP_CNTL, 1);
1369
1370 wait_time = jiffies + msecs_to_jiffies(CP_CRASH_DUMPER_TIMEOUT);
1371 while (!time_after(jiffies, wait_time)) {
1372 kgsl_regread(device, A6XX_CP_CRASH_DUMP_STATUS, &reg);
1373 if (reg & 0x2)
1374 break;
1375 cpu_relax();
1376 }
1377
1378 kgsl_regwrite(device, A6XX_CP_MISC_CNTL, 0);
1379
1380 if (!(reg & 0x2)) {
1381 KGSL_CORE_ERR("Crash dump timed out: 0x%X\n", reg);
1382 return;
1383 }
1384
1385 crash_dump_valid = true;
1386}
1387
1388/*
1389 * a6xx_snapshot() - A6XX GPU snapshot function
1390 * @adreno_dev: Device being snapshotted
1391 * @snapshot: Pointer to the snapshot instance
1392 *
1393 * This is where all of the A6XX specific bits and pieces are grabbed
1394 * into the snapshot memory
1395 */
1396void a6xx_snapshot(struct adreno_device *adreno_dev,
1397 struct kgsl_snapshot *snapshot)
1398{
1399 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
1400 struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
1401 struct adreno_snapshot_data *snap_data = gpudev->snapshot_data;
1402
1403 /* Try to run the crash dumper */
1404 _a6xx_do_crashdump(device);
1405
1406 kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
1407 snapshot, a6xx_snapshot_registers, NULL);
1408
1409 adreno_snapshot_vbif_registers(device, snapshot,
1410 a6xx_vbif_snapshot_registers,
1411 ARRAY_SIZE(a6xx_vbif_snapshot_registers));
1412
1413 /* CP_SQE indexed registers */
1414 kgsl_snapshot_indexed_registers(device, snapshot,
1415 A6XX_CP_SQE_STAT_ADDR, A6XX_CP_SQE_STAT_DATA,
1416 0, snap_data->sect_sizes->cp_pfp);
1417
1418 /* CP_DRAW_STATE */
1419 kgsl_snapshot_indexed_registers(device, snapshot,
1420 A6XX_CP_DRAW_STATE_ADDR, A6XX_CP_DRAW_STATE_DATA,
1421 0, 0x100);
1422
1423 /* SQE_UCODE Cache */
1424 kgsl_snapshot_indexed_registers(device, snapshot,
1425 A6XX_CP_SQE_UCODE_DBG_ADDR, A6XX_CP_SQE_UCODE_DBG_DATA,
1426 0, 0x6000);
1427
1428 /* CP ROQ */
1429 kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG,
1430 snapshot, adreno_snapshot_cp_roq,
1431 &snap_data->sect_sizes->roq);
1432
	/* SQE Firmware */
1434 kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG,
1435 snapshot, a6xx_snapshot_sqe, NULL);
1436
	/* Mempool debug data */
1438 a6xx_snapshot_mempool(device, snapshot);
1439
	/* Shader memory */
1441 a6xx_snapshot_shader(device, snapshot);
1442
	/* MVC register section */
1444 a6xx_snapshot_mvc_regs(device, snapshot);
1445
	/* registers dumped through DBG AHB */
1447 a6xx_snapshot_dbgahb_regs(device, snapshot);
1448
	a6xx_snapshot_debugbus(device, snapshot);

	/* GMU TCM data dumped through AHB */
	a6xx_snapshot_gmu(device, snapshot);
}
1454
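/*
 * Capture script entries are pairs of qwords: a data value or destination GPU
 * address followed by a control word that packs the register offset into the
 * upper bits and the read count into the lower bits. Two zero qwords
 * terminate the script.
 */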
1455static int _a6xx_crashdump_init_mvc(uint64_t *ptr, uint64_t *offset)
1456{
1457 int qwords = 0;
1458 unsigned int i, j, k;
1459 unsigned int count;
1460
1461 for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
1462 struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];
1463
1464 cluster->offset0 = *offset;
1465 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
1466
1467 if (j == 1)
1468 cluster->offset1 = *offset;
1469
1470 ptr[qwords++] = (cluster->id << 8) | (j << 4) | j;
1471 ptr[qwords++] =
1472 ((uint64_t)A6XX_CP_APERTURE_CNTL_HOST << 44) |
1473 (1 << 21) | 1;
1474
1475 for (k = 0; k < cluster->num_sets; k++) {
1476 count = REG_PAIR_COUNT(cluster->regs, k);
1477 ptr[qwords++] =
1478 a6xx_crashdump_registers.gpuaddr + *offset;
1479 ptr[qwords++] =
1480 (((uint64_t)cluster->regs[2 * k]) << 44) |
1481 count;
1482
1483 *offset += count * sizeof(unsigned int);
1484 }
1485 }
1486 }
1487
1488 return qwords;
1489}
1490
static int _a6xx_crashdump_init_shader(struct a6xx_shader_block *block,
1492 uint64_t *ptr, uint64_t *offset)
1493{
1494 int qwords = 0;
1495 unsigned int j;
1496
1497 /* Capture each bank in the block */
1498 for (j = 0; j < A6XX_NUM_SHADER_BANKS; j++) {
1499 /* Program the aperture */
1500 ptr[qwords++] =
1501 (block->statetype << A6XX_SHADER_STATETYPE_SHIFT) | j;
1502 ptr[qwords++] = (((uint64_t) A6XX_HLSQ_DBG_READ_SEL << 44)) |
1503 (1 << 21) | 1;
1504
1505 /* Read all the data in one chunk */
1506 ptr[qwords++] = a6xx_crashdump_registers.gpuaddr + *offset;
1507 ptr[qwords++] =
1508 (((uint64_t) A6XX_HLSQ_DBG_AHB_READ_APERTURE << 44)) |
1509 block->sz;
1510
1511 /* Remember the offset of the first bank for easy access */
1512 if (j == 0)
1513 block->offset = *offset;
1514
1515 *offset += block->sz * sizeof(unsigned int);
1516 }
1517
1518 return qwords;
1519}
1520
static int _a6xx_crashdump_init_ctx_dbgahb(uint64_t *ptr, uint64_t *offset)
1522{
1523 int qwords = 0;
1524 unsigned int i, j, k;
1525 unsigned int count;
1526
1527 for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
1528 struct a6xx_cluster_dbgahb_registers *cluster =
1529 &a6xx_dbgahb_ctx_clusters[i];
1530
1531 cluster->offset0 = *offset;
1532
1533 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
1534 if (j == 1)
1535 cluster->offset1 = *offset;
1536
1537 /* Program the aperture */
1538 ptr[qwords++] =
1539 ((cluster->statetype + j * 2) & 0xff) << 8;
1540 ptr[qwords++] =
1541 (((uint64_t)A6XX_HLSQ_DBG_READ_SEL << 44)) |
1542 (1 << 21) | 1;
1543
1544 for (k = 0; k < cluster->num_sets; k++) {
1545 unsigned int start = cluster->regs[2 * k];
1546
1547 count = REG_PAIR_COUNT(cluster->regs, k);
1548 ptr[qwords++] =
1549 a6xx_crashdump_registers.gpuaddr + *offset;
1550 ptr[qwords++] =
1551 (((uint64_t)(A6XX_HLSQ_DBG_AHB_READ_APERTURE +
1552 start - cluster->regbase / 4) << 44)) |
1553 count;
1554
1555 *offset += count * sizeof(unsigned int);
1556 }
1557 }
1558 }
1559 return qwords;
1560}
1561
static int _a6xx_crashdump_init_non_ctx_dbgahb(uint64_t *ptr, uint64_t *offset)
1563{
1564 int qwords = 0;
1565 unsigned int i, k;
1566 unsigned int count;
1567
1568 for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
1569 struct a6xx_non_ctx_dbgahb_registers *regs =
1570 &a6xx_non_ctx_dbgahb[i];
1571
1572 regs->offset = *offset;
1573
1574 /* Program the aperture */
1575 ptr[qwords++] = (regs->statetype & 0xff) << 8;
1576 ptr[qwords++] = (((uint64_t)A6XX_HLSQ_DBG_READ_SEL << 44)) |
1577 (1 << 21) | 1;
1578
1579 for (k = 0; k < regs->num_sets; k++) {
1580 unsigned int start = regs->regs[2 * k];
1581
1582 count = REG_PAIR_COUNT(regs->regs, k);
1583 ptr[qwords++] =
1584 a6xx_crashdump_registers.gpuaddr + *offset;
1585 ptr[qwords++] =
1586 (((uint64_t)(A6XX_HLSQ_DBG_AHB_READ_APERTURE +
1587 start - regs->regbase / 4) << 44)) |
1588 count;
1589
1590 *offset += count * sizeof(unsigned int);
1591 }
1592 }
1593 return qwords;
1594}
1595
void a6xx_crashdump_init(struct adreno_device *adreno_dev)
1597{
1598 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
1599 unsigned int script_size = 0;
1600 unsigned int data_size = 0;
1601 unsigned int i, j, k;
1602 uint64_t *ptr;
1603 uint64_t offset = 0;
1604
1605 if (a6xx_capturescript.gpuaddr != 0 &&
1606 a6xx_crashdump_registers.gpuaddr != 0)
1607 return;
1608
1609 /*
1610 * We need to allocate two buffers:
	 * 1 - the buffer to hold the capture script
1612 * 2 - the buffer to hold the data
1613 */
1614
1615 /*
1616 * To save the registers, we need 16 bytes per register pair for the
1617 * script and a dword for each register in the data
1618 */
1619 for (i = 0; i < ARRAY_SIZE(_a6xx_cd_registers); i++) {
1620 struct cdregs *regs = &_a6xx_cd_registers[i];
1621
1622 /* Each pair needs 16 bytes (2 qwords) */
1623 script_size += (regs->size / 2) * 16;
1624
1625 /* Each register needs a dword in the data */
1626 for (j = 0; j < regs->size / 2; j++)
1627 data_size += REG_PAIR_COUNT(regs->regs, j) *
1628 sizeof(unsigned int);
1629
1630 }
1631
	/*
1633 * To save the shader blocks for each block in each type we need 32
1634 * bytes for the script (16 bytes to program the aperture and 16 to
1635 * read the data) and then a block specific number of bytes to hold
1636 * the data
1637 */
1638 for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
1639 script_size += 32 * A6XX_NUM_SHADER_BANKS;
1640 data_size += a6xx_shader_blocks[i].sz * sizeof(unsigned int) *
1641 A6XX_NUM_SHADER_BANKS;
1642 }
1643
	/* Calculate the script and data size for MVC registers */
1645 for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
1646 struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];
1647
1648 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
1649
1650 /* 16 bytes for programming the aperture */
1651 script_size += 16;
1652
1653 /* Reading each pair of registers takes 16 bytes */
1654 script_size += 16 * cluster->num_sets;
1655
1656 /* A dword per register read from the cluster list */
1657 for (k = 0; k < cluster->num_sets; k++)
1658 data_size += REG_PAIR_COUNT(cluster->regs, k) *
1659 sizeof(unsigned int);
1660 }
1661 }
1662
	/* Calculate the script and data size for debug AHB registers */
1664 for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
1665 struct a6xx_cluster_dbgahb_registers *cluster =
1666 &a6xx_dbgahb_ctx_clusters[i];
1667
1668 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
1669
1670 /* 16 bytes for programming the aperture */
1671 script_size += 16;
1672
1673 /* Reading each pair of registers takes 16 bytes */
1674 script_size += 16 * cluster->num_sets;
1675
1676 /* A dword per register read from the cluster list */
1677 for (k = 0; k < cluster->num_sets; k++)
1678 data_size += REG_PAIR_COUNT(cluster->regs, k) *
1679 sizeof(unsigned int);
1680 }
1681 }
1682
	/*
1684 * Calculate the script and data size for non context debug
1685 * AHB registers
1686 */
1687 for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
1688 struct a6xx_non_ctx_dbgahb_registers *regs =
1689 &a6xx_non_ctx_dbgahb[i];
1690
1691 /* 16 bytes for programming the aperture */
1692 script_size += 16;
1693
1694 /* Reading each pair of registers takes 16 bytes */
1695 script_size += 16 * regs->num_sets;
1696
1697 /* A dword per register read from the cluster list */
1698 for (k = 0; k < regs->num_sets; k++)
1699 data_size += REG_PAIR_COUNT(regs->regs, k) *
1700 sizeof(unsigned int);
1701 }
1702
	/* Now allocate the script and data buffers */
1704
	/* The script buffer needs 2 extra qwords on the end */
1706 if (kgsl_allocate_global(device, &a6xx_capturescript,
1707 script_size + 16, KGSL_MEMFLAGS_GPUREADONLY,
1708 KGSL_MEMDESC_PRIVILEGED, "capturescript"))
1709 return;
1710
1711 if (kgsl_allocate_global(device, &a6xx_crashdump_registers, data_size,
1712 0, KGSL_MEMDESC_PRIVILEGED, "capturescript_regs")) {
1713 kgsl_free_global(KGSL_DEVICE(adreno_dev), &a6xx_capturescript);
1714 return;
1715 }
1716
1717 /* Build the crash script */
1718
1719 ptr = (uint64_t *)a6xx_capturescript.hostptr;
1720
1721 /* For the registers, program a read command for each pair */
1722 for (i = 0; i < ARRAY_SIZE(_a6xx_cd_registers); i++) {
1723 struct cdregs *regs = &_a6xx_cd_registers[i];
1724
1725 for (j = 0; j < regs->size / 2; j++) {
1726 unsigned int r = REG_PAIR_COUNT(regs->regs, j);
1727 *ptr++ = a6xx_crashdump_registers.gpuaddr + offset;
1728 *ptr++ = (((uint64_t) regs->regs[2 * j]) << 44) | r;
1729 offset += r * sizeof(unsigned int);
1730 }
1731 }
1732
	/* Program each shader block */
1734 for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
1735 ptr += _a6xx_crashdump_init_shader(&a6xx_shader_blocks[i], ptr,
1736 &offset);
1737 }
1738
	/* Program the capture script for the MVC registers */
1740 ptr += _a6xx_crashdump_init_mvc(ptr, &offset);
1741
	ptr += _a6xx_crashdump_init_ctx_dbgahb(ptr, &offset);

	ptr += _a6xx_crashdump_init_non_ctx_dbgahb(ptr, &offset);

	*ptr++ = 0;
1747 *ptr++ = 0;
1748}