/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/io.h>
#include "kgsl.h"
#include "adreno.h"
#include "kgsl_snapshot.h"
#include "adreno_snapshot.h"
#include "a6xx_reg.h"
#include "adreno_a6xx.h"
#include "kgsl_gmu.h"

#define A6XX_NUM_CTXTS 2

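/*
 * Context-clustered register ranges, listed as (start, end) pairs of
 * register offsets. Each array below covers one hardware cluster.
 */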
static const unsigned int a6xx_gras_cluster[] = {
	0x8000, 0x8006, 0x8010, 0x8092, 0x8094, 0x809D, 0x80A0, 0x80A6,
	0x80AF, 0x80F1, 0x8100, 0x8107, 0x8109, 0x8109, 0x8110, 0x8110,
	0x8400, 0x840B,
};

static const unsigned int a6xx_ps_cluster[] = {
	0x8800, 0x8806, 0x8809, 0x8811, 0x8818, 0x881E, 0x8820, 0x8865,
	0x8870, 0x8879, 0x8880, 0x8889, 0x8890, 0x8891, 0x8898, 0x8898,
	0x88C0, 0x88C1, 0x88D0, 0x88E3, 0x88F0, 0x88F3, 0x8900, 0x891A,
	0x8927, 0x8928, 0x8C00, 0x8C01, 0x8C17, 0x8C33, 0x9200, 0x9216,
	0x9218, 0x9236, 0x9300, 0x9306,
};

static const unsigned int a6xx_fe_cluster[] = {
	0x9300, 0x9306, 0x9800, 0x9806, 0x9B00, 0x9B07, 0xA000, 0xA009,
	0xA00E, 0xA0EF, 0xA0F8, 0xA0F8,
};

static const unsigned int a6xx_pc_vs_cluster[] = {
	0x9100, 0x9108, 0x9300, 0x9306, 0x9980, 0x9981, 0x9B00, 0x9B07,
};

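/*
 * One entry per register cluster read through CP_APERTURE_CNTL_HOST:
 * the cluster id, its (start, end) register pairs, and the offsets of
 * the per-context data inside the crash dump buffer.
 */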
static struct a6xx_cluster_registers {
	unsigned int id;
	const unsigned int *regs;
	unsigned int num_sets;
	unsigned int offset0;
	unsigned int offset1;
} a6xx_clusters[] = {
	{ CP_CLUSTER_GRAS, a6xx_gras_cluster, ARRAY_SIZE(a6xx_gras_cluster)/2 },
	{ CP_CLUSTER_PS, a6xx_ps_cluster, ARRAY_SIZE(a6xx_ps_cluster)/2 },
	{ CP_CLUSTER_FE, a6xx_fe_cluster, ARRAY_SIZE(a6xx_fe_cluster)/2 },
	{ CP_CLUSTER_PC_VS, a6xx_pc_vs_cluster,
		ARRAY_SIZE(a6xx_pc_vs_cluster)/2 },
};

struct a6xx_cluster_regs_info {
	struct a6xx_cluster_registers *cluster;
	unsigned int ctxt_id;
};

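/*
 * Context-clustered SP/TP/HLSQ register ranges that are read through
 * the HLSQ debug AHB aperture, again as (start, end) pairs.
 */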
static const unsigned int a6xx_sp_vs_hlsq_cluster[] = {
	0xB800, 0xB803, 0xB820, 0xB822,
};

static const unsigned int a6xx_sp_vs_sp_cluster[] = {
	0xA800, 0xA824, 0xA830, 0xA83C, 0xA840, 0xA864, 0xA870, 0xA895,
	0xA8A0, 0xA8AF, 0xA8C0, 0xA8C3,
};

static const unsigned int a6xx_hlsq_duplicate_cluster[] = {
	0xBB10, 0xBB11, 0xBB20, 0xBB29,
};

static const unsigned int a6xx_hlsq_2d_duplicate_cluster[] = {
	0xBD80, 0xBD80,
};

static const unsigned int a6xx_sp_duplicate_cluster[] = {
	0xAB00, 0xAB00, 0xAB04, 0xAB05, 0xAB10, 0xAB1B, 0xAB20, 0xAB20,
};

static const unsigned int a6xx_tp_duplicate_cluster[] = {
	0xB300, 0xB307, 0xB309, 0xB309, 0xB380, 0xB382,
};

static const unsigned int a6xx_sp_ps_hlsq_cluster[] = {
	0xB980, 0xB980, 0xB982, 0xB987, 0xB990, 0xB99B, 0xB9A0, 0xB9A2,
	0xB9C0, 0xB9C9,
};

static const unsigned int a6xx_sp_ps_hlsq_2d_cluster[] = {
	0xBD80, 0xBD80,
};

static const unsigned int a6xx_sp_ps_sp_cluster[] = {
	0xA980, 0xA9A8, 0xA9B0, 0xA9BC, 0xA9D0, 0xA9D3, 0xA9E0, 0xA9F3,
	0xAA00, 0xAA00, 0xAA30, 0xAA31,
};

static const unsigned int a6xx_sp_ps_sp_2d_cluster[] = {
	0xACC0, 0xACC0,
};

static const unsigned int a6xx_sp_ps_tp_cluster[] = {
	0xB180, 0xB183, 0xB190, 0xB191,
};

static const unsigned int a6xx_sp_ps_tp_2d_cluster[] = {
	0xB4C0, 0xB4D1,
};

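/*
 * Debug AHB register clusters: regbase is the byte offset of the block
 * and statetype selects the context state exposed through
 * A6XX_HLSQ_DBG_READ_SEL.
 */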
static struct a6xx_cluster_dbgahb_registers {
	unsigned int id;
	unsigned int regbase;
	unsigned int statetype;
	const unsigned int *regs;
	unsigned int num_sets;
	unsigned int offset0;
	unsigned int offset1;
} a6xx_dbgahb_ctx_clusters[] = {
	{ CP_CLUSTER_SP_VS, 0x0002E000, 0x41, a6xx_sp_vs_hlsq_cluster,
		ARRAY_SIZE(a6xx_sp_vs_hlsq_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002A000, 0x21, a6xx_sp_vs_sp_cluster,
		ARRAY_SIZE(a6xx_sp_vs_sp_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002E000, 0x41, a6xx_hlsq_duplicate_cluster,
		ARRAY_SIZE(a6xx_hlsq_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002F000, 0x45, a6xx_hlsq_2d_duplicate_cluster,
		ARRAY_SIZE(a6xx_hlsq_2d_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002A000, 0x21, a6xx_sp_duplicate_cluster,
		ARRAY_SIZE(a6xx_sp_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002C000, 0x1, a6xx_tp_duplicate_cluster,
		ARRAY_SIZE(a6xx_tp_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002E000, 0x42, a6xx_sp_ps_hlsq_cluster,
		ARRAY_SIZE(a6xx_sp_ps_hlsq_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002F000, 0x46, a6xx_sp_ps_hlsq_2d_cluster,
		ARRAY_SIZE(a6xx_sp_ps_hlsq_2d_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002A000, 0x22, a6xx_sp_ps_sp_cluster,
		ARRAY_SIZE(a6xx_sp_ps_sp_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002B000, 0x26, a6xx_sp_ps_sp_2d_cluster,
		ARRAY_SIZE(a6xx_sp_ps_sp_2d_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002C000, 0x2, a6xx_sp_ps_tp_cluster,
		ARRAY_SIZE(a6xx_sp_ps_tp_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002D000, 0x6, a6xx_sp_ps_tp_2d_cluster,
		ARRAY_SIZE(a6xx_sp_ps_tp_2d_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002E000, 0x42, a6xx_hlsq_duplicate_cluster,
		ARRAY_SIZE(a6xx_hlsq_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002A000, 0x22, a6xx_sp_duplicate_cluster,
		ARRAY_SIZE(a6xx_sp_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002C000, 0x2, a6xx_tp_duplicate_cluster,
		ARRAY_SIZE(a6xx_tp_duplicate_cluster) / 2 },
};

struct a6xx_cluster_dbgahb_regs_info {
	struct a6xx_cluster_dbgahb_registers *cluster;
	unsigned int ctxt_id;
};

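/*
 * Non-context (global) HLSQ/SP/TP register ranges, also read through
 * the debug AHB aperture.
 */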
static const unsigned int a6xx_hlsq_non_ctx_registers[] = {
	0xBE00, 0xBE01, 0xBE04, 0xBE05, 0xBE08, 0xBE09, 0xBE10, 0xBE15,
	0xBE20, 0xBE23,
};

static const unsigned int a6xx_sp_non_ctx_registers[] = {
	0xAE00, 0xAE04, 0xAE0C, 0xAE0C, 0xAE0F, 0xAE2B, 0xAE30, 0xAE32,
	0xAE35, 0xAE35, 0xAE3A, 0xAE3F, 0xAE50, 0xAE52,
};

static const unsigned int a6xx_tp_non_ctx_registers[] = {
	0xB600, 0xB601, 0xB604, 0xB605, 0xB610, 0xB61B, 0xB620, 0xB623,
};

static struct a6xx_non_ctx_dbgahb_registers {
	unsigned int regbase;
	unsigned int statetype;
	const unsigned int *regs;
	unsigned int num_sets;
	unsigned int offset;
} a6xx_non_ctx_dbgahb[] = {
	{ 0x0002F800, 0x40, a6xx_hlsq_non_ctx_registers,
		ARRAY_SIZE(a6xx_hlsq_non_ctx_registers) / 2 },
	{ 0x0002B800, 0x20, a6xx_sp_non_ctx_registers,
		ARRAY_SIZE(a6xx_sp_non_ctx_registers) / 2 },
	{ 0x0002D800, 0x0, a6xx_tp_non_ctx_registers,
		ARRAY_SIZE(a6xx_tp_non_ctx_registers) / 2 },
};

static const unsigned int a6xx_vbif_ver_20xxxxxx_registers[] = {
	/* VBIF */
	0x3000, 0x3007, 0x300C, 0x3014, 0x3018, 0x302D, 0x3030, 0x3031,
	0x3034, 0x3036, 0x303C, 0x303D, 0x3040, 0x3040, 0x3042, 0x3042,
	0x3049, 0x3049, 0x3058, 0x3058, 0x305A, 0x3061, 0x3064, 0x3068,
	0x306C, 0x306D, 0x3080, 0x3088, 0x308B, 0x308C, 0x3090, 0x3094,
	0x3098, 0x3098, 0x309C, 0x309C, 0x30C0, 0x30C0, 0x30C8, 0x30C8,
	0x30D0, 0x30D0, 0x30D8, 0x30D8, 0x30E0, 0x30E0, 0x3100, 0x3100,
	0x3108, 0x3108, 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120,
	0x3124, 0x3125, 0x3129, 0x3129, 0x3131, 0x3131, 0x3154, 0x3154,
	0x3156, 0x3156, 0x3158, 0x3158, 0x315A, 0x315A, 0x315C, 0x315C,
	0x315E, 0x315E, 0x3160, 0x3160, 0x3162, 0x3162, 0x340C, 0x340C,
	0x3410, 0x3410, 0x3800, 0x3801,
};

static const unsigned int a6xx_gmu_registers[] = {
	/* GMU */
	0x1B400, 0x1C3FF, 0x1C400, 0x1D3FF,
};

static const struct adreno_vbif_snapshot_registers
a6xx_vbif_snapshot_registers[] = {
	{ 0x20040000, 0xFF000000, a6xx_vbif_ver_20xxxxxx_registers,
		ARRAY_SIZE(a6xx_vbif_ver_20xxxxxx_registers)/2},
};

/*
 * Set of registers to dump for A6XX on snapshot.
 * Registers in pairs - first value is the start offset, second
 * is the stop offset (inclusive)
 */

static const unsigned int a6xx_registers[] = {
	/* RBBM */
	0x0000, 0x0002, 0x0010, 0x0010, 0x0012, 0x0012, 0x0018, 0x001B,
	0x001E, 0x0032, 0x0038, 0x003C, 0x0042, 0x0042, 0x0044, 0x0044,
	0x0047, 0x0047, 0x0056, 0x0056, 0x00AD, 0x00AE, 0x00B0, 0x00FB,
	0x0100, 0x011D, 0x0200, 0x020D, 0x0210, 0x0213, 0x0218, 0x023D,
	0x0400, 0x04F9, 0x0500, 0x0500, 0x0505, 0x050B, 0x050E, 0x0511,
	0x0533, 0x0533, 0x0540, 0x0555,
	/* CP */
	0x0800, 0x0808, 0x0810, 0x0813, 0x0820, 0x0821, 0x0823, 0x0827,
	0x0830, 0x0833, 0x0840, 0x0843, 0x084F, 0x086F, 0x0880, 0x088A,
	0x08A0, 0x08AB, 0x08C0, 0x08C4, 0x08D0, 0x08DD, 0x08F0, 0x08F3,
	0x0900, 0x0903, 0x0908, 0x0911, 0x0928, 0x093E, 0x0942, 0x094D,
	0x0980, 0x0984, 0x098D, 0x0996, 0x0998, 0x099E, 0x09A0, 0x09A6,
	0x09A8, 0x09AE, 0x09B0, 0x09B1, 0x09C2, 0x09C8, 0x0A00, 0x0A03,
	/* VSC */
	0x0C00, 0x0C04, 0x0C06, 0x0C06, 0x0C10, 0x0CD9, 0x0E00, 0x0E0E,
	/* UCHE */
	0x0E10, 0x0E13, 0x0E17, 0x0E19, 0x0E1C, 0x0E2B, 0x0E30, 0x0E32,
	0x0E38, 0x0E39,
	/* GRAS */
	0x8600, 0x8601, 0x8610, 0x861B, 0x8620, 0x8620, 0x8628, 0x862B,
	0x8630, 0x8637,
	/* RB */
	0x8E01, 0x8E01, 0x8E04, 0x8E05, 0x8E07, 0x8E08, 0x8E0C, 0x8E0C,
	0x8E10, 0x8E1C, 0x8E20, 0x8E25, 0x8E28, 0x8E28, 0x8E2C, 0x8E2F,
	0x8E3B, 0x8E3E, 0x8E40, 0x8E43, 0x8E50, 0x8E5E, 0x8E70, 0x8E77,
	/* VPC */
	0x9600, 0x9604, 0x9624, 0x9637,
	/* PC */
	0x9E00, 0x9E01, 0x9E03, 0x9E0E, 0x9E11, 0x9E16, 0x9E19, 0x9E19,
	0x9E1C, 0x9E1C, 0x9E20, 0x9E23, 0x9E30, 0x9E31, 0x9E34, 0x9E34,
	0x9E70, 0x9E72, 0x9E78, 0x9E79, 0x9E80, 0x9FFF,
	/* VFD */
	0xA600, 0xA601, 0xA603, 0xA603, 0xA60A, 0xA60A, 0xA610, 0xA617,
	0xA630, 0xA630,
};

enum a6xx_debugbus_id {
	A6XX_DBGBUS_CP = 0x1,
	A6XX_DBGBUS_RBBM = 0x2,
	A6XX_DBGBUS_VBIF = 0x3,
	A6XX_DBGBUS_HLSQ = 0x4,
	A6XX_DBGBUS_UCHE = 0x5,
	A6XX_DBGBUS_DPM = 0x6,
	A6XX_DBGBUS_TESS = 0x7,
	A6XX_DBGBUS_PC = 0x8,
	A6XX_DBGBUS_VFDP = 0x9,
	A6XX_DBGBUS_VPC = 0xa,
	A6XX_DBGBUS_TSE = 0xb,
	A6XX_DBGBUS_RAS = 0xc,
	A6XX_DBGBUS_VSC = 0xd,
	A6XX_DBGBUS_COM = 0xe,
	A6XX_DBGBUS_LRZ = 0x10,
	A6XX_DBGBUS_A2D = 0x11,
	A6XX_DBGBUS_CCUFCHE = 0x12,
	A6XX_DBGBUS_GMU_CX = 0x13,
	A6XX_DBGBUS_RBP = 0x14,
	A6XX_DBGBUS_DCS = 0x15,
	A6XX_DBGBUS_RBBM_CFG = 0x16,
	A6XX_DBGBUS_CX = 0x17,
	A6XX_DBGBUS_GMU_GX = 0x18,
	A6XX_DBGBUS_TPFCHE = 0x19,
	A6XX_DBGBUS_GPC = 0x1d,
	A6XX_DBGBUS_LARC = 0x1e,
	A6XX_DBGBUS_HLSQ_SPTP = 0x1f,
	A6XX_DBGBUS_RB_0 = 0x20,
	A6XX_DBGBUS_RB_1 = 0x21,
	A6XX_DBGBUS_UCHE_WRAPPER = 0x24,
	A6XX_DBGBUS_CCU_0 = 0x28,
	A6XX_DBGBUS_CCU_1 = 0x29,
	A6XX_DBGBUS_VFD_0 = 0x38,
	A6XX_DBGBUS_VFD_1 = 0x39,
	A6XX_DBGBUS_VFD_2 = 0x3a,
	A6XX_DBGBUS_VFD_3 = 0x3b,
	A6XX_DBGBUS_SP_0 = 0x40,
	A6XX_DBGBUS_SP_1 = 0x41,
	A6XX_DBGBUS_TPL1_0 = 0x48,
	A6XX_DBGBUS_TPL1_1 = 0x49,
	A6XX_DBGBUS_TPL1_2 = 0x4a,
	A6XX_DBGBUS_TPL1_3 = 0x4b,
};

static const struct adreno_debugbus_block a6xx_dbgc_debugbus_blocks[] = {
	{ A6XX_DBGBUS_CP, 0x100, },
	{ A6XX_DBGBUS_RBBM, 0x100, },
	{ A6XX_DBGBUS_HLSQ, 0x100, },
	{ A6XX_DBGBUS_UCHE, 0x100, },
	{ A6XX_DBGBUS_DPM, 0x100, },
	{ A6XX_DBGBUS_TESS, 0x100, },
	{ A6XX_DBGBUS_PC, 0x100, },
	{ A6XX_DBGBUS_VFDP, 0x100, },
	{ A6XX_DBGBUS_VPC, 0x100, },
	{ A6XX_DBGBUS_TSE, 0x100, },
	{ A6XX_DBGBUS_RAS, 0x100, },
	{ A6XX_DBGBUS_VSC, 0x100, },
	{ A6XX_DBGBUS_COM, 0x100, },
	{ A6XX_DBGBUS_LRZ, 0x100, },
	{ A6XX_DBGBUS_A2D, 0x100, },
	{ A6XX_DBGBUS_CCUFCHE, 0x100, },
	{ A6XX_DBGBUS_RBP, 0x100, },
	{ A6XX_DBGBUS_DCS, 0x100, },
	{ A6XX_DBGBUS_RBBM_CFG, 0x100, },
	{ A6XX_DBGBUS_GMU_GX, 0x100, },
	{ A6XX_DBGBUS_TPFCHE, 0x100, },
	{ A6XX_DBGBUS_GPC, 0x100, },
	{ A6XX_DBGBUS_LARC, 0x100, },
	{ A6XX_DBGBUS_HLSQ_SPTP, 0x100, },
	{ A6XX_DBGBUS_RB_0, 0x100, },
	{ A6XX_DBGBUS_RB_1, 0x100, },
	{ A6XX_DBGBUS_UCHE_WRAPPER, 0x100, },
	{ A6XX_DBGBUS_CCU_0, 0x100, },
	{ A6XX_DBGBUS_CCU_1, 0x100, },
	{ A6XX_DBGBUS_VFD_0, 0x100, },
	{ A6XX_DBGBUS_VFD_1, 0x100, },
	{ A6XX_DBGBUS_VFD_2, 0x100, },
	{ A6XX_DBGBUS_VFD_3, 0x100, },
	{ A6XX_DBGBUS_SP_0, 0x100, },
	{ A6XX_DBGBUS_SP_1, 0x100, },
	{ A6XX_DBGBUS_TPL1_0, 0x100, },
	{ A6XX_DBGBUS_TPL1_1, 0x100, },
	{ A6XX_DBGBUS_TPL1_2, 0x100, },
	{ A6XX_DBGBUS_TPL1_3, 0x100, },
};

static void __iomem *a6xx_cx_dbgc;
static const struct adreno_debugbus_block a6xx_cx_dbgc_debugbus_blocks[] = {
	{ A6XX_DBGBUS_VBIF, 0x100, },
	{ A6XX_DBGBUS_GMU_CX, 0x100, },
	{ A6XX_DBGBUS_CX, 0x100, },
};

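/*
 * Shader and HLSQ internal memories are read through the HLSQ debug
 * read aperture, one bank at a time, A6XX_NUM_SHADER_BANKS banks per
 * block.
 */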
#define A6XX_NUM_SHADER_BANKS 3
#define A6XX_SHADER_STATETYPE_SHIFT 8

enum a6xx_shader_obj {
	A6XX_TP0_TMO_DATA = 0x9,
	A6XX_TP0_SMO_DATA = 0xa,
	A6XX_TP0_MIPMAP_BASE_DATA = 0xb,
	A6XX_TP1_TMO_DATA = 0x19,
	A6XX_TP1_SMO_DATA = 0x1a,
	A6XX_TP1_MIPMAP_BASE_DATA = 0x1b,
	A6XX_SP_INST_DATA = 0x29,
	A6XX_SP_LB_0_DATA = 0x2a,
	A6XX_SP_LB_1_DATA = 0x2b,
	A6XX_SP_LB_2_DATA = 0x2c,
	A6XX_SP_LB_3_DATA = 0x2d,
	A6XX_SP_LB_4_DATA = 0x2e,
	A6XX_SP_LB_5_DATA = 0x2f,
	A6XX_SP_CB_BINDLESS_DATA = 0x30,
	A6XX_SP_CB_LEGACY_DATA = 0x31,
	A6XX_SP_UAV_DATA = 0x32,
	A6XX_SP_INST_TAG = 0x33,
	A6XX_SP_CB_BINDLESS_TAG = 0x34,
	A6XX_SP_TMO_UMO_TAG = 0x35,
	A6XX_SP_SMO_TAG = 0x36,
	A6XX_SP_STATE_DATA = 0x37,
	A6XX_HLSQ_CHUNK_CVS_RAM = 0x49,
	A6XX_HLSQ_CHUNK_CPS_RAM = 0x4a,
	A6XX_HLSQ_CHUNK_CVS_RAM_TAG = 0x4b,
	A6XX_HLSQ_CHUNK_CPS_RAM_TAG = 0x4c,
	A6XX_HLSQ_ICB_CVS_CB_BASE_TAG = 0x4d,
	A6XX_HLSQ_ICB_CPS_CB_BASE_TAG = 0x4e,
	A6XX_HLSQ_CVS_MISC_RAM = 0x50,
	A6XX_HLSQ_CPS_MISC_RAM = 0x51,
	A6XX_HLSQ_INST_RAM = 0x52,
	A6XX_HLSQ_GFX_CVS_CONST_RAM = 0x53,
	A6XX_HLSQ_GFX_CPS_CONST_RAM = 0x54,
	A6XX_HLSQ_CVS_MISC_RAM_TAG = 0x55,
	A6XX_HLSQ_CPS_MISC_RAM_TAG = 0x56,
	A6XX_HLSQ_INST_RAM_TAG = 0x57,
	A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG = 0x58,
	A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG = 0x59,
	A6XX_HLSQ_PWR_REST_RAM = 0x5a,
	A6XX_HLSQ_PWR_REST_TAG = 0x5b,
	A6XX_HLSQ_DATAPATH_META = 0x60,
	A6XX_HLSQ_FRONTEND_META = 0x61,
	A6XX_HLSQ_INDIRECT_META = 0x62,
	A6XX_HLSQ_BACKEND_META = 0x63
};

struct a6xx_shader_block {
	unsigned int statetype;
	unsigned int sz;
	uint64_t offset;
};

struct a6xx_shader_block_info {
	struct a6xx_shader_block *block;
	unsigned int bank;
	uint64_t offset;
};

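/* Shader memory blocks captured by the crash dumper, one entry per statetype */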
static struct a6xx_shader_block a6xx_shader_blocks[] = {
	{A6XX_TP0_TMO_DATA, 0x200},
	{A6XX_TP0_SMO_DATA, 0x80},
	{A6XX_TP0_MIPMAP_BASE_DATA, 0x3C0},
	{A6XX_TP1_TMO_DATA, 0x200},
	{A6XX_TP1_SMO_DATA, 0x80},
	{A6XX_TP1_MIPMAP_BASE_DATA, 0x3C0},
	{A6XX_SP_INST_DATA, 0x800},
	{A6XX_SP_LB_0_DATA, 0x800},
	{A6XX_SP_LB_1_DATA, 0x800},
	{A6XX_SP_LB_2_DATA, 0x800},
	{A6XX_SP_LB_3_DATA, 0x800},
	{A6XX_SP_LB_4_DATA, 0x800},
	{A6XX_SP_LB_5_DATA, 0x200},
	{A6XX_SP_CB_BINDLESS_DATA, 0x2000},
	{A6XX_SP_CB_LEGACY_DATA, 0x280},
	{A6XX_SP_UAV_DATA, 0x80},
	{A6XX_SP_INST_TAG, 0x80},
	{A6XX_SP_CB_BINDLESS_TAG, 0x80},
	{A6XX_SP_TMO_UMO_TAG, 0x80},
	{A6XX_SP_SMO_TAG, 0x80},
	{A6XX_SP_STATE_DATA, 0x3F},
	{A6XX_HLSQ_CHUNK_CVS_RAM, 0x1C0},
	{A6XX_HLSQ_CHUNK_CPS_RAM, 0x280},
	{A6XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40},
	{A6XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40},
	{A6XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x4},
	{A6XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x4},
	{A6XX_HLSQ_CVS_MISC_RAM, 0x1C0},
	{A6XX_HLSQ_CPS_MISC_RAM, 0x580},
	{A6XX_HLSQ_INST_RAM, 0x800},
	{A6XX_HLSQ_GFX_CVS_CONST_RAM, 0x800},
	{A6XX_HLSQ_GFX_CPS_CONST_RAM, 0x800},
	{A6XX_HLSQ_CVS_MISC_RAM_TAG, 0x8},
	{A6XX_HLSQ_CPS_MISC_RAM_TAG, 0x4},
	{A6XX_HLSQ_INST_RAM_TAG, 0x80},
	{A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0xC},
	{A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x10},
	{A6XX_HLSQ_PWR_REST_RAM, 0x28},
	{A6XX_HLSQ_PWR_REST_TAG, 0x14},
	{A6XX_HLSQ_DATAPATH_META, 0x40},
	{A6XX_HLSQ_FRONTEND_META, 0x40},
	{A6XX_HLSQ_INDIRECT_META, 0x40}
};

static struct kgsl_memdesc a6xx_capturescript;
static struct kgsl_memdesc a6xx_crashdump_registers;
static bool crash_dump_valid;

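/*
 * Legacy path: dump the a6xx_registers list with direct register reads
 * when the crash dumper output is not available.
 */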
static size_t a6xx_legacy_snapshot_registers(struct kgsl_device *device,
		u8 *buf, size_t remain)
{
	struct kgsl_snapshot_registers regs = {
		.regs = a6xx_registers,
		.count = ARRAY_SIZE(a6xx_registers) / 2,
	};

	return kgsl_snapshot_dump_registers(device, buf, remain, &regs);
}

static struct cdregs {
	const unsigned int *regs;
	unsigned int size;
} _a6xx_cd_registers[] = {
	{ a6xx_registers, ARRAY_SIZE(a6xx_registers) },
};

#define REG_PAIR_COUNT(_a, _i) \
	(((_a)[(2 * (_i)) + 1] - (_a)[2 * (_i)]) + 1)

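/*
 * Copy the registers captured by the crash dumper into the snapshot as
 * (address, value) pairs; fall back to the legacy path if the crash
 * dump is not valid.
 */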
static size_t a6xx_snapshot_registers(struct kgsl_device *device, u8 *buf,
		size_t remain, void *priv)
{
	struct kgsl_snapshot_regs *header = (struct kgsl_snapshot_regs *)buf;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int *src = (unsigned int *)a6xx_crashdump_registers.hostptr;
	unsigned int i, j, k;
	unsigned int count = 0;

	if (crash_dump_valid == false)
		return a6xx_legacy_snapshot_registers(device, buf, remain);

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	remain -= sizeof(*header);

	for (i = 0; i < ARRAY_SIZE(_a6xx_cd_registers); i++) {
		struct cdregs *regs = &_a6xx_cd_registers[i];

		for (j = 0; j < regs->size / 2; j++) {
			unsigned int start = regs->regs[2 * j];
			unsigned int end = regs->regs[(2 * j) + 1];

			if (remain < ((end - start) + 1) * 8) {
				SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
				goto out;
			}

			remain -= ((end - start) + 1) * 8;

			for (k = start; k <= end; k++, count++) {
				*data++ = k;
				*data++ = *src++;
			}
		}
	}

out:
	header->count = count;

	/* Return the size of the section */
	return (count * 8) + sizeof(*header);
}

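/*
 * Write one bank of a shader memory block, already captured by the
 * crash dumper, into the snapshot.
 */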
static size_t a6xx_snapshot_shader_memory(struct kgsl_device *device,
		u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_shader *header =
		(struct kgsl_snapshot_shader *) buf;
	struct a6xx_shader_block_info *info =
		(struct a6xx_shader_block_info *) priv;
	struct a6xx_shader_block *block = info->block;
	unsigned int *data = (unsigned int *) (buf + sizeof(*header));

	if (remain < SHADER_SECTION_SZ(block->sz)) {
		SNAPSHOT_ERR_NOMEM(device, "SHADER MEMORY");
		return 0;
	}

	header->type = block->statetype;
	header->index = info->bank;
	header->size = block->sz;

	memcpy(data, a6xx_crashdump_registers.hostptr + info->offset,
		block->sz);

	return SHADER_SECTION_SZ(block->sz);
}

static void a6xx_snapshot_shader(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot)
{
	unsigned int i, j;
	struct a6xx_shader_block_info info;

	/* Shader blocks can only be read by the crash dumper */
	if (crash_dump_valid == false)
		return;

	for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
		for (j = 0; j < A6XX_NUM_SHADER_BANKS; j++) {
			info.block = &a6xx_shader_blocks[i];
			info.bank = j;
			info.offset = a6xx_shader_blocks[i].offset +
				(j * a6xx_shader_blocks[i].sz);

			/* Shader working/shadow memory */
			kgsl_snapshot_add_section(device,
				KGSL_SNAPSHOT_SECTION_SHADER,
				snapshot, a6xx_snapshot_shader_memory, &info);
		}
	}
}

static void a6xx_snapshot_mempool(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot)
{
	unsigned int pool_size;
	u8 *buf = snapshot->ptr;

	/* Set the mempool size to 0 to stabilize it while dumping */
	kgsl_regread(device, A6XX_CP_MEM_POOL_SIZE, &pool_size);
	kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, 0);

	kgsl_snapshot_indexed_registers(device, snapshot,
		A6XX_CP_MEM_POOL_DBG_ADDR, A6XX_CP_MEM_POOL_DBG_DATA,
		0, 0x2060);

	/*
	 * Data at offset 0x2000 in the mempool section is the mempool size.
	 * Since we set it to 0, patch in the original size so that the data
	 * is consistent.
	 */
	if (buf < snapshot->ptr) {
		unsigned int *data;

		/* Skip over the headers */
		buf += sizeof(struct kgsl_snapshot_section_header) +
			sizeof(struct kgsl_snapshot_indexed_regs);

		data = (unsigned int *)buf + 0x2000;
		*data = pool_size;
	}

	/* Restore the saved mempool size */
	kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, pool_size);
}

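/*
 * Read one register through the HLSQ debug AHB aperture. regbase is a
 * byte offset, so it is divided by 4 to get the dword offset from the
 * aperture base.
 */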
static inline unsigned int a6xx_read_dbgahb(struct kgsl_device *device,
		unsigned int regbase, unsigned int reg)
{
	unsigned int read_reg = A6XX_HLSQ_DBG_AHB_READ_APERTURE +
				reg - regbase / 4;
	unsigned int val;

	kgsl_regread(device, read_reg, &val);
	return val;
}

static size_t a6xx_legacy_snapshot_cluster_dbgahb(struct kgsl_device *device,
		u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_mvc_regs *header =
		(struct kgsl_snapshot_mvc_regs *)buf;
	struct a6xx_cluster_dbgahb_regs_info *info =
		(struct a6xx_cluster_dbgahb_regs_info *)priv;
	struct a6xx_cluster_dbgahb_registers *cur_cluster = info->cluster;
	unsigned int read_sel;
	unsigned int data_size = 0;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	int i, j;

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	remain -= sizeof(*header);

	header->ctxt_id = info->ctxt_id;
	header->cluster_id = cur_cluster->id;

	read_sel = ((cur_cluster->statetype + info->ctxt_id * 2) & 0xff) << 8;
	kgsl_regwrite(device, A6XX_HLSQ_DBG_READ_SEL, read_sel);

	for (i = 0; i < cur_cluster->num_sets; i++) {
		unsigned int start = cur_cluster->regs[2 * i];
		unsigned int end = cur_cluster->regs[2 * i + 1];

		if (remain < (end - start + 3) * 4) {
			SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
			goto out;
		}

		remain -= (end - start + 3) * 4;
		data_size += (end - start + 3) * 4;

		*data++ = start | (1 << 31);
		*data++ = end;

		for (j = start; j <= end; j++) {
			unsigned int val;

			val = a6xx_read_dbgahb(device, cur_cluster->regbase, j);
			*data++ = val;
		}
	}

out:
	return data_size + sizeof(*header);
}

static size_t a6xx_snapshot_cluster_dbgahb(struct kgsl_device *device, u8 *buf,
		size_t remain, void *priv)
{
	struct kgsl_snapshot_mvc_regs *header =
		(struct kgsl_snapshot_mvc_regs *)buf;
	struct a6xx_cluster_dbgahb_regs_info *info =
		(struct a6xx_cluster_dbgahb_regs_info *)priv;
	struct a6xx_cluster_dbgahb_registers *cluster = info->cluster;
	unsigned int data_size = 0;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	int i, j;
	unsigned int *src;

	if (crash_dump_valid == false)
		return a6xx_legacy_snapshot_cluster_dbgahb(device, buf, remain,
				info);

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	remain -= sizeof(*header);

	header->ctxt_id = info->ctxt_id;
	header->cluster_id = cluster->id;

	src = (unsigned int *)(a6xx_crashdump_registers.hostptr +
		(header->ctxt_id ? cluster->offset1 : cluster->offset0));

	for (i = 0; i < cluster->num_sets; i++) {
		unsigned int start;
		unsigned int end;

		start = cluster->regs[2 * i];
		end = cluster->regs[2 * i + 1];

		if (remain < (end - start + 3) * 4) {
			SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
			goto out;
		}

		remain -= (end - start + 3) * 4;
		data_size += (end - start + 3) * 4;

		*data++ = start | (1 << 31);
		*data++ = end;
		for (j = start; j <= end; j++)
			*data++ = *src++;
	}
out:
	return data_size + sizeof(*header);
}

static size_t a6xx_legacy_snapshot_non_ctx_dbgahb(struct kgsl_device *device,
		u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_regs *header =
		(struct kgsl_snapshot_regs *)buf;
	struct a6xx_non_ctx_dbgahb_registers *regs =
		(struct a6xx_non_ctx_dbgahb_registers *)priv;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	int count = 0;
	unsigned int read_sel;
	int i, j;

	/* Figure out how many registers we are going to dump */
	for (i = 0; i < regs->num_sets; i++) {
		int start = regs->regs[i * 2];
		int end = regs->regs[i * 2 + 1];

		count += (end - start + 1);
	}

	if (remain < (count * 8) + sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	header->count = count;

	read_sel = (regs->statetype & 0xff) << 8;
	kgsl_regwrite(device, A6XX_HLSQ_DBG_READ_SEL, read_sel);

	for (i = 0; i < regs->num_sets; i++) {
		unsigned int start = regs->regs[2 * i];
		unsigned int end = regs->regs[2 * i + 1];

		for (j = start; j <= end; j++) {
			unsigned int val;

			val = a6xx_read_dbgahb(device, regs->regbase, j);
			*data++ = j;
			*data++ = val;
		}
	}
	return (count * 8) + sizeof(*header);
}

static size_t a6xx_snapshot_non_ctx_dbgahb(struct kgsl_device *device, u8 *buf,
		size_t remain, void *priv)
{
	struct kgsl_snapshot_regs *header =
		(struct kgsl_snapshot_regs *)buf;
	struct a6xx_non_ctx_dbgahb_registers *regs =
		(struct a6xx_non_ctx_dbgahb_registers *)priv;
	unsigned int count = 0;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int i, k;
	unsigned int *src;

	if (crash_dump_valid == false)
		return a6xx_legacy_snapshot_non_ctx_dbgahb(device, buf, remain,
				regs);

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	remain -= sizeof(*header);

	src = (unsigned int *)(a6xx_crashdump_registers.hostptr + regs->offset);

	for (i = 0; i < regs->num_sets; i++) {
		unsigned int start;
		unsigned int end;

		start = regs->regs[2 * i];
		end = regs->regs[(2 * i) + 1];

		if (remain < (end - start + 1) * 8) {
			SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
			goto out;
		}

		remain -= ((end - start) + 1) * 8;

		for (k = start; k <= end; k++, count++) {
			*data++ = k;
			*data++ = *src++;
		}
	}
out:
	header->count = count;

	/* Return the size of the section */
	return (count * 8) + sizeof(*header);
}

static void a6xx_snapshot_dbgahb_regs(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot)
{
	int i, j;

	for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
		struct a6xx_cluster_dbgahb_registers *cluster =
				&a6xx_dbgahb_ctx_clusters[i];
		struct a6xx_cluster_dbgahb_regs_info info;

		info.cluster = cluster;
		for (j = 0; j < A6XX_NUM_CTXTS; j++) {
			info.ctxt_id = j;

			kgsl_snapshot_add_section(device,
				KGSL_SNAPSHOT_SECTION_MVC, snapshot,
				a6xx_snapshot_cluster_dbgahb, &info);
		}
	}

	for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
		kgsl_snapshot_add_section(device,
			KGSL_SNAPSHOT_SECTION_REGS, snapshot,
			a6xx_snapshot_non_ctx_dbgahb, &a6xx_non_ctx_dbgahb[i]);
	}
}

static size_t a6xx_legacy_snapshot_mvc(struct kgsl_device *device, u8 *buf,
		size_t remain, void *priv)
{
	struct kgsl_snapshot_mvc_regs *header =
		(struct kgsl_snapshot_mvc_regs *)buf;
	struct a6xx_cluster_regs_info *info =
		(struct a6xx_cluster_regs_info *)priv;
	struct a6xx_cluster_registers *cur_cluster = info->cluster;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int ctxt = info->ctxt_id;
	unsigned int start, end, i, j, aperture_cntl = 0;
	unsigned int data_size = 0;

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
		return 0;
	}

	remain -= sizeof(*header);

	header->ctxt_id = info->ctxt_id;
	header->cluster_id = cur_cluster->id;

	/*
	 * Set the AHB control for the Host to read from the
	 * cluster/context for this iteration.
	 */
	aperture_cntl = ((cur_cluster->id & 0x7) << 8) | (ctxt << 4) | ctxt;
	kgsl_regwrite(device, A6XX_CP_APERTURE_CNTL_HOST, aperture_cntl);

	for (i = 0; i < cur_cluster->num_sets; i++) {
		start = cur_cluster->regs[2 * i];
		end = cur_cluster->regs[2 * i + 1];

		if (remain < (end - start + 3) * 4) {
			SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
			goto out;
		}

		remain -= (end - start + 3) * 4;
		data_size += (end - start + 3) * 4;

		*data++ = start | (1 << 31);
		*data++ = end;
		for (j = start; j <= end; j++) {
			unsigned int val;

			kgsl_regread(device, j, &val);
			*data++ = val;
		}
	}
out:
	return data_size + sizeof(*header);
}

static size_t a6xx_snapshot_mvc(struct kgsl_device *device, u8 *buf,
		size_t remain, void *priv)
{
	struct kgsl_snapshot_mvc_regs *header =
		(struct kgsl_snapshot_mvc_regs *)buf;
	struct a6xx_cluster_regs_info *info =
		(struct a6xx_cluster_regs_info *)priv;
	struct a6xx_cluster_registers *cluster = info->cluster;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int *src;
	int i, j;
	unsigned int start, end;
	size_t data_size = 0;

	if (crash_dump_valid == false)
		return a6xx_legacy_snapshot_mvc(device, buf, remain, info);

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
		return 0;
	}

	remain -= sizeof(*header);

	header->ctxt_id = info->ctxt_id;
	header->cluster_id = cluster->id;

	src = (unsigned int *)(a6xx_crashdump_registers.hostptr +
		(header->ctxt_id ? cluster->offset1 : cluster->offset0));

	for (i = 0; i < cluster->num_sets; i++) {
		start = cluster->regs[2 * i];
		end = cluster->regs[2 * i + 1];

		if (remain < (end - start + 3) * 4) {
			SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
			goto out;
		}

		remain -= (end - start + 3) * 4;
		data_size += (end - start + 3) * 4;

		*data++ = start | (1 << 31);
		*data++ = end;
		for (j = start; j <= end; j++)
			*data++ = *src++;
	}

out:
	return data_size + sizeof(*header);
}

static void a6xx_snapshot_mvc_regs(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot)
{
	int i, j;
	struct a6xx_cluster_regs_info info;

	for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
		struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];

		info.cluster = cluster;
		for (j = 0; j < A6XX_NUM_CTXTS; j++) {
			info.ctxt_id = j;

			kgsl_snapshot_add_section(device,
				KGSL_SNAPSHOT_SECTION_MVC, snapshot,
				a6xx_snapshot_mvc, &info);
		}
	}
}

/* a6xx_dbgc_debug_bus_read() - Read data from trace bus */
static void a6xx_dbgc_debug_bus_read(struct kgsl_device *device,
	unsigned int block_id, unsigned int index, unsigned int *val)
{
	unsigned int reg;

	reg = (block_id << A6XX_DBGC_CFG_DBGBUS_SEL_PING_BLK_SEL_SHIFT) |
			(index << A6XX_DBGC_CFG_DBGBUS_SEL_PING_INDEX_SHIFT);

	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_A, reg);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_B, reg);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_C, reg);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_D, reg);

	kgsl_regread(device, A6XX_DBGC_CFG_DBGBUS_TRACE_BUF2, val);
	val++;
	kgsl_regread(device, A6XX_DBGC_CFG_DBGBUS_TRACE_BUF1, val);
}

/* a6xx_snapshot_dbgc_debugbus_block() - Capture debug data for a gpu block */
static size_t a6xx_snapshot_dbgc_debugbus_block(struct kgsl_device *device,
	u8 *buf, size_t remain, void *priv)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct kgsl_snapshot_debugbus *header =
		(struct kgsl_snapshot_debugbus *)buf;
	struct adreno_debugbus_block *block = priv;
	int i;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int dwords;
	unsigned int block_id;
	size_t size;

	dwords = block->dwords;

	/* For a6xx each debug bus data unit is 2 DWORDS */
	size = (dwords * sizeof(unsigned int) * 2) + sizeof(*header);

	if (remain < size) {
		SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
		return 0;
	}

	header->id = block->block_id;
	header->count = dwords * 2;

	block_id = block->block_id;
	/* GMU_GX data is read using the GMU_CX block id on A630 */
	if (adreno_is_a630(adreno_dev) &&
		(block_id == A6XX_DBGBUS_GMU_GX))
		block_id = A6XX_DBGBUS_GMU_CX;

	for (i = 0; i < dwords; i++)
		a6xx_dbgc_debug_bus_read(device, block_id, i, &data[i*2]);

	return size;
}

static void _cx_dbgc_regread(unsigned int offsetwords, unsigned int *value)
{
	void __iomem *reg;

	if (WARN((offsetwords < A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) ||
		(offsetwords > A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2),
		"Read beyond CX_DBGC block: 0x%x\n", offsetwords))
		return;

	reg = a6xx_cx_dbgc +
		((offsetwords - A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) << 2);
	*value = __raw_readl(reg);

	/*
	 * ensure this read finishes before the next one.
	 * i.e. act like normal readl()
	 */
	rmb();
}

static void _cx_dbgc_regwrite(unsigned int offsetwords, unsigned int value)
{
	void __iomem *reg;

	if (WARN((offsetwords < A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) ||
		(offsetwords > A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2),
		"Write beyond CX_DBGC block: 0x%x\n", offsetwords))
		return;

	reg = a6xx_cx_dbgc +
		((offsetwords - A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) << 2);

	/*
	 * ensure previous writes post before this one,
	 * i.e. act like normal writel()
	 */
	wmb();
	__raw_writel(value, reg);
}

/* a6xx_cx_debug_bus_read() - Read data from trace bus */
static void a6xx_cx_debug_bus_read(struct kgsl_device *device,
	unsigned int block_id, unsigned int index, unsigned int *val)
{
	unsigned int reg;

	reg = (block_id << A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_BLK_SEL_SHIFT) |
			(index << A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_INDEX_SHIFT);

	_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_A, reg);
	_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_B, reg);
	_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_C, reg);
	_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_D, reg);

	_cx_dbgc_regread(A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2, val);
	val++;
	_cx_dbgc_regread(A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1, val);
}

/*
 * a6xx_snapshot_cx_dbgc_debugbus_block() - Capture debug data for a gpu
 * block from the CX DBGC block
 */
static size_t a6xx_snapshot_cx_dbgc_debugbus_block(struct kgsl_device *device,
	u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_debugbus *header =
		(struct kgsl_snapshot_debugbus *)buf;
	struct adreno_debugbus_block *block = priv;
	int i;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int dwords;
	size_t size;

	dwords = block->dwords;

	/* For a6xx each debug bus data unit is 2 DWORDS */
	size = (dwords * sizeof(unsigned int) * 2) + sizeof(*header);

	if (remain < size) {
		SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
		return 0;
	}

	header->id = block->block_id;
	header->count = dwords * 2;

	for (i = 0; i < dwords; i++)
		a6xx_cx_debug_bus_read(device, block->block_id, i,
					&data[i*2]);

	return size;
}

/* a6xx_snapshot_debugbus() - Capture debug bus data */
static void a6xx_snapshot_debugbus(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot)
{
	int i;

	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_CNTLT,
		(0xf << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT_SHIFT) |
		(0x4 << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU_SHIFT) |
		(0x20 << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN_SHIFT));

	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_CNTLM,
		0xf << A6XX_DBGC_CFG_DBGBUS_CTLTM_ENABLE_SHIFT);

	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_0, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_1, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_2, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_3, 0);

	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_BYTEL_0,
		(0 << A6XX_DBGC_CFG_DBGBUS_BYTEL0_SHIFT) |
		(1 << A6XX_DBGC_CFG_DBGBUS_BYTEL1_SHIFT) |
		(2 << A6XX_DBGC_CFG_DBGBUS_BYTEL2_SHIFT) |
		(3 << A6XX_DBGC_CFG_DBGBUS_BYTEL3_SHIFT) |
		(4 << A6XX_DBGC_CFG_DBGBUS_BYTEL4_SHIFT) |
		(5 << A6XX_DBGC_CFG_DBGBUS_BYTEL5_SHIFT) |
		(6 << A6XX_DBGC_CFG_DBGBUS_BYTEL6_SHIFT) |
		(7 << A6XX_DBGC_CFG_DBGBUS_BYTEL7_SHIFT));
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_BYTEL_1,
		(8 << A6XX_DBGC_CFG_DBGBUS_BYTEL8_SHIFT) |
		(9 << A6XX_DBGC_CFG_DBGBUS_BYTEL9_SHIFT) |
		(10 << A6XX_DBGC_CFG_DBGBUS_BYTEL10_SHIFT) |
		(11 << A6XX_DBGC_CFG_DBGBUS_BYTEL11_SHIFT) |
		(12 << A6XX_DBGC_CFG_DBGBUS_BYTEL12_SHIFT) |
		(13 << A6XX_DBGC_CFG_DBGBUS_BYTEL13_SHIFT) |
		(14 << A6XX_DBGC_CFG_DBGBUS_BYTEL14_SHIFT) |
		(15 << A6XX_DBGC_CFG_DBGBUS_BYTEL15_SHIFT));

	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_0, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_1, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_2, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_3, 0);

	a6xx_cx_dbgc = ioremap(device->reg_phys +
			(A6XX_CX_DBGC_CFG_DBGBUS_SEL_A << 2),
			(A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2 -
				A6XX_CX_DBGC_CFG_DBGBUS_SEL_A + 1) << 2);

	if (a6xx_cx_dbgc) {
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_CNTLT,
			(0xf << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT_SHIFT) |
			(0x4 << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU_SHIFT) |
			(0x20 << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN_SHIFT));

		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_CNTLM,
			0xf << A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE_SHIFT);

		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3, 0);

		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0,
			(0 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL0_SHIFT) |
			(1 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL1_SHIFT) |
			(2 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL2_SHIFT) |
			(3 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL3_SHIFT) |
			(4 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL4_SHIFT) |
			(5 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL5_SHIFT) |
			(6 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL6_SHIFT) |
			(7 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL7_SHIFT));
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1,
			(8 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL8_SHIFT) |
			(9 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL9_SHIFT) |
			(10 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL10_SHIFT) |
			(11 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL11_SHIFT) |
			(12 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL12_SHIFT) |
			(13 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL13_SHIFT) |
			(14 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL14_SHIFT) |
			(15 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL15_SHIFT));

		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3, 0);
	} else
		KGSL_DRV_ERR(device, "Unable to ioremap CX_DBGC_CFG block\n");

	for (i = 0; i < ARRAY_SIZE(a6xx_dbgc_debugbus_blocks); i++) {
		kgsl_snapshot_add_section(device,
			KGSL_SNAPSHOT_SECTION_DEBUGBUS,
			snapshot, a6xx_snapshot_dbgc_debugbus_block,
			(void *) &a6xx_dbgc_debugbus_blocks[i]);
	}

	if (a6xx_cx_dbgc) {
		for (i = 0; i < ARRAY_SIZE(a6xx_cx_dbgc_debugbus_blocks); i++) {
			kgsl_snapshot_add_section(device,
				KGSL_SNAPSHOT_SECTION_DEBUGBUS,
				snapshot, a6xx_snapshot_cx_dbgc_debugbus_block,
				(void *) &a6xx_cx_dbgc_debugbus_blocks[i]);
		}
		iounmap(a6xx_cx_dbgc);
	}
}

static size_t a6xx_snapshot_dump_gmu_registers(struct kgsl_device *device,
		u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_regs *header = (struct kgsl_snapshot_regs *)buf;
	struct kgsl_snapshot_registers *regs = priv;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	int count = 0, j, k;

	/* Figure out how many registers we are going to dump */
	for (j = 0; j < regs->count; j++) {
		int start = regs->regs[j * 2];
		int end = regs->regs[j * 2 + 1];

		count += (end - start + 1);
	}

	if (remain < (count * 8) + sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	for (j = 0; j < regs->count; j++) {
		unsigned int start = regs->regs[j * 2];
		unsigned int end = regs->regs[j * 2 + 1];

		for (k = start; k <= end; k++) {
			unsigned int val;

			kgsl_gmu_regread(device, k, &val);
			*data++ = k;
			*data++ = val;
		}
	}

	header->count = count;

	/* Return the size of the section */
	return (count * 8) + sizeof(*header);
}

static void a6xx_snapshot_gmu(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot)
{
	struct kgsl_snapshot_registers gmu_regs = {
		.regs = a6xx_gmu_registers,
		.count = ARRAY_SIZE(a6xx_gmu_registers) / 2,
	};

	if (!kgsl_gmu_isenabled(device))
		return;

	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
			snapshot, a6xx_snapshot_dump_gmu_registers, &gmu_regs);
}

/* a6xx_snapshot_sqe() - Dump SQE data in snapshot */
static size_t a6xx_snapshot_sqe(struct kgsl_device *device, u8 *buf,
		size_t remain, void *priv)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct kgsl_snapshot_debug *header = (struct kgsl_snapshot_debug *)buf;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	struct adreno_firmware *fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);

	if (remain < DEBUG_SECTION_SZ(1)) {
		SNAPSHOT_ERR_NOMEM(device, "SQE VERSION DEBUG");
		return 0;
	}

	/* Dump the SQE firmware version */
	header->type = SNAPSHOT_DEBUG_SQE_VERSION;
	header->size = 1;
	*data = fw->version;

	return DEBUG_SECTION_SZ(1);
}

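/*
 * Kick off the CP crash dumper, which executes the capture script built
 * in a6xx_crashdump_init(), and wait for it to signal completion.
 */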
static void _a6xx_do_crashdump(struct kgsl_device *device)
{
	unsigned long wait_time;
	unsigned int reg = 0;
	unsigned int val;

	crash_dump_valid = false;

	if (a6xx_capturescript.gpuaddr == 0 ||
		a6xx_crashdump_registers.gpuaddr == 0)
		return;

	/* If the SMMU is stalled we cannot do a crash dump */
	kgsl_regread(device, A6XX_RBBM_STATUS3, &val);
	if (val & BIT(24))
		return;

	/* Turn on APRIV so we can access the buffers */
	kgsl_regwrite(device, A6XX_CP_MISC_CNTL, 1);

	kgsl_regwrite(device, A6XX_CP_CRASH_SCRIPT_BASE_LO,
			lower_32_bits(a6xx_capturescript.gpuaddr));
	kgsl_regwrite(device, A6XX_CP_CRASH_SCRIPT_BASE_HI,
			upper_32_bits(a6xx_capturescript.gpuaddr));
	kgsl_regwrite(device, A6XX_CP_CRASH_DUMP_CNTL, 1);

	wait_time = jiffies + msecs_to_jiffies(CP_CRASH_DUMPER_TIMEOUT);
	while (!time_after(jiffies, wait_time)) {
		kgsl_regread(device, A6XX_CP_CRASH_DUMP_STATUS, &reg);
		if (reg & 0x2)
			break;
		cpu_relax();
	}

	kgsl_regwrite(device, A6XX_CP_MISC_CNTL, 0);

	if (!(reg & 0x2)) {
		KGSL_CORE_ERR("Crash dump timed out: 0x%X\n", reg);
		return;
	}

	crash_dump_valid = true;
}

/*
 * a6xx_snapshot() - A6XX GPU snapshot function
 * @adreno_dev: Device being snapshotted
 * @snapshot: Pointer to the snapshot instance
 *
 * This is where all of the A6XX specific bits and pieces are grabbed
 * into the snapshot memory
 */
void a6xx_snapshot(struct adreno_device *adreno_dev,
		struct kgsl_snapshot *snapshot)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	struct adreno_snapshot_data *snap_data = gpudev->snapshot_data;

	/* Try to run the crash dumper */
	_a6xx_do_crashdump(device);

	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
		snapshot, a6xx_snapshot_registers, NULL);

	adreno_snapshot_vbif_registers(device, snapshot,
		a6xx_vbif_snapshot_registers,
		ARRAY_SIZE(a6xx_vbif_snapshot_registers));

	/* CP_SQE indexed registers */
	kgsl_snapshot_indexed_registers(device, snapshot,
		A6XX_CP_SQE_STAT_ADDR, A6XX_CP_SQE_STAT_DATA,
		0, snap_data->sect_sizes->cp_pfp);

	/* CP_DRAW_STATE */
	kgsl_snapshot_indexed_registers(device, snapshot,
		A6XX_CP_DRAW_STATE_ADDR, A6XX_CP_DRAW_STATE_DATA,
		0, 0x100);

	/* SQE_UCODE Cache */
	kgsl_snapshot_indexed_registers(device, snapshot,
		A6XX_CP_SQE_UCODE_DBG_ADDR, A6XX_CP_SQE_UCODE_DBG_DATA,
		0, 0x6000);

	/* CP ROQ */
	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG,
		snapshot, adreno_snapshot_cp_roq,
		&snap_data->sect_sizes->roq);

	/* SQE Firmware */
	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG,
		snapshot, a6xx_snapshot_sqe, NULL);

	/* Mempool debug data */
	a6xx_snapshot_mempool(device, snapshot);

	/* Shader memory */
	a6xx_snapshot_shader(device, snapshot);

	/* MVC register section */
	a6xx_snapshot_mvc_regs(device, snapshot);

	/* registers dumped through DBG AHB */
	a6xx_snapshot_dbgahb_regs(device, snapshot);

	a6xx_snapshot_debugbus(device, snapshot);

	/* GMU TCM data dumped through AHB */
	a6xx_snapshot_gmu(device, snapshot);
}

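/*
 * Build the capture script entries for the MVC register clusters: one
 * aperture write plus one read command per register pair, per context.
 */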
static int _a6xx_crashdump_init_mvc(uint64_t *ptr, uint64_t *offset)
{
	int qwords = 0;
	unsigned int i, j, k;
	unsigned int count;

	for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
		struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];

		cluster->offset0 = *offset;
		for (j = 0; j < A6XX_NUM_CTXTS; j++) {

			if (j == 1)
				cluster->offset1 = *offset;

			ptr[qwords++] = (cluster->id << 8) | (j << 4) | j;
			ptr[qwords++] =
				((uint64_t)A6XX_CP_APERTURE_CNTL_HOST << 44) |
				(1 << 21) | 1;

			for (k = 0; k < cluster->num_sets; k++) {
				count = REG_PAIR_COUNT(cluster->regs, k);
				ptr[qwords++] =
					a6xx_crashdump_registers.gpuaddr + *offset;
				ptr[qwords++] =
					(((uint64_t)cluster->regs[2 * k]) << 44) |
					count;

				*offset += count * sizeof(unsigned int);
			}
		}
	}

	return qwords;
}

static int _a6xx_crashdump_init_shader(struct a6xx_shader_block *block,
		uint64_t *ptr, uint64_t *offset)
{
	int qwords = 0;
	unsigned int j;

	/* Capture each bank in the block */
	for (j = 0; j < A6XX_NUM_SHADER_BANKS; j++) {
		/* Program the aperture */
		ptr[qwords++] =
			(block->statetype << A6XX_SHADER_STATETYPE_SHIFT) | j;
		ptr[qwords++] = (((uint64_t) A6XX_HLSQ_DBG_READ_SEL << 44)) |
			(1 << 21) | 1;

		/* Read all the data in one chunk */
		ptr[qwords++] = a6xx_crashdump_registers.gpuaddr + *offset;
		ptr[qwords++] =
			(((uint64_t) A6XX_HLSQ_DBG_AHB_READ_APERTURE << 44)) |
			block->sz;

		/* Remember the offset of the first bank for easy access */
		if (j == 0)
			block->offset = *offset;

		*offset += block->sz * sizeof(unsigned int);
	}

	return qwords;
}

static int _a6xx_crashdump_init_ctx_dbgahb(uint64_t *ptr, uint64_t *offset)
{
	int qwords = 0;
	unsigned int i, j, k;
	unsigned int count;

	for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
		struct a6xx_cluster_dbgahb_registers *cluster =
				&a6xx_dbgahb_ctx_clusters[i];

		cluster->offset0 = *offset;

		for (j = 0; j < A6XX_NUM_CTXTS; j++) {
			if (j == 1)
				cluster->offset1 = *offset;

			/* Program the aperture */
			ptr[qwords++] =
				((cluster->statetype + j * 2) & 0xff) << 8;
			ptr[qwords++] =
				(((uint64_t)A6XX_HLSQ_DBG_READ_SEL << 44)) |
				(1 << 21) | 1;

			for (k = 0; k < cluster->num_sets; k++) {
				unsigned int start = cluster->regs[2 * k];

				count = REG_PAIR_COUNT(cluster->regs, k);
				ptr[qwords++] =
					a6xx_crashdump_registers.gpuaddr + *offset;
				ptr[qwords++] =
					(((uint64_t)(A6XX_HLSQ_DBG_AHB_READ_APERTURE +
					start - cluster->regbase / 4) << 44)) |
					count;

				*offset += count * sizeof(unsigned int);
			}
		}
	}
	return qwords;
}

static int _a6xx_crashdump_init_non_ctx_dbgahb(uint64_t *ptr, uint64_t *offset)
{
	int qwords = 0;
	unsigned int i, k;
	unsigned int count;

	for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
		struct a6xx_non_ctx_dbgahb_registers *regs =
				&a6xx_non_ctx_dbgahb[i];

		regs->offset = *offset;

		/* Program the aperture */
		ptr[qwords++] = (regs->statetype & 0xff) << 8;
		ptr[qwords++] = (((uint64_t)A6XX_HLSQ_DBG_READ_SEL << 44)) |
			(1 << 21) | 1;

		for (k = 0; k < regs->num_sets; k++) {
			unsigned int start = regs->regs[2 * k];

			count = REG_PAIR_COUNT(regs->regs, k);
			ptr[qwords++] =
				a6xx_crashdump_registers.gpuaddr + *offset;
			ptr[qwords++] =
				(((uint64_t)(A6XX_HLSQ_DBG_AHB_READ_APERTURE +
				start - regs->regbase / 4) << 44)) |
				count;

			*offset += count * sizeof(unsigned int);
		}
	}
	return qwords;
}

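/*
 * Size and allocate the capture script and data buffers, then build the
 * script that the CP crash dumper executes at snapshot time.
 */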
void a6xx_crashdump_init(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned int script_size = 0;
	unsigned int data_size = 0;
	unsigned int i, j, k;
	uint64_t *ptr;
	uint64_t offset = 0;

	if (a6xx_capturescript.gpuaddr != 0 &&
		a6xx_crashdump_registers.gpuaddr != 0)
		return;

	/*
	 * We need to allocate two buffers:
	 * 1 - the buffer to hold the capture script
	 * 2 - the buffer to hold the data
	 */

	/*
	 * To save the registers, we need 16 bytes per register pair for the
	 * script and a dword for each register in the data
	 */
	for (i = 0; i < ARRAY_SIZE(_a6xx_cd_registers); i++) {
		struct cdregs *regs = &_a6xx_cd_registers[i];

		/* Each pair needs 16 bytes (2 qwords) */
		script_size += (regs->size / 2) * 16;

		/* Each register needs a dword in the data */
		for (j = 0; j < regs->size / 2; j++)
			data_size += REG_PAIR_COUNT(regs->regs, j) *
				sizeof(unsigned int);
	}

	/*
	 * To save the shader blocks for each block in each type we need 32
	 * bytes for the script (16 bytes to program the aperture and 16 to
	 * read the data) and then a block specific number of bytes to hold
	 * the data
	 */
	for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
		script_size += 32 * A6XX_NUM_SHADER_BANKS;
		data_size += a6xx_shader_blocks[i].sz * sizeof(unsigned int) *
			A6XX_NUM_SHADER_BANKS;
	}

	/* Calculate the script and data size for MVC registers */
	for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
		struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];

		for (j = 0; j < A6XX_NUM_CTXTS; j++) {

			/* 16 bytes for programming the aperture */
			script_size += 16;

			/* Reading each pair of registers takes 16 bytes */
			script_size += 16 * cluster->num_sets;

			/* A dword per register read from the cluster list */
			for (k = 0; k < cluster->num_sets; k++)
				data_size += REG_PAIR_COUNT(cluster->regs, k) *
						sizeof(unsigned int);
		}
	}

	/* Calculate the script and data size for debug AHB registers */
	for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
		struct a6xx_cluster_dbgahb_registers *cluster =
				&a6xx_dbgahb_ctx_clusters[i];

		for (j = 0; j < A6XX_NUM_CTXTS; j++) {

			/* 16 bytes for programming the aperture */
			script_size += 16;

			/* Reading each pair of registers takes 16 bytes */
			script_size += 16 * cluster->num_sets;

			/* A dword per register read from the cluster list */
			for (k = 0; k < cluster->num_sets; k++)
				data_size += REG_PAIR_COUNT(cluster->regs, k) *
						sizeof(unsigned int);
		}
	}

	/*
	 * Calculate the script and data size for non context debug
	 * AHB registers
	 */
	for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
		struct a6xx_non_ctx_dbgahb_registers *regs =
				&a6xx_non_ctx_dbgahb[i];

		/* 16 bytes for programming the aperture */
		script_size += 16;

		/* Reading each pair of registers takes 16 bytes */
		script_size += 16 * regs->num_sets;

		/* A dword per register read from the cluster list */
		for (k = 0; k < regs->num_sets; k++)
			data_size += REG_PAIR_COUNT(regs->regs, k) *
					sizeof(unsigned int);
	}

	/* Now allocate the script and data buffers */

	/* The script buffer needs 2 extra qwords on the end */
	if (kgsl_allocate_global(device, &a6xx_capturescript,
		script_size + 16, KGSL_MEMFLAGS_GPUREADONLY,
		KGSL_MEMDESC_PRIVILEGED, "capturescript"))
		return;

	if (kgsl_allocate_global(device, &a6xx_crashdump_registers, data_size,
		0, KGSL_MEMDESC_PRIVILEGED, "capturescript_regs")) {
		kgsl_free_global(KGSL_DEVICE(adreno_dev), &a6xx_capturescript);
		return;
	}

	/* Build the crash script */

	ptr = (uint64_t *)a6xx_capturescript.hostptr;

	/* For the registers, program a read command for each pair */
	for (i = 0; i < ARRAY_SIZE(_a6xx_cd_registers); i++) {
		struct cdregs *regs = &_a6xx_cd_registers[i];

		for (j = 0; j < regs->size / 2; j++) {
			unsigned int r = REG_PAIR_COUNT(regs->regs, j);
			*ptr++ = a6xx_crashdump_registers.gpuaddr + offset;
			*ptr++ = (((uint64_t) regs->regs[2 * j]) << 44) | r;
			offset += r * sizeof(unsigned int);
		}
	}

	/* Program each shader block */
	for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
		ptr += _a6xx_crashdump_init_shader(&a6xx_shader_blocks[i], ptr,
							&offset);
	}

	/* Program the capturescript for the MVC registers */
	ptr += _a6xx_crashdump_init_mvc(ptr, &offset);

	ptr += _a6xx_crashdump_init_ctx_dbgahb(ptr, &offset);

	ptr += _a6xx_crashdump_init_non_ctx_dbgahb(ptr, &offset);

	*ptr++ = 0;
	*ptr++ = 0;
}