blob: 63dbde0960f842ee7ff3399e8c9a6e83eac8960c [file] [log] [blame]
Shrenuj Bansal41665402016-12-16 15:25:54 -08001/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#include <linux/io.h>
15#include "kgsl.h"
16#include "adreno.h"
17#include "kgsl_snapshot.h"
18#include "adreno_snapshot.h"
19#include "a6xx_reg.h"
20#include "adreno_a6xx.h"
Kyle Piefer60733aa2017-03-21 11:24:01 -070021#include "kgsl_gmu.h"
Shrenuj Bansal41665402016-12-16 15:25:54 -080022
/* Number of hardware contexts dumped per context-clustered register block */
#define A6XX_NUM_CTXTS 2
24
/*
 * Context-clustered register ranges read through the CP aperture.
 * Each table is a list of {start, end} register offset pairs (inclusive).
 */
static const unsigned int a6xx_gras_cluster[] = {
	0x8000, 0x8006, 0x8010, 0x8092, 0x8094, 0x809D, 0x80A0, 0x80A6,
	0x80AF, 0x80F1, 0x8100, 0x8107, 0x8109, 0x8109, 0x8110, 0x8110,
	0x8400, 0x840B,
};

static const unsigned int a6xx_ps_cluster[] = {
	0x8800, 0x8806, 0x8809, 0x8811, 0x8818, 0x881E, 0x8820, 0x8865,
	0x8870, 0x8879, 0x8880, 0x8889, 0x8890, 0x8891, 0x8898, 0x8898,
	0x88C0, 0x88c1, 0x88D0, 0x88E3, 0x88F0, 0x88F3, 0x8900, 0x891A,
	0x8927, 0x8928, 0x8C00, 0x8C01, 0x8C17, 0x8C33, 0x9200, 0x9216,
	0x9218, 0x9236, 0x9300, 0x9306,
};

static const unsigned int a6xx_fe_cluster[] = {
	0x9300, 0x9306, 0x9800, 0x9806, 0x9B00, 0x9B07, 0xA000, 0xA009,
	0xA00E, 0xA0EF, 0xA0F8, 0xA0F8,
};

static const unsigned int a6xx_pc_vs_cluster[] = {
	0x9100, 0x9108, 0x9300, 0x9306, 0x9980, 0x9981, 0x9B00, 0x9B07,
};

/*
 * One entry per CP register cluster: the cluster id, its {start, end}
 * pair table, and the per-context offsets into the crash dump buffer
 * (used by a6xx_snapshot_mvc; left zero here, presumably filled in when
 * the capture script is built — confirm against the script builder).
 */
static struct a6xx_cluster_registers {
	unsigned int id;		/* CP_CLUSTER_* identifier */
	const unsigned int *regs;	/* {start, end} pair table */
	unsigned int num_sets;		/* number of pairs in regs */
	unsigned int offset0;		/* dump buffer offset, context 0 */
	unsigned int offset1;		/* dump buffer offset, context 1 */
} a6xx_clusters[] = {
	{ CP_CLUSTER_GRAS, a6xx_gras_cluster, ARRAY_SIZE(a6xx_gras_cluster)/2 },
	{ CP_CLUSTER_PS, a6xx_ps_cluster, ARRAY_SIZE(a6xx_ps_cluster)/2 },
	{ CP_CLUSTER_FE, a6xx_fe_cluster, ARRAY_SIZE(a6xx_fe_cluster)/2 },
	{ CP_CLUSTER_PC_VS, a6xx_pc_vs_cluster,
		ARRAY_SIZE(a6xx_pc_vs_cluster)/2 },
};
61
/* Pairs a cluster description with the context id currently being dumped */
struct a6xx_cluster_regs_info {
	struct a6xx_cluster_registers *cluster;
	unsigned int ctxt_id;
};
66
/*
 * SP/TP/HLSQ context-clustered register ranges ({start, end} pairs,
 * inclusive) read through the HLSQ AHB debug aperture.
 */
static const unsigned int a6xx_sp_vs_hlsq_cluster[] = {
	0xB800, 0xB803, 0xB820, 0xB822,
};

static const unsigned int a6xx_sp_vs_sp_cluster[] = {
	0xA800, 0xA824, 0xA830, 0xA83C, 0xA840, 0xA864, 0xA870, 0xA895,
	0xA8A0, 0xA8AF, 0xA8C0, 0xA8C3,
};

/* Ranges shared between the VS and PS pipes ("duplicate" lists) */
static const unsigned int a6xx_hlsq_duplicate_cluster[] = {
	0xBB10, 0xBB11, 0xBB20, 0xBB29,
};

static const unsigned int a6xx_hlsq_2d_duplicate_cluster[] = {
	0xBD80, 0xBD80,
};

static const unsigned int a6xx_sp_duplicate_cluster[] = {
	0xAB00, 0xAB00, 0xAB04, 0xAB05, 0xAB10, 0xAB1B, 0xAB20, 0xAB20,
};

static const unsigned int a6xx_tp_duplicate_cluster[] = {
	0xB300, 0xB307, 0xB309, 0xB309, 0xB380, 0xB382,
};

static const unsigned int a6xx_sp_ps_hlsq_cluster[] = {
	0xB980, 0xB980, 0xB982, 0xB987, 0xB990, 0xB99B, 0xB9A0, 0xB9A2,
	0xB9C0, 0xB9C9,
};

static const unsigned int a6xx_sp_ps_hlsq_2d_cluster[] = {
	0xBD80, 0xBD80,
};

static const unsigned int a6xx_sp_ps_sp_cluster[] = {
	0xA980, 0xA9A8, 0xA9B0, 0xA9BC, 0xA9D0, 0xA9D3, 0xA9E0, 0xA9F3,
	0xAA00, 0xAA00, 0xAA30, 0xAA31,
};

static const unsigned int a6xx_sp_ps_sp_2d_cluster[] = {
	0xACC0, 0xACC0,
};

static const unsigned int a6xx_sp_ps_tp_cluster[] = {
	0xB180, 0xB183, 0xB190, 0xB191,
};

static const unsigned int a6xx_sp_ps_tp_2d_cluster[] = {
	0xB4C0, 0xB4D1,
};
117
/*
 * Debug-AHB register clusters: the CP cluster id, the AHB byte base
 * address of the block (regbase / 4 gives the dword register offset —
 * see a6xx_read_dbgahb), the HLSQ statetype select value, the
 * {start, end} pair table, and per-context offsets into the crash dump
 * buffer used by a6xx_snapshot_cluster_dbgahb.
 */
static struct a6xx_cluster_dbgahb_registers {
	unsigned int id;		/* CP_CLUSTER_* identifier */
	unsigned int regbase;		/* AHB byte base of the block */
	unsigned int statetype;		/* HLSQ_DBG_READ_SEL statetype */
	const unsigned int *regs;	/* {start, end} pair table */
	unsigned int num_sets;		/* number of pairs in regs */
	unsigned int offset0;		/* dump buffer offset, context 0 */
	unsigned int offset1;		/* dump buffer offset, context 1 */
} a6xx_dbgahb_ctx_clusters[] = {
	{ CP_CLUSTER_SP_VS, 0x0002E000, 0x41, a6xx_sp_vs_hlsq_cluster,
		ARRAY_SIZE(a6xx_sp_vs_hlsq_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002A000, 0x21, a6xx_sp_vs_sp_cluster,
		ARRAY_SIZE(a6xx_sp_vs_sp_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002E000, 0x41, a6xx_hlsq_duplicate_cluster,
		ARRAY_SIZE(a6xx_hlsq_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002F000, 0x45, a6xx_hlsq_2d_duplicate_cluster,
		ARRAY_SIZE(a6xx_hlsq_2d_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002A000, 0x21, a6xx_sp_duplicate_cluster,
		ARRAY_SIZE(a6xx_sp_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002C000, 0x1, a6xx_tp_duplicate_cluster,
		ARRAY_SIZE(a6xx_tp_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002E000, 0x42, a6xx_sp_ps_hlsq_cluster,
		ARRAY_SIZE(a6xx_sp_ps_hlsq_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002F000, 0x46, a6xx_sp_ps_hlsq_2d_cluster,
		ARRAY_SIZE(a6xx_sp_ps_hlsq_2d_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002A000, 0x22, a6xx_sp_ps_sp_cluster,
		ARRAY_SIZE(a6xx_sp_ps_sp_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002B000, 0x26, a6xx_sp_ps_sp_2d_cluster,
		ARRAY_SIZE(a6xx_sp_ps_sp_2d_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002C000, 0x2, a6xx_sp_ps_tp_cluster,
		ARRAY_SIZE(a6xx_sp_ps_tp_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002D000, 0x6, a6xx_sp_ps_tp_2d_cluster,
		ARRAY_SIZE(a6xx_sp_ps_tp_2d_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002E000, 0x42, a6xx_hlsq_duplicate_cluster,
		ARRAY_SIZE(a6xx_hlsq_duplicate_cluster) / 2 },
	/*
	 * NOTE(review): the next two entries use the PS-side statetypes
	 * (0x22, 0x2) but are tagged CP_CLUSTER_SP_VS — verify the cluster
	 * id is intentional and not a copy/paste of the SP_VS rows above.
	 */
	{ CP_CLUSTER_SP_VS, 0x0002A000, 0x22, a6xx_sp_duplicate_cluster,
		ARRAY_SIZE(a6xx_sp_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002C000, 0x2, a6xx_tp_duplicate_cluster,
		ARRAY_SIZE(a6xx_tp_duplicate_cluster) / 2 },
};
158
/* Pairs a debug-AHB cluster with the context id currently being dumped */
struct a6xx_cluster_dbgahb_regs_info {
	struct a6xx_cluster_dbgahb_registers *cluster;
	unsigned int ctxt_id;
};
163
/*
 * Non-context (global) register ranges, {start, end} pairs inclusive,
 * read through the HLSQ AHB debug aperture.
 */
static const unsigned int a6xx_hlsq_non_ctx_registers[] = {
	0xBE00, 0xBE01, 0xBE04, 0xBE05, 0xBE08, 0xBE09, 0xBE10, 0xBE15,
	0xBE20, 0xBE23,
};

static const unsigned int a6xx_sp_non_ctx_registers[] = {
	0xAE00, 0xAE04, 0xAE0C, 0xAE0C, 0xAE0F, 0xAE2B, 0xAE30, 0xAE32,
	0xAE35, 0xAE35, 0xAE3A, 0xAE3F, 0xAE50, 0xAE52,
};

static const unsigned int a6xx_tp_non_ctx_registers[] = {
	0xB600, 0xB601, 0xB604, 0xB605, 0xB610, 0xB61B, 0xB620, 0xB623,
};

/*
 * Non-context debug-AHB blocks: AHB byte base, statetype select value,
 * {start, end} pair table, and the offset into the crash dump buffer
 * used by a6xx_snapshot_non_ctx_dbgahb (left zero here).
 */
static struct a6xx_non_ctx_dbgahb_registers {
	unsigned int regbase;		/* AHB byte base of the block */
	unsigned int statetype;		/* HLSQ_DBG_READ_SEL statetype */
	const unsigned int *regs;	/* {start, end} pair table */
	unsigned int num_sets;		/* number of pairs in regs */
	unsigned int offset;		/* dump buffer offset */
} a6xx_non_ctx_dbgahb[] = {
	{ 0x0002F800, 0x40, a6xx_hlsq_non_ctx_registers,
		ARRAY_SIZE(a6xx_hlsq_non_ctx_registers) / 2 },
	{ 0x0002B800, 0x20, a6xx_sp_non_ctx_registers,
		ARRAY_SIZE(a6xx_sp_non_ctx_registers) / 2 },
	{ 0x0002D800, 0x0, a6xx_tp_non_ctx_registers,
		ARRAY_SIZE(a6xx_tp_non_ctx_registers) / 2 },
};
192
/* VBIF register ranges ({start, end} pairs) for VBIF version 0x20xxxxxx */
static const unsigned int a6xx_vbif_ver_20xxxxxx_registers[] = {
	/* VBIF */
	0x3000, 0x3007, 0x300C, 0x3014, 0x3018, 0x302D, 0x3030, 0x3031,
	0x3034, 0x3036, 0x303C, 0x303D, 0x3040, 0x3040, 0x3042, 0x3042,
	0x3049, 0x3049, 0x3058, 0x3058, 0x305A, 0x3061, 0x3064, 0x3068,
	0x306C, 0x306D, 0x3080, 0x3088, 0x308B, 0x308C, 0x3090, 0x3094,
	0x3098, 0x3098, 0x309C, 0x309C, 0x30C0, 0x30C0, 0x30C8, 0x30C8,
	0x30D0, 0x30D0, 0x30D8, 0x30D8, 0x30E0, 0x30E0, 0x3100, 0x3100,
	0x3108, 0x3108, 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120,
	0x3124, 0x3125, 0x3129, 0x3129, 0x3131, 0x3131, 0x3154, 0x3154,
	0x3156, 0x3156, 0x3158, 0x3158, 0x315A, 0x315A, 0x315C, 0x315C,
	0x315E, 0x315E, 0x3160, 0x3160, 0x3162, 0x3162, 0x340C, 0x340C,
	0x3410, 0x3410, 0x3800, 0x3801,
};

/* GMU register ranges ({start, end} pairs) */
static const unsigned int a6xx_gmu_registers[] = {
	/* GMU */
	0x1B400, 0x1C3FF, 0x1C400, 0x1D3FF,
};

/* Maps a VBIF version (value, mask) to the register list to dump */
static const struct adreno_vbif_snapshot_registers
a6xx_vbif_snapshot_registers[] = {
	{ 0x20040000, 0xFF000000, a6xx_vbif_ver_20xxxxxx_registers,
				ARRAY_SIZE(a6xx_vbif_ver_20xxxxxx_registers)/2},
};
218
/*
 * Set of global registers to dump for A6XX on snapshot.
 * Registers in pairs - first value is the start offset, second
 * is the stop offset (inclusive).  Dumped either by the crash dumper
 * (a6xx_snapshot_registers) or directly over the register bus
 * (a6xx_legacy_snapshot_registers).
 */

static const unsigned int a6xx_registers[] = {
	/* RBBM */
	0x0000, 0x0002, 0x0010, 0x0010, 0x0012, 0x0012, 0x0018, 0x001B,
	0x001e, 0x0032, 0x0038, 0x003C, 0x0042, 0x0042, 0x0044, 0x0044,
	0x0047, 0x0047, 0x0056, 0x0056, 0x00AD, 0x00AE, 0x00B0, 0x00FB,
	0x0100, 0x011D, 0x0200, 0x020D, 0x0210, 0x0213, 0x0218, 0x023D,
	0x0400, 0x04F9, 0x0500, 0x0500, 0x0505, 0x050B, 0x050E, 0x0511,
	0x0533, 0x0533, 0x0540, 0x0555,
	/* CP */
	0x0800, 0x0808, 0x0810, 0x0813, 0x0820, 0x0821, 0x0823, 0x0827,
	0x0830, 0x0833, 0x0840, 0x0843, 0x084F, 0x086F, 0x0880, 0x088A,
	0x08A0, 0x08AB, 0x08C0, 0x08C4, 0x08D0, 0x08DD, 0x08F0, 0x08F3,
	0x0900, 0x0903, 0x0908, 0x0911, 0x0928, 0x093E, 0x0942, 0x094D,
	0x0980, 0x0984, 0x098D, 0x0996, 0x0998, 0x099E, 0x09A0, 0x09A6,
	0x09A8, 0x09AE, 0x09B0, 0x09B1, 0x09C2, 0x09C8, 0x0A00, 0x0A03,
	/* VSC */
	0x0C00, 0x0C04, 0x0C06, 0x0C06, 0x0C10, 0x0CD9, 0x0E00, 0x0E0E,
	/* UCHE */
	0x0E10, 0x0E13, 0x0E17, 0x0E19, 0x0E1C, 0x0E2B, 0x0E30, 0x0E32,
	0x0E38, 0x0E39,
	/* GRAS */
	0x8600, 0x8601, 0x8610, 0x861B, 0x8620, 0x8620, 0x8628, 0x862B,
	0x8630, 0x8637,
	/* RB */
	0x8E01, 0x8E01, 0x8E04, 0x8E05, 0x8E07, 0x8E08, 0x8E0C, 0x8E0C,
	0x8E10, 0x8E1C, 0x8E20, 0x8E25, 0x8E28, 0x8E28, 0x8E2C, 0x8E2F,
	0x8E3B, 0x8E3E, 0x8E40, 0x8E43, 0x8E50, 0x8E5E, 0x8E70, 0x8E77,
	/* VPC */
	0x9600, 0x9604, 0x9624, 0x9637,
	/* PC */
	0x9E00, 0x9E01, 0x9E03, 0x9E0E, 0x9E11, 0x9E16, 0x9E19, 0x9E19,
	0x9E1C, 0x9E1C, 0x9E20, 0x9E23, 0x9E30, 0x9E31, 0x9E34, 0x9E34,
	0x9E70, 0x9E72, 0x9E78, 0x9E79, 0x9E80, 0x9FFF,
	/* VFD */
	0xA600, 0xA601, 0xA603, 0xA603, 0xA60A, 0xA60A, 0xA610, 0xA617,
	0xA630, 0xA630,
};
262
/* Hardware block ids selectable on the A6XX debug bus */
enum a6xx_debugbus_id {
	A6XX_DBGBUS_CP           = 0x1,
	A6XX_DBGBUS_RBBM         = 0x2,
	A6XX_DBGBUS_VBIF         = 0x3,
	A6XX_DBGBUS_HLSQ         = 0x4,
	A6XX_DBGBUS_UCHE         = 0x5,
	A6XX_DBGBUS_DPM          = 0x6,
	A6XX_DBGBUS_TESS         = 0x7,
	A6XX_DBGBUS_PC           = 0x8,
	A6XX_DBGBUS_VFDP         = 0x9,
	A6XX_DBGBUS_VPC          = 0xa,
	A6XX_DBGBUS_TSE          = 0xb,
	A6XX_DBGBUS_RAS          = 0xc,
	A6XX_DBGBUS_VSC          = 0xd,
	A6XX_DBGBUS_COM          = 0xe,
	A6XX_DBGBUS_LRZ          = 0x10,
	A6XX_DBGBUS_A2D          = 0x11,
	A6XX_DBGBUS_CCUFCHE      = 0x12,
	A6XX_DBGBUS_GMU_CX       = 0x13,
	A6XX_DBGBUS_RBP          = 0x14,
	A6XX_DBGBUS_DCS          = 0x15,
	A6XX_DBGBUS_RBBM_CFG     = 0x16,
	A6XX_DBGBUS_CX           = 0x17,
	A6XX_DBGBUS_GMU_GX       = 0x18,
	A6XX_DBGBUS_TPFCHE       = 0x19,
	A6XX_DBGBUS_GPC          = 0x1d,
	A6XX_DBGBUS_LARC         = 0x1e,
	A6XX_DBGBUS_HLSQ_SPTP    = 0x1f,
	A6XX_DBGBUS_RB_0         = 0x20,
	A6XX_DBGBUS_RB_1         = 0x21,
	A6XX_DBGBUS_UCHE_WRAPPER = 0x24,
	A6XX_DBGBUS_CCU_0        = 0x28,
	A6XX_DBGBUS_CCU_1        = 0x29,
	A6XX_DBGBUS_VFD_0        = 0x38,
	A6XX_DBGBUS_VFD_1        = 0x39,
	A6XX_DBGBUS_VFD_2        = 0x3a,
	A6XX_DBGBUS_VFD_3        = 0x3b,
	A6XX_DBGBUS_SP_0         = 0x40,
	A6XX_DBGBUS_SP_1         = 0x41,
	A6XX_DBGBUS_TPL1_0       = 0x48,
	A6XX_DBGBUS_TPL1_1       = 0x49,
	A6XX_DBGBUS_TPL1_2       = 0x4a,
	A6XX_DBGBUS_TPL1_3       = 0x4b,
};
307
/*
 * Debug bus blocks on the GX-side debug controller; second field is the
 * per-block size used by the common debugbus dump code (see
 * struct adreno_debugbus_block in adreno.h).
 */
static const struct adreno_debugbus_block a6xx_dbgc_debugbus_blocks[] = {
	{ A6XX_DBGBUS_CP, 0x100, },
	{ A6XX_DBGBUS_RBBM, 0x100, },
	{ A6XX_DBGBUS_HLSQ, 0x100, },
	{ A6XX_DBGBUS_UCHE, 0x100, },
	{ A6XX_DBGBUS_DPM, 0x100, },
	{ A6XX_DBGBUS_TESS, 0x100, },
	{ A6XX_DBGBUS_PC, 0x100, },
	{ A6XX_DBGBUS_VFDP, 0x100, },
	{ A6XX_DBGBUS_VPC, 0x100, },
	{ A6XX_DBGBUS_TSE, 0x100, },
	{ A6XX_DBGBUS_RAS, 0x100, },
	{ A6XX_DBGBUS_VSC, 0x100, },
	{ A6XX_DBGBUS_COM, 0x100, },
	{ A6XX_DBGBUS_LRZ, 0x100, },
	{ A6XX_DBGBUS_A2D, 0x100, },
	{ A6XX_DBGBUS_CCUFCHE, 0x100, },
	{ A6XX_DBGBUS_RBP, 0x100, },
	{ A6XX_DBGBUS_DCS, 0x100, },
	{ A6XX_DBGBUS_RBBM_CFG, 0x100, },
	{ A6XX_DBGBUS_GMU_GX, 0x100, },
	{ A6XX_DBGBUS_TPFCHE, 0x100, },
	{ A6XX_DBGBUS_GPC, 0x100, },
	{ A6XX_DBGBUS_LARC, 0x100, },
	{ A6XX_DBGBUS_HLSQ_SPTP, 0x100, },
	{ A6XX_DBGBUS_RB_0, 0x100, },
	{ A6XX_DBGBUS_RB_1, 0x100, },
	{ A6XX_DBGBUS_UCHE_WRAPPER, 0x100, },
	{ A6XX_DBGBUS_CCU_0, 0x100, },
	{ A6XX_DBGBUS_CCU_1, 0x100, },
	{ A6XX_DBGBUS_VFD_0, 0x100, },
	{ A6XX_DBGBUS_VFD_1, 0x100, },
	{ A6XX_DBGBUS_VFD_2, 0x100, },
	{ A6XX_DBGBUS_VFD_3, 0x100, },
	{ A6XX_DBGBUS_SP_0, 0x100, },
	{ A6XX_DBGBUS_SP_1, 0x100, },
	{ A6XX_DBGBUS_TPL1_0, 0x100, },
	{ A6XX_DBGBUS_TPL1_1, 0x100, },
	{ A6XX_DBGBUS_TPL1_2, 0x100, },
	{ A6XX_DBGBUS_TPL1_3, 0x100, },
};
Shrenuj Bansal41665402016-12-16 15:25:54 -0800349
/* MMIO base for the CX debug controller (mapped outside this chunk) */
static void __iomem *a6xx_cx_dbgc;
/* Debug bus blocks that live on the CX (always-on) side */
static const struct adreno_debugbus_block a6xx_cx_dbgc_debugbus_blocks[] = {
	{ A6XX_DBGBUS_VBIF, 0x100, },
	{ A6XX_DBGBUS_GMU_CX, 0x100, },
	{ A6XX_DBGBUS_CX, 0x100, },
};
356
/* Number of banks dumped per shader memory block */
#define A6XX_NUM_SHADER_BANKS 3
/* Statetype field position in the HLSQ read select register */
#define A6XX_SHADER_STATETYPE_SHIFT 8
359
/* Statetype ids for the shader memory blocks readable through HLSQ */
enum a6xx_shader_obj {
	A6XX_TP0_TMO_DATA               = 0x9,
	A6XX_TP0_SMO_DATA               = 0xa,
	A6XX_TP0_MIPMAP_BASE_DATA       = 0xb,
	A6XX_TP1_TMO_DATA               = 0x19,
	A6XX_TP1_SMO_DATA               = 0x1a,
	A6XX_TP1_MIPMAP_BASE_DATA       = 0x1b,
	A6XX_SP_INST_DATA               = 0x29,
	A6XX_SP_LB_0_DATA               = 0x2a,
	A6XX_SP_LB_1_DATA               = 0x2b,
	A6XX_SP_LB_2_DATA               = 0x2c,
	A6XX_SP_LB_3_DATA               = 0x2d,
	A6XX_SP_LB_4_DATA               = 0x2e,
	A6XX_SP_LB_5_DATA               = 0x2f,
	A6XX_SP_CB_BINDLESS_DATA        = 0x30,
	A6XX_SP_CB_LEGACY_DATA          = 0x31,
	A6XX_SP_UAV_DATA                = 0x32,
	A6XX_SP_INST_TAG                = 0x33,
	A6XX_SP_CB_BINDLESS_TAG         = 0x34,
	A6XX_SP_TMO_UMO_TAG             = 0x35,
	A6XX_SP_SMO_TAG                 = 0x36,
	A6XX_SP_STATE_DATA              = 0x37,
	A6XX_HLSQ_CHUNK_CVS_RAM         = 0x49,
	A6XX_HLSQ_CHUNK_CPS_RAM         = 0x4a,
	A6XX_HLSQ_CHUNK_CVS_RAM_TAG     = 0x4b,
	A6XX_HLSQ_CHUNK_CPS_RAM_TAG     = 0x4c,
	A6XX_HLSQ_ICB_CVS_CB_BASE_TAG   = 0x4d,
	A6XX_HLSQ_ICB_CPS_CB_BASE_TAG   = 0x4e,
	A6XX_HLSQ_CVS_MISC_RAM          = 0x50,
	A6XX_HLSQ_CPS_MISC_RAM          = 0x51,
	A6XX_HLSQ_INST_RAM              = 0x52,
	A6XX_HLSQ_GFX_CVS_CONST_RAM     = 0x53,
	A6XX_HLSQ_GFX_CPS_CONST_RAM     = 0x54,
	A6XX_HLSQ_CVS_MISC_RAM_TAG      = 0x55,
	A6XX_HLSQ_CPS_MISC_RAM_TAG      = 0x56,
	A6XX_HLSQ_INST_RAM_TAG          = 0x57,
	A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG = 0x58,
	A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG = 0x59,
	A6XX_HLSQ_PWR_REST_RAM          = 0x5a,
	A6XX_HLSQ_PWR_REST_TAG          = 0x5b,
	A6XX_HLSQ_DATAPATH_META         = 0x60,
	A6XX_HLSQ_FRONTEND_META         = 0x61,
	A6XX_HLSQ_INDIRECT_META         = 0x62,
	A6XX_HLSQ_BACKEND_META          = 0x63
};

/* Describes one dumpable shader memory block */
struct a6xx_shader_block {
	unsigned int statetype;	/* a6xx_shader_obj id */
	unsigned int sz;	/* size of one bank */
	uint64_t offset;	/* offset in the crash dump buffer */
};

/* Pairs a shader block with the bank currently being dumped */
struct a6xx_shader_block_info {
	struct a6xx_shader_block *block;
	unsigned int bank;
	uint64_t offset;	/* block->offset + bank * sz (see a6xx_snapshot_shader) */
};
417
/*
 * All shader memory blocks to capture, with per-bank sizes.  The offset
 * field is filled in at runtime when the capture script is built.
 * NOTE(review): A6XX_HLSQ_BACKEND_META is defined but absent here —
 * confirm it is intentionally not dumped.
 */
static struct a6xx_shader_block a6xx_shader_blocks[] = {
	{A6XX_TP0_TMO_DATA,               0x200},
	{A6XX_TP0_SMO_DATA,               0x80,},
	{A6XX_TP0_MIPMAP_BASE_DATA,       0x3C0},
	{A6XX_TP1_TMO_DATA,               0x200},
	{A6XX_TP1_SMO_DATA,               0x80,},
	{A6XX_TP1_MIPMAP_BASE_DATA,       0x3C0},
	{A6XX_SP_INST_DATA,               0x800},
	{A6XX_SP_LB_0_DATA,               0x800},
	{A6XX_SP_LB_1_DATA,               0x800},
	{A6XX_SP_LB_2_DATA,               0x800},
	{A6XX_SP_LB_3_DATA,               0x800},
	{A6XX_SP_LB_4_DATA,               0x800},
	{A6XX_SP_LB_5_DATA,               0x200},
	{A6XX_SP_CB_BINDLESS_DATA,        0x2000},
	{A6XX_SP_CB_LEGACY_DATA,          0x280,},
	{A6XX_SP_UAV_DATA,                0x80,},
	{A6XX_SP_INST_TAG,                0x80,},
	{A6XX_SP_CB_BINDLESS_TAG,         0x80,},
	{A6XX_SP_TMO_UMO_TAG,             0x80,},
	{A6XX_SP_SMO_TAG,                 0x80},
	{A6XX_SP_STATE_DATA,              0x3F},
	{A6XX_HLSQ_CHUNK_CVS_RAM,         0x1C0},
	{A6XX_HLSQ_CHUNK_CPS_RAM,         0x280},
	{A6XX_HLSQ_CHUNK_CVS_RAM_TAG,     0x40,},
	{A6XX_HLSQ_CHUNK_CPS_RAM_TAG,     0x40,},
	{A6XX_HLSQ_ICB_CVS_CB_BASE_TAG,   0x4,},
	{A6XX_HLSQ_ICB_CPS_CB_BASE_TAG,   0x4,},
	{A6XX_HLSQ_CVS_MISC_RAM,          0x1C0},
	{A6XX_HLSQ_CPS_MISC_RAM,          0x580},
	{A6XX_HLSQ_INST_RAM,              0x800},
	{A6XX_HLSQ_GFX_CVS_CONST_RAM,     0x800},
	{A6XX_HLSQ_GFX_CPS_CONST_RAM,     0x800},
	{A6XX_HLSQ_CVS_MISC_RAM_TAG,      0x8,},
	{A6XX_HLSQ_CPS_MISC_RAM_TAG,      0x4,},
	{A6XX_HLSQ_INST_RAM_TAG,          0x80,},
	{A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0xC,},
	{A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x10},
	{A6XX_HLSQ_PWR_REST_RAM,          0x28},
	{A6XX_HLSQ_PWR_REST_TAG,          0x14},
	{A6XX_HLSQ_DATAPATH_META,         0x40,},
	{A6XX_HLSQ_FRONTEND_META,         0x40},
	{A6XX_HLSQ_INDIRECT_META,         0x40,}
};
462
/* Capture script and result buffer used by the CP crash dumper */
static struct kgsl_memdesc a6xx_capturescript;
static struct kgsl_memdesc a6xx_crashdump_registers;
/* True when the crash dumper ran and its staged data is usable */
static bool crash_dump_valid;
466
467static size_t a6xx_legacy_snapshot_registers(struct kgsl_device *device,
468 u8 *buf, size_t remain)
469{
470 struct kgsl_snapshot_registers regs = {
471 .regs = a6xx_registers,
472 .count = ARRAY_SIZE(a6xx_registers) / 2,
473 };
474
475 return kgsl_snapshot_dump_registers(device, buf, remain, &regs);
476}
477
/* Register range tables walked by the crash dumper for the REGS section */
static struct cdregs {
	const unsigned int *regs;	/* {start, end} pair table */
	unsigned int size;		/* total entries (2x number of pairs) */
} _a6xx_cd_registers[] = {
	{ a6xx_registers, ARRAY_SIZE(a6xx_registers) },
};

/* Number of registers covered by pair _i of table _a (inclusive range) */
#define REG_PAIR_COUNT(_a, _i) \
	(((_a)[(2 * (_i)) + 1] - (_a)[2 * (_i)]) + 1)
487
/*
 * a6xx_snapshot_registers() - Dump the global registers into a REGS
 * snapshot section as {addr, value} pairs, using the values staged by
 * the CP crash dumper; falls back to direct reads when the dump is not
 * valid.  Returns the number of bytes written to @buf, or 0 on overflow.
 */
static size_t a6xx_snapshot_registers(struct kgsl_device *device, u8 *buf,
		size_t remain, void *priv)
{
	struct kgsl_snapshot_regs *header = (struct kgsl_snapshot_regs *)buf;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	/* Crash dumper wrote the values contiguously from offset 0 */
	unsigned int *src = (unsigned int *)a6xx_crashdump_registers.hostptr;
	unsigned int i, j, k;
	unsigned int count = 0;

	if (crash_dump_valid == false)
		return a6xx_legacy_snapshot_registers(device, buf, remain);

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	remain -= sizeof(*header);

	for (i = 0; i < ARRAY_SIZE(_a6xx_cd_registers); i++) {
		struct cdregs *regs = &_a6xx_cd_registers[i];

		for (j = 0; j < regs->size / 2; j++) {
			unsigned int start = regs->regs[2 * j];
			unsigned int end = regs->regs[(2 * j) + 1];

			/* 8 bytes per register: address dword + value dword */
			if (remain < ((end - start) + 1) * 8) {
				SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
				goto out;
			}

			remain -= ((end - start) + 1) * 8;

			for (k = start; k <= end; k++, count++) {
				*data++ = k;
				*data++ = *src++;
			}
		}
	}

out:
	header->count = count;

	/* Return the size of the section */
	return (count * 8) + sizeof(*header);
}
534
/*
 * a6xx_snapshot_shader_memory() - Copy one shader block bank, staged
 * earlier by the crash dumper, into a SHADER snapshot section.
 * Returns the number of bytes written to @buf, or 0 on overflow.
 */
static size_t a6xx_snapshot_shader_memory(struct kgsl_device *device,
		u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_shader *header =
		(struct kgsl_snapshot_shader *) buf;
	struct a6xx_shader_block_info *info =
		(struct a6xx_shader_block_info *) priv;
	struct a6xx_shader_block *block = info->block;
	unsigned int *data = (unsigned int *) (buf + sizeof(*header));

	if (remain < SHADER_SECTION_SZ(block->sz)) {
		SNAPSHOT_ERR_NOMEM(device, "SHADER MEMORY");
		return 0;
	}

	header->type = block->statetype;
	header->index = info->bank;
	header->size = block->sz;

	/* The bank's data was staged at info->offset in the dump buffer */
	memcpy(data, a6xx_crashdump_registers.hostptr + info->offset,
		block->sz);

	return SHADER_SECTION_SZ(block->sz);
}
559
560static void a6xx_snapshot_shader(struct kgsl_device *device,
561 struct kgsl_snapshot *snapshot)
562{
563 unsigned int i, j;
564 struct a6xx_shader_block_info info;
565
566 /* Shader blocks can only be read by the crash dumper */
567 if (crash_dump_valid == false)
568 return;
569
570 for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
571 for (j = 0; j < A6XX_NUM_SHADER_BANKS; j++) {
572 info.block = &a6xx_shader_blocks[i];
573 info.bank = j;
574 info.offset = a6xx_shader_blocks[i].offset +
575 (j * a6xx_shader_blocks[i].sz);
576
577 /* Shader working/shadow memory */
578 kgsl_snapshot_add_section(device,
579 KGSL_SNAPSHOT_SECTION_SHADER,
580 snapshot, a6xx_snapshot_shader_memory, &info);
581 }
582 }
583}
584
/* Dump the CP mempool as an indexed-register snapshot section */
static void a6xx_snapshot_mempool(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot)
{
	unsigned int pool_size;
	u8 *buf = snapshot->ptr;

	/* Set the mempool size to 0 to stabilize it while dumping */
	kgsl_regread(device, A6XX_CP_MEM_POOL_SIZE, &pool_size);
	kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, 0);

	kgsl_snapshot_indexed_registers(device, snapshot,
		A6XX_CP_MEM_POOL_DBG_ADDR, A6XX_CP_MEM_POOL_DBG_DATA,
		0, 0x2060);

	/*
	 * Data at offset 0x2000 in the mempool section is the mempool size.
	 * Since we set it to 0, patch in the original size so that the data
	 * is consistent.  (snapshot->ptr advanced past buf only if the
	 * indexed-register section was actually written.)
	 */
	if (buf < snapshot->ptr) {
		unsigned int *data;

		/* Skip over the headers */
		buf += sizeof(struct kgsl_snapshot_section_header) +
				sizeof(struct kgsl_snapshot_indexed_regs);

		/* 0x2000 is a dword index into the dumped data */
		data = (unsigned int *)buf + 0x2000;
		*data = pool_size;
	}

	/* Restore the saved mempool size */
	kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, pool_size);
}
618
Lynus Vaz461e2382017-01-16 19:35:41 +0530619static inline unsigned int a6xx_read_dbgahb(struct kgsl_device *device,
620 unsigned int regbase, unsigned int reg)
621{
622 unsigned int read_reg = A6XX_HLSQ_DBG_AHB_READ_APERTURE +
623 reg - regbase / 4;
624 unsigned int val;
625
626 kgsl_regread(device, read_reg, &val);
627 return val;
628}
629
Lynus Vaz1e258612017-04-27 21:35:22 +0530630static size_t a6xx_legacy_snapshot_cluster_dbgahb(struct kgsl_device *device,
631 u8 *buf, size_t remain, void *priv)
Lynus Vaz461e2382017-01-16 19:35:41 +0530632{
633 struct kgsl_snapshot_mvc_regs *header =
634 (struct kgsl_snapshot_mvc_regs *)buf;
635 struct a6xx_cluster_dbgahb_regs_info *info =
636 (struct a6xx_cluster_dbgahb_regs_info *)priv;
637 struct a6xx_cluster_dbgahb_registers *cur_cluster = info->cluster;
638 unsigned int read_sel;
639 unsigned int data_size = 0;
640 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
641 int i, j;
642
643 if (remain < sizeof(*header)) {
644 SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
645 return 0;
646 }
647
648 remain -= sizeof(*header);
649
650 header->ctxt_id = info->ctxt_id;
651 header->cluster_id = cur_cluster->id;
652
653 read_sel = ((cur_cluster->statetype + info->ctxt_id * 2) & 0xff) << 8;
654 kgsl_regwrite(device, A6XX_HLSQ_DBG_READ_SEL, read_sel);
655
656 for (i = 0; i < cur_cluster->num_sets; i++) {
657 unsigned int start = cur_cluster->regs[2 * i];
658 unsigned int end = cur_cluster->regs[2 * i + 1];
659
660 if (remain < (end - start + 3) * 4) {
661 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
662 goto out;
663 }
664
665 remain -= (end - start + 3) * 4;
666 data_size += (end - start + 3) * 4;
667
668 *data++ = start | (1 << 31);
669 *data++ = end;
670
671 for (j = start; j <= end; j++) {
672 unsigned int val;
673
674 val = a6xx_read_dbgahb(device, cur_cluster->regbase, j);
675 *data++ = val;
676
677 }
678 }
679
680out:
681 return data_size + sizeof(*header);
682}
683
Lynus Vaz1e258612017-04-27 21:35:22 +0530684static size_t a6xx_snapshot_cluster_dbgahb(struct kgsl_device *device, u8 *buf,
685 size_t remain, void *priv)
686{
687 struct kgsl_snapshot_mvc_regs *header =
688 (struct kgsl_snapshot_mvc_regs *)buf;
689 struct a6xx_cluster_dbgahb_regs_info *info =
690 (struct a6xx_cluster_dbgahb_regs_info *)priv;
691 struct a6xx_cluster_dbgahb_registers *cluster = info->cluster;
692 unsigned int data_size = 0;
693 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
694 int i, j;
695 unsigned int *src;
696
697
698 if (crash_dump_valid == false)
699 return a6xx_legacy_snapshot_cluster_dbgahb(device, buf, remain,
700 info);
701
702 if (remain < sizeof(*header)) {
703 SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
704 return 0;
705 }
706
707 remain -= sizeof(*header);
708
709 header->ctxt_id = info->ctxt_id;
710 header->cluster_id = cluster->id;
711
712 src = (unsigned int *)(a6xx_crashdump_registers.hostptr +
713 (header->ctxt_id ? cluster->offset1 : cluster->offset0));
714
715 for (i = 0; i < cluster->num_sets; i++) {
716 unsigned int start;
717 unsigned int end;
718
719 start = cluster->regs[2 * i];
720 end = cluster->regs[2 * i + 1];
721
722 if (remain < (end - start + 3) * 4) {
723 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
724 goto out;
725 }
726
727 remain -= (end - start + 3) * 4;
728 data_size += (end - start + 3) * 4;
729
730 *data++ = start | (1 << 31);
731 *data++ = end;
732 for (j = start; j <= end; j++)
733 *data++ = *src++;
734 }
735out:
736 return data_size + sizeof(*header);
737}
738
/*
 * a6xx_legacy_snapshot_non_ctx_dbgahb() - Dump a non-context debug-AHB
 * register block as {addr, value} pairs by reading each register through
 * the HLSQ AHB aperture (used when the crash dumper did not run).
 * Returns the number of bytes written to @buf, or 0 on overflow.
 */
static size_t a6xx_legacy_snapshot_non_ctx_dbgahb(struct kgsl_device *device,
				u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_regs *header =
				(struct kgsl_snapshot_regs *)buf;
	struct a6xx_non_ctx_dbgahb_registers *regs =
				(struct a6xx_non_ctx_dbgahb_registers *)priv;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	int count = 0;
	unsigned int read_sel;
	int i, j;

	/* Figure out how many registers we are going to dump */
	for (i = 0; i < regs->num_sets; i++) {
		int start = regs->regs[i * 2];
		int end = regs->regs[i * 2 + 1];

		count += (end - start + 1);
	}

	if (remain < (count * 8) + sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	header->count = count;

	/* Select the non-context statetype in the read aperture */
	read_sel = (regs->statetype & 0xff) << 8;
	kgsl_regwrite(device, A6XX_HLSQ_DBG_READ_SEL, read_sel);

	for (i = 0; i < regs->num_sets; i++) {
		unsigned int start = regs->regs[2 * i];
		unsigned int end = regs->regs[2 * i + 1];

		for (j = start; j <= end; j++) {
			unsigned int val;

			val = a6xx_read_dbgahb(device, regs->regbase, j);
			*data++ = j;
			*data++ = val;

		}
	}
	return (count * 8) + sizeof(*header);
}
784
/*
 * a6xx_snapshot_non_ctx_dbgahb() - Dump a non-context debug-AHB register
 * block as {addr, value} pairs using the values staged in the crash dump
 * buffer, falling back to direct AHB reads when the dump is not valid.
 * Returns the number of bytes written to @buf, or 0 on overflow.
 */
static size_t a6xx_snapshot_non_ctx_dbgahb(struct kgsl_device *device, u8 *buf,
				size_t remain, void *priv)
{
	struct kgsl_snapshot_regs *header =
				(struct kgsl_snapshot_regs *)buf;
	struct a6xx_non_ctx_dbgahb_registers *regs =
				(struct a6xx_non_ctx_dbgahb_registers *)priv;
	unsigned int count = 0;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int i, k;
	unsigned int *src;

	if (crash_dump_valid == false)
		return a6xx_legacy_snapshot_non_ctx_dbgahb(device, buf, remain,
				regs);

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	remain -= sizeof(*header);

	/* This block's values were staged at regs->offset in the buffer */
	src = (unsigned int *)(a6xx_crashdump_registers.hostptr + regs->offset);

	for (i = 0; i < regs->num_sets; i++) {
		unsigned int start;
		unsigned int end;

		start = regs->regs[2 * i];
		end = regs->regs[(2 * i) + 1];

		/* 8 bytes per register: address dword + value dword */
		if (remain < (end - start + 1) * 8) {
			SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
			goto out;
		}

		remain -= ((end - start) + 1) * 8;

		for (k = start; k <= end; k++, count++) {
			*data++ = k;
			*data++ = *src++;
		}
	}
out:
	header->count = count;

	/* Return the size of the section */
	return (count * 8) + sizeof(*header);
}
835
Lynus Vaz461e2382017-01-16 19:35:41 +0530836static void a6xx_snapshot_dbgahb_regs(struct kgsl_device *device,
837 struct kgsl_snapshot *snapshot)
838{
839 int i, j;
840
841 for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
842 struct a6xx_cluster_dbgahb_registers *cluster =
843 &a6xx_dbgahb_ctx_clusters[i];
844 struct a6xx_cluster_dbgahb_regs_info info;
845
846 info.cluster = cluster;
847 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
848 info.ctxt_id = j;
849
850 kgsl_snapshot_add_section(device,
851 KGSL_SNAPSHOT_SECTION_MVC, snapshot,
852 a6xx_snapshot_cluster_dbgahb, &info);
853 }
854 }
855
856 for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
857 kgsl_snapshot_add_section(device,
858 KGSL_SNAPSHOT_SECTION_REGS, snapshot,
859 a6xx_snapshot_non_ctx_dbgahb, &a6xx_non_ctx_dbgahb[i]);
860 }
861}
862
Shrenuj Bansal41665402016-12-16 15:25:54 -0800863static size_t a6xx_legacy_snapshot_mvc(struct kgsl_device *device, u8 *buf,
864 size_t remain, void *priv)
865{
866 struct kgsl_snapshot_mvc_regs *header =
867 (struct kgsl_snapshot_mvc_regs *)buf;
868 struct a6xx_cluster_regs_info *info =
869 (struct a6xx_cluster_regs_info *)priv;
870 struct a6xx_cluster_registers *cur_cluster = info->cluster;
871 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
872 unsigned int ctxt = info->ctxt_id;
873 unsigned int start, end, i, j, aperture_cntl = 0;
874 unsigned int data_size = 0;
875
876 if (remain < sizeof(*header)) {
877 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
878 return 0;
879 }
880
881 remain -= sizeof(*header);
882
883 header->ctxt_id = info->ctxt_id;
884 header->cluster_id = cur_cluster->id;
885
886 /*
887 * Set the AHB control for the Host to read from the
888 * cluster/context for this iteration.
889 */
890 aperture_cntl = ((cur_cluster->id & 0x7) << 8) | (ctxt << 4) | ctxt;
891 kgsl_regwrite(device, A6XX_CP_APERTURE_CNTL_HOST, aperture_cntl);
892
893 for (i = 0; i < cur_cluster->num_sets; i++) {
894 start = cur_cluster->regs[2 * i];
895 end = cur_cluster->regs[2 * i + 1];
896
897 if (remain < (end - start + 3) * 4) {
898 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
899 goto out;
900 }
901
902 remain -= (end - start + 3) * 4;
903 data_size += (end - start + 3) * 4;
904
905 *data++ = start | (1 << 31);
906 *data++ = end;
907 for (j = start; j <= end; j++) {
908 unsigned int val;
909
910 kgsl_regread(device, j, &val);
911 *data++ = val;
912 }
913 }
914out:
915 return data_size + sizeof(*header);
916}
917
918static size_t a6xx_snapshot_mvc(struct kgsl_device *device, u8 *buf,
919 size_t remain, void *priv)
920{
921 struct kgsl_snapshot_mvc_regs *header =
922 (struct kgsl_snapshot_mvc_regs *)buf;
923 struct a6xx_cluster_regs_info *info =
924 (struct a6xx_cluster_regs_info *)priv;
925 struct a6xx_cluster_registers *cluster = info->cluster;
926 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
927 unsigned int *src;
928 int i, j;
929 unsigned int start, end;
930 size_t data_size = 0;
931
932 if (crash_dump_valid == false)
933 return a6xx_legacy_snapshot_mvc(device, buf, remain, info);
934
935 if (remain < sizeof(*header)) {
936 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
937 return 0;
938 }
939
940 remain -= sizeof(*header);
941
942 header->ctxt_id = info->ctxt_id;
943 header->cluster_id = cluster->id;
944
945 src = (unsigned int *)(a6xx_crashdump_registers.hostptr +
946 (header->ctxt_id ? cluster->offset1 : cluster->offset0));
947
948 for (i = 0; i < cluster->num_sets; i++) {
949 start = cluster->regs[2 * i];
950 end = cluster->regs[2 * i + 1];
951
952 if (remain < (end - start + 3) * 4) {
953 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
954 goto out;
955 }
956
957 remain -= (end - start + 3) * 4;
958 data_size += (end - start + 3) * 4;
959
960 *data++ = start | (1 << 31);
961 *data++ = end;
962 for (j = start; j <= end; j++)
963 *data++ = *src++;
964 }
965
966out:
967 return data_size + sizeof(*header);
968
969}
970
971static void a6xx_snapshot_mvc_regs(struct kgsl_device *device,
972 struct kgsl_snapshot *snapshot)
973{
974 int i, j;
975 struct a6xx_cluster_regs_info info;
976
977 for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
978 struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];
979
980 info.cluster = cluster;
981 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
982 info.ctxt_id = j;
983
984 kgsl_snapshot_add_section(device,
985 KGSL_SNAPSHOT_SECTION_MVC, snapshot,
986 a6xx_snapshot_mvc, &info);
987 }
988 }
989}
990
Lynus Vaz20c81272017-02-10 16:22:12 +0530991/* a6xx_dbgc_debug_bus_read() - Read data from trace bus */
992static void a6xx_dbgc_debug_bus_read(struct kgsl_device *device,
993 unsigned int block_id, unsigned int index, unsigned int *val)
994{
995 unsigned int reg;
996
997 reg = (block_id << A6XX_DBGC_CFG_DBGBUS_SEL_PING_BLK_SEL_SHIFT) |
998 (index << A6XX_DBGC_CFG_DBGBUS_SEL_PING_INDEX_SHIFT);
999
1000 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_A, reg);
1001 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_B, reg);
1002 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_C, reg);
1003 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_D, reg);
1004
Shrenuj Bansald4508ba2017-05-11 15:59:37 -07001005 /*
1006 * There needs to be a delay of 1 us to ensure enough time for correct
1007 * data is funneled into the trace buffer
1008 */
1009 udelay(1);
1010
Lynus Vaz20c81272017-02-10 16:22:12 +05301011 kgsl_regread(device, A6XX_DBGC_CFG_DBGBUS_TRACE_BUF2, val);
1012 val++;
1013 kgsl_regread(device, A6XX_DBGC_CFG_DBGBUS_TRACE_BUF1, val);
1014}
1015
1016/* a6xx_snapshot_cbgc_debugbus_block() - Capture debug data for a gpu block */
1017static size_t a6xx_snapshot_dbgc_debugbus_block(struct kgsl_device *device,
1018 u8 *buf, size_t remain, void *priv)
1019{
Lynus Vazecd472c2017-04-18 14:15:57 +05301020 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
Lynus Vaz20c81272017-02-10 16:22:12 +05301021 struct kgsl_snapshot_debugbus *header =
1022 (struct kgsl_snapshot_debugbus *)buf;
1023 struct adreno_debugbus_block *block = priv;
1024 int i;
1025 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
1026 unsigned int dwords;
Lynus Vazecd472c2017-04-18 14:15:57 +05301027 unsigned int block_id;
Lynus Vaz20c81272017-02-10 16:22:12 +05301028 size_t size;
1029
1030 dwords = block->dwords;
1031
1032 /* For a6xx each debug bus data unit is 2 DWORDS */
1033 size = (dwords * sizeof(unsigned int) * 2) + sizeof(*header);
1034
1035 if (remain < size) {
1036 SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
1037 return 0;
1038 }
1039
1040 header->id = block->block_id;
1041 header->count = dwords * 2;
1042
Lynus Vazecd472c2017-04-18 14:15:57 +05301043 block_id = block->block_id;
1044 /* GMU_GX data is read using the GMU_CX block id on A630 */
1045 if (adreno_is_a630(adreno_dev) &&
1046 (block_id == A6XX_DBGBUS_GMU_GX))
1047 block_id = A6XX_DBGBUS_GMU_CX;
1048
Lynus Vaz20c81272017-02-10 16:22:12 +05301049 for (i = 0; i < dwords; i++)
Lynus Vazecd472c2017-04-18 14:15:57 +05301050 a6xx_dbgc_debug_bus_read(device, block_id, i, &data[i*2]);
Lynus Vaz20c81272017-02-10 16:22:12 +05301051
1052 return size;
1053}
1054
/*
 * _cx_dbgc_regread() - Read one dword from the ioremap'd CX DBGC window
 * @offsetwords: Dword offset of the register in GPU register space
 * @value: Out pointer that receives the register contents
 *
 * Reads through the a6xx_cx_dbgc mapping (set up in
 * a6xx_snapshot_debugbus()). Offsets outside the mapped
 * [SEL_A, TRACE_BUF2] window are rejected with a WARN and *value is
 * left untouched.
 */
static void _cx_dbgc_regread(unsigned int offsetwords, unsigned int *value)
{
	void __iomem *reg;

	/* Reject anything outside the window that was ioremap'd */
	if (WARN((offsetwords < A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) ||
		(offsetwords > A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2),
		"Read beyond CX_DBGC block: 0x%x\n", offsetwords))
		return;

	/* Mapping starts at SEL_A; convert the dword offset to bytes */
	reg = a6xx_cx_dbgc +
		((offsetwords - A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) << 2);
	*value = __raw_readl(reg);

	/*
	 * ensure this read finishes before the next one.
	 * i.e. act like normal readl()
	 */
	rmb();
}
1074
/*
 * _cx_dbgc_regwrite() - Write one dword to the ioremap'd CX DBGC window
 * @offsetwords: Dword offset of the register in GPU register space
 * @value: Value to write
 *
 * Writes through the a6xx_cx_dbgc mapping (set up in
 * a6xx_snapshot_debugbus()). Offsets outside the mapped
 * [SEL_A, TRACE_BUF2] window are rejected with a WARN.
 */
static void _cx_dbgc_regwrite(unsigned int offsetwords, unsigned int value)
{
	void __iomem *reg;

	/* Reject anything outside the window that was ioremap'd */
	if (WARN((offsetwords < A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) ||
		(offsetwords > A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2),
		"Write beyond CX_DBGC block: 0x%x\n", offsetwords))
		return;

	/* Mapping starts at SEL_A; convert the dword offset to bytes */
	reg = a6xx_cx_dbgc +
		((offsetwords - A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) << 2);

	/*
	 * ensure previous writes post before this one,
	 * i.e. act like normal writel()
	 */
	wmb();
	__raw_writel(value, reg);
}
1094
1095/* a6xx_cx_dbgc_debug_bus_read() - Read data from trace bus */
1096static void a6xx_cx_debug_bus_read(struct kgsl_device *device,
1097 unsigned int block_id, unsigned int index, unsigned int *val)
1098{
1099 unsigned int reg;
1100
1101 reg = (block_id << A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_BLK_SEL_SHIFT) |
1102 (index << A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_INDEX_SHIFT);
1103
1104 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_A, reg);
1105 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_B, reg);
1106 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_C, reg);
1107 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_D, reg);
1108
Shrenuj Bansald4508ba2017-05-11 15:59:37 -07001109 /*
1110 * There needs to be a delay of 1 us to ensure enough time for correct
1111 * data is funneled into the trace buffer
1112 */
1113 udelay(1);
1114
Lynus Vazff24c972017-03-07 19:27:46 +05301115 _cx_dbgc_regread(A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2, val);
1116 val++;
1117 _cx_dbgc_regread(A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1, val);
1118}
1119
/*
 * a6xx_snapshot_cx_dbgc_debugbus_block() - Capture debug data for a gpu
 * block from the CX DBGC block
 *
 * Returns the number of bytes written (header plus data), or 0 if the
 * section does not fit in the remaining snapshot space.
 */
static size_t a6xx_snapshot_cx_dbgc_debugbus_block(struct kgsl_device *device,
	u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_debugbus *header =
		(struct kgsl_snapshot_debugbus *)buf;
	struct adreno_debugbus_block *block = priv;
	int i;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int dwords;
	size_t size;

	dwords = block->dwords;

	/* For a6xx each debug bus data unit is 2 DWORDS */
	size = (dwords * sizeof(unsigned int) * 2) + sizeof(*header);

	if (remain < size) {
		SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
		return 0;
	}

	header->id = block->block_id;
	header->count = dwords * 2;

	/* Each bus read yields two dwords, so stride the output by 2 */
	for (i = 0; i < dwords; i++)
		a6xx_cx_debug_bus_read(device, block->block_id, i,
					&data[i*2]);

	return size;
}
1154
/*
 * a6xx_snapshot_debugbus() - Capture debug bus data
 *
 * Programs the GX-side DBGC and (if it can be mapped) the CX-side DBGC
 * trace bus controls, then emits one DEBUGBUS snapshot section per
 * block in the respective block tables. The CX DBGC registers are not
 * reachable through the normal GPU register window, so they are
 * accessed through a temporary ioremap of that register range.
 */
static void a6xx_snapshot_debugbus(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot)
{
	int i;

	/* Trace control: segment 0xf, granularity 0, tracing disabled */
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_CNTLT,
		(0xf << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT_SHIFT) |
		(0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU_SHIFT) |
		(0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN_SHIFT));

	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_CNTLM,
		0xf << A6XX_DBGC_CFG_DBGBUS_CTLTM_ENABLE_SHIFT);

	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_0, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_1, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_2, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_3, 0);

	/* Identity byte-lane mapping: lane N selects byte N */
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_BYTEL_0,
		(0 << A6XX_DBGC_CFG_DBGBUS_BYTEL0_SHIFT) |
		(1 << A6XX_DBGC_CFG_DBGBUS_BYTEL1_SHIFT) |
		(2 << A6XX_DBGC_CFG_DBGBUS_BYTEL2_SHIFT) |
		(3 << A6XX_DBGC_CFG_DBGBUS_BYTEL3_SHIFT) |
		(4 << A6XX_DBGC_CFG_DBGBUS_BYTEL4_SHIFT) |
		(5 << A6XX_DBGC_CFG_DBGBUS_BYTEL5_SHIFT) |
		(6 << A6XX_DBGC_CFG_DBGBUS_BYTEL6_SHIFT) |
		(7 << A6XX_DBGC_CFG_DBGBUS_BYTEL7_SHIFT));
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_BYTEL_1,
		(8 << A6XX_DBGC_CFG_DBGBUS_BYTEL8_SHIFT) |
		(9 << A6XX_DBGC_CFG_DBGBUS_BYTEL9_SHIFT) |
		(10 << A6XX_DBGC_CFG_DBGBUS_BYTEL10_SHIFT) |
		(11 << A6XX_DBGC_CFG_DBGBUS_BYTEL11_SHIFT) |
		(12 << A6XX_DBGC_CFG_DBGBUS_BYTEL12_SHIFT) |
		(13 << A6XX_DBGC_CFG_DBGBUS_BYTEL13_SHIFT) |
		(14 << A6XX_DBGC_CFG_DBGBUS_BYTEL14_SHIFT) |
		(15 << A6XX_DBGC_CFG_DBGBUS_BYTEL15_SHIFT));

	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_0, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_1, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_2, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_3, 0);

	/* Map just the CX_DBGC register range [SEL_A, TRACE_BUF2] */
	a6xx_cx_dbgc = ioremap(device->reg_phys +
			(A6XX_CX_DBGC_CFG_DBGBUS_SEL_A << 2),
		(A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2 -
			A6XX_CX_DBGC_CFG_DBGBUS_SEL_A + 1) << 2);

	if (a6xx_cx_dbgc) {
		/*
		 * NOTE(review): this CNTLT write reuses the A6XX_DBGC_*
		 * shift macros rather than A6XX_CX_DBGC_* ones - presumably
		 * the bit layout is identical; confirm against a6xx_reg.h.
		 */
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_CNTLT,
			(0xf << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT_SHIFT) |
			(0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU_SHIFT) |
			(0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN_SHIFT));

		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_CNTLM,
			0xf << A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE_SHIFT);

		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3, 0);

		/* Identity byte-lane mapping, as for the GX DBGC above */
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0,
			(0 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL0_SHIFT) |
			(1 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL1_SHIFT) |
			(2 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL2_SHIFT) |
			(3 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL3_SHIFT) |
			(4 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL4_SHIFT) |
			(5 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL5_SHIFT) |
			(6 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL6_SHIFT) |
			(7 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL7_SHIFT));
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1,
			(8 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL8_SHIFT) |
			(9 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL9_SHIFT) |
			(10 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL10_SHIFT) |
			(11 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL11_SHIFT) |
			(12 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL12_SHIFT) |
			(13 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL13_SHIFT) |
			(14 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL14_SHIFT) |
			(15 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL15_SHIFT));

		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3, 0);
	} else
		KGSL_DRV_ERR(device, "Unable to ioremap CX_DBGC_CFG block\n");

	for (i = 0; i < ARRAY_SIZE(a6xx_dbgc_debugbus_blocks); i++) {
		kgsl_snapshot_add_section(device,
			KGSL_SNAPSHOT_SECTION_DEBUGBUS,
			snapshot, a6xx_snapshot_dbgc_debugbus_block,
			(void *) &a6xx_dbgc_debugbus_blocks[i]);
	}

	/* CX blocks are only dumped if the mapping above succeeded */
	if (a6xx_cx_dbgc) {
		for (i = 0; i < ARRAY_SIZE(a6xx_cx_dbgc_debugbus_blocks); i++) {
			kgsl_snapshot_add_section(device,
				KGSL_SNAPSHOT_SECTION_DEBUGBUS,
				snapshot, a6xx_snapshot_cx_dbgc_debugbus_block,
				(void *) &a6xx_cx_dbgc_debugbus_blocks[i]);
		}
		iounmap(a6xx_cx_dbgc);
	}
}
1260
Kyle Piefer60733aa2017-03-21 11:24:01 -07001261static size_t a6xx_snapshot_dump_gmu_registers(struct kgsl_device *device,
1262 u8 *buf, size_t remain, void *priv)
1263{
1264 struct kgsl_snapshot_regs *header = (struct kgsl_snapshot_regs *)buf;
1265 struct kgsl_snapshot_registers *regs = priv;
1266 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
1267 int count = 0, j, k;
1268
1269 /* Figure out how many registers we are going to dump */
1270 for (j = 0; j < regs->count; j++) {
1271 int start = regs->regs[j * 2];
1272 int end = regs->regs[j * 2 + 1];
1273
1274 count += (end - start + 1);
1275 }
1276
1277 if (remain < (count * 8) + sizeof(*header)) {
1278 SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
1279 return 0;
1280 }
1281
1282 for (j = 0; j < regs->count; j++) {
1283 unsigned int start = regs->regs[j * 2];
1284 unsigned int end = regs->regs[j * 2 + 1];
1285
1286 for (k = start; k <= end; k++) {
1287 unsigned int val;
1288
1289 kgsl_gmu_regread(device, k, &val);
1290 *data++ = k;
1291 *data++ = val;
1292 }
1293 }
1294
1295 header->count = count;
1296
1297 /* Return the size of the section */
1298 return (count * 8) + sizeof(*header);
1299}
1300
1301static void a6xx_snapshot_gmu(struct kgsl_device *device,
1302 struct kgsl_snapshot *snapshot)
1303{
1304 struct kgsl_snapshot_registers gmu_regs = {
1305 .regs = a6xx_gmu_registers,
1306 .count = ARRAY_SIZE(a6xx_gmu_registers) / 2,
1307 };
1308
1309 if (!kgsl_gmu_isenabled(device))
1310 return;
1311
1312 kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
1313 snapshot, a6xx_snapshot_dump_gmu_registers, &gmu_regs);
1314}
1315
Lynus Vaz85150052017-02-21 17:57:48 +05301316/* a6xx_snapshot_sqe() - Dump SQE data in snapshot */
1317static size_t a6xx_snapshot_sqe(struct kgsl_device *device, u8 *buf,
1318 size_t remain, void *priv)
1319{
1320 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
1321 struct kgsl_snapshot_debug *header = (struct kgsl_snapshot_debug *)buf;
1322 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
1323 struct adreno_firmware *fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);
1324
1325 if (remain < DEBUG_SECTION_SZ(1)) {
1326 SNAPSHOT_ERR_NOMEM(device, "SQE VERSION DEBUG");
1327 return 0;
1328 }
1329
1330 /* Dump the SQE firmware version */
1331 header->type = SNAPSHOT_DEBUG_SQE_VERSION;
1332 header->size = 1;
1333 *data = fw->version;
1334
1335 return DEBUG_SECTION_SZ(1);
1336}
1337
/*
 * _a6xx_do_crashdump() - Kick off the CP crash dumper and wait for it
 * @device: KGSL device to run the crash dumper on
 *
 * Points the CP at the prebuilt capture script, starts it, and polls
 * A6XX_CP_CRASH_DUMP_STATUS until the done bit (bit 1) is set or
 * CP_CRASH_DUMPER_TIMEOUT ms pass. Sets the file-scope crash_dump_valid
 * flag so the snapshot callbacks know whether the captured data in
 * a6xx_crashdump_registers can be trusted.
 */
static void _a6xx_do_crashdump(struct kgsl_device *device)
{
	unsigned long wait_time;
	unsigned int reg = 0;
	unsigned int val;

	/* Assume failure until the dumper reports completion */
	crash_dump_valid = false;

	/* Nothing to do if a6xx_crashdump_init() never allocated buffers */
	if (a6xx_capturescript.gpuaddr == 0 ||
		a6xx_crashdump_registers.gpuaddr == 0)
		return;

	/* IF the SMMU is stalled we cannot do a crash dump */
	kgsl_regread(device, A6XX_RBBM_STATUS3, &val);
	if (val & BIT(24))
		return;

	/* Turn on APRIV so we can access the buffers */
	kgsl_regwrite(device, A6XX_CP_MISC_CNTL, 1);

	kgsl_regwrite(device, A6XX_CP_CRASH_SCRIPT_BASE_LO,
			lower_32_bits(a6xx_capturescript.gpuaddr));
	kgsl_regwrite(device, A6XX_CP_CRASH_SCRIPT_BASE_HI,
			upper_32_bits(a6xx_capturescript.gpuaddr));
	kgsl_regwrite(device, A6XX_CP_CRASH_DUMP_CNTL, 1);

	/* Poll for the done bit until the timeout expires */
	wait_time = jiffies + msecs_to_jiffies(CP_CRASH_DUMPER_TIMEOUT);
	while (!time_after(jiffies, wait_time)) {
		kgsl_regread(device, A6XX_CP_CRASH_DUMP_STATUS, &reg);
		if (reg & 0x2)
			break;
		cpu_relax();
	}

	/* Turn APRIV back off before deciding success or failure */
	kgsl_regwrite(device, A6XX_CP_MISC_CNTL, 0);

	if (!(reg & 0x2)) {
		KGSL_CORE_ERR("Crash dump timed out: 0x%X\n", reg);
		return;
	}

	crash_dump_valid = true;
}
1381
/*
 * a6xx_snapshot() - A6XX GPU snapshot function
 * @adreno_dev: Device being snapshotted
 * @snapshot: Pointer to the snapshot instance
 *
 * This is where all of the A6XX specific bits and pieces are grabbed
 * into the snapshot memory. The crash dumper is run first so that the
 * register-section callbacks below can prefer its captured data over
 * direct AHB reads.
 */
void a6xx_snapshot(struct adreno_device *adreno_dev,
		struct kgsl_snapshot *snapshot)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	struct adreno_snapshot_data *snap_data = gpudev->snapshot_data;

	/* Try to run the crash dumper */
	_a6xx_do_crashdump(device);

	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
		snapshot, a6xx_snapshot_registers, NULL);

	adreno_snapshot_vbif_registers(device, snapshot,
		a6xx_vbif_snapshot_registers,
		ARRAY_SIZE(a6xx_vbif_snapshot_registers));

	/* CP_SQE indexed registers */
	kgsl_snapshot_indexed_registers(device, snapshot,
		A6XX_CP_SQE_STAT_ADDR, A6XX_CP_SQE_STAT_DATA,
		0, snap_data->sect_sizes->cp_pfp);

	/* CP_DRAW_STATE */
	kgsl_snapshot_indexed_registers(device, snapshot,
		A6XX_CP_DRAW_STATE_ADDR, A6XX_CP_DRAW_STATE_DATA,
		0, 0x100);

	/* SQE_UCODE Cache */
	kgsl_snapshot_indexed_registers(device, snapshot,
		A6XX_CP_SQE_UCODE_DBG_ADDR, A6XX_CP_SQE_UCODE_DBG_DATA,
		0, 0x6000);

	/* CP ROQ */
	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG,
		snapshot, adreno_snapshot_cp_roq,
		&snap_data->sect_sizes->roq);

	/* SQE Firmware */
	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG,
		snapshot, a6xx_snapshot_sqe, NULL);

	/* Mempool debug data */
	a6xx_snapshot_mempool(device, snapshot);

	/* Shader memory */
	a6xx_snapshot_shader(device, snapshot);

	/* MVC register section */
	a6xx_snapshot_mvc_regs(device, snapshot);

	/* registers dumped through DBG AHB */
	a6xx_snapshot_dbgahb_regs(device, snapshot);

	a6xx_snapshot_debugbus(device, snapshot);

	/* GMU TCM data dumped through AHB */
	a6xx_snapshot_gmu(device, snapshot);
}
1448
1449static int _a6xx_crashdump_init_mvc(uint64_t *ptr, uint64_t *offset)
1450{
1451 int qwords = 0;
1452 unsigned int i, j, k;
1453 unsigned int count;
1454
1455 for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
1456 struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];
1457
1458 cluster->offset0 = *offset;
1459 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
1460
1461 if (j == 1)
1462 cluster->offset1 = *offset;
1463
1464 ptr[qwords++] = (cluster->id << 8) | (j << 4) | j;
1465 ptr[qwords++] =
1466 ((uint64_t)A6XX_CP_APERTURE_CNTL_HOST << 44) |
1467 (1 << 21) | 1;
1468
1469 for (k = 0; k < cluster->num_sets; k++) {
1470 count = REG_PAIR_COUNT(cluster->regs, k);
1471 ptr[qwords++] =
1472 a6xx_crashdump_registers.gpuaddr + *offset;
1473 ptr[qwords++] =
1474 (((uint64_t)cluster->regs[2 * k]) << 44) |
1475 count;
1476
1477 *offset += count * sizeof(unsigned int);
1478 }
1479 }
1480 }
1481
1482 return qwords;
1483}
1484
Lynus Vaz9ad67a32017-03-10 14:55:02 +05301485static int _a6xx_crashdump_init_shader(struct a6xx_shader_block *block,
1486 uint64_t *ptr, uint64_t *offset)
1487{
1488 int qwords = 0;
1489 unsigned int j;
1490
1491 /* Capture each bank in the block */
1492 for (j = 0; j < A6XX_NUM_SHADER_BANKS; j++) {
1493 /* Program the aperture */
1494 ptr[qwords++] =
1495 (block->statetype << A6XX_SHADER_STATETYPE_SHIFT) | j;
1496 ptr[qwords++] = (((uint64_t) A6XX_HLSQ_DBG_READ_SEL << 44)) |
1497 (1 << 21) | 1;
1498
1499 /* Read all the data in one chunk */
1500 ptr[qwords++] = a6xx_crashdump_registers.gpuaddr + *offset;
1501 ptr[qwords++] =
1502 (((uint64_t) A6XX_HLSQ_DBG_AHB_READ_APERTURE << 44)) |
1503 block->sz;
1504
1505 /* Remember the offset of the first bank for easy access */
1506 if (j == 0)
1507 block->offset = *offset;
1508
1509 *offset += block->sz * sizeof(unsigned int);
1510 }
1511
1512 return qwords;
1513}
1514
Lynus Vaz1e258612017-04-27 21:35:22 +05301515static int _a6xx_crashdump_init_ctx_dbgahb(uint64_t *ptr, uint64_t *offset)
1516{
1517 int qwords = 0;
1518 unsigned int i, j, k;
1519 unsigned int count;
1520
1521 for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
1522 struct a6xx_cluster_dbgahb_registers *cluster =
1523 &a6xx_dbgahb_ctx_clusters[i];
1524
1525 cluster->offset0 = *offset;
1526
1527 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
1528 if (j == 1)
1529 cluster->offset1 = *offset;
1530
1531 /* Program the aperture */
1532 ptr[qwords++] =
1533 ((cluster->statetype + j * 2) & 0xff) << 8;
1534 ptr[qwords++] =
1535 (((uint64_t)A6XX_HLSQ_DBG_READ_SEL << 44)) |
1536 (1 << 21) | 1;
1537
1538 for (k = 0; k < cluster->num_sets; k++) {
1539 unsigned int start = cluster->regs[2 * k];
1540
1541 count = REG_PAIR_COUNT(cluster->regs, k);
1542 ptr[qwords++] =
1543 a6xx_crashdump_registers.gpuaddr + *offset;
1544 ptr[qwords++] =
1545 (((uint64_t)(A6XX_HLSQ_DBG_AHB_READ_APERTURE +
1546 start - cluster->regbase / 4) << 44)) |
1547 count;
1548
1549 *offset += count * sizeof(unsigned int);
1550 }
1551 }
1552 }
1553 return qwords;
1554}
1555
Harshdeep Dhatt52ccc942017-05-10 12:35:30 -06001556static int _a6xx_crashdump_init_non_ctx_dbgahb(uint64_t *ptr, uint64_t *offset)
1557{
1558 int qwords = 0;
1559 unsigned int i, k;
1560 unsigned int count;
1561
1562 for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
1563 struct a6xx_non_ctx_dbgahb_registers *regs =
1564 &a6xx_non_ctx_dbgahb[i];
1565
1566 regs->offset = *offset;
1567
1568 /* Program the aperture */
1569 ptr[qwords++] = (regs->statetype & 0xff) << 8;
1570 ptr[qwords++] = (((uint64_t)A6XX_HLSQ_DBG_READ_SEL << 44)) |
1571 (1 << 21) | 1;
1572
1573 for (k = 0; k < regs->num_sets; k++) {
1574 unsigned int start = regs->regs[2 * k];
1575
1576 count = REG_PAIR_COUNT(regs->regs, k);
1577 ptr[qwords++] =
1578 a6xx_crashdump_registers.gpuaddr + *offset;
1579 ptr[qwords++] =
1580 (((uint64_t)(A6XX_HLSQ_DBG_AHB_READ_APERTURE +
1581 start - regs->regbase / 4) << 44)) |
1582 count;
1583
1584 *offset += count * sizeof(unsigned int);
1585 }
1586 }
1587 return qwords;
1588}
1589
/*
 * a6xx_crashdump_init() - Allocate and build the CP crash dumper script
 * @adreno_dev: Adreno device to build the script for
 *
 * Sizes and allocates two global buffers (the read-only capture script
 * and the data buffer its commands write into), then fills the script
 * with commands covering the plain register list, shader banks, MVC
 * clusters, and context/non-context debug AHB registers. Idempotent:
 * returns immediately if the buffers already exist. On allocation
 * failure the dumper is simply left unavailable (snapshot falls back to
 * direct register reads).
 */
void a6xx_crashdump_init(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned int script_size = 0;
	unsigned int data_size = 0;
	unsigned int i, j, k;
	uint64_t *ptr;
	uint64_t offset = 0;

	if (a6xx_capturescript.gpuaddr != 0 &&
		a6xx_crashdump_registers.gpuaddr != 0)
		return;

	/*
	 * We need to allocate two buffers:
	 * 1 - the buffer to hold the capture script
	 * 2 - the buffer to hold the data
	 */

	/*
	 * To save the registers, we need 16 bytes per register pair for the
	 * script and a dword for each register in the data
	 */
	for (i = 0; i < ARRAY_SIZE(_a6xx_cd_registers); i++) {
		struct cdregs *regs = &_a6xx_cd_registers[i];

		/* Each pair needs 16 bytes (2 qwords) */
		script_size += (regs->size / 2) * 16;

		/* Each register needs a dword in the data */
		for (j = 0; j < regs->size / 2; j++)
			data_size += REG_PAIR_COUNT(regs->regs, j) *
				sizeof(unsigned int);

	}

	/*
	 * To save the shader blocks for each block in each type we need 32
	 * bytes for the script (16 bytes to program the aperture and 16 to
	 * read the data) and then a block specific number of bytes to hold
	 * the data
	 */
	for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
		script_size += 32 * A6XX_NUM_SHADER_BANKS;
		data_size += a6xx_shader_blocks[i].sz * sizeof(unsigned int) *
			A6XX_NUM_SHADER_BANKS;
	}

	/* Calculate the script and data size for MVC registers */
	for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
		struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];

		for (j = 0; j < A6XX_NUM_CTXTS; j++) {

			/* 16 bytes for programming the aperture */
			script_size += 16;

			/* Reading each pair of registers takes 16 bytes */
			script_size += 16 * cluster->num_sets;

			/* A dword per register read from the cluster list */
			for (k = 0; k < cluster->num_sets; k++)
				data_size += REG_PAIR_COUNT(cluster->regs, k) *
						sizeof(unsigned int);
		}
	}

	/* Calculate the script and data size for debug AHB registers */
	for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
		struct a6xx_cluster_dbgahb_registers *cluster =
				&a6xx_dbgahb_ctx_clusters[i];

		for (j = 0; j < A6XX_NUM_CTXTS; j++) {

			/* 16 bytes for programming the aperture */
			script_size += 16;

			/* Reading each pair of registers takes 16 bytes */
			script_size += 16 * cluster->num_sets;

			/* A dword per register read from the cluster list */
			for (k = 0; k < cluster->num_sets; k++)
				data_size += REG_PAIR_COUNT(cluster->regs, k) *
						sizeof(unsigned int);
		}
	}

	/*
	 * Calculate the script and data size for non context debug
	 * AHB registers
	 */
	for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
		struct a6xx_non_ctx_dbgahb_registers *regs =
				&a6xx_non_ctx_dbgahb[i];

		/* 16 bytes for programming the aperture */
		script_size += 16;

		/* Reading each pair of registers takes 16 bytes */
		script_size += 16 * regs->num_sets;

		/* A dword per register read from the cluster list */
		for (k = 0; k < regs->num_sets; k++)
			data_size += REG_PAIR_COUNT(regs->regs, k) *
				sizeof(unsigned int);
	}

	/* Now allocate the script and data buffers */

	/* The script buffer needs 2 extra qwords on the end for the
	 * terminating zero commands
	 */
	if (kgsl_allocate_global(device, &a6xx_capturescript,
		script_size + 16, KGSL_MEMFLAGS_GPUREADONLY,
		KGSL_MEMDESC_PRIVILEGED, "capturescript"))
		return;

	if (kgsl_allocate_global(device, &a6xx_crashdump_registers, data_size,
		0, KGSL_MEMDESC_PRIVILEGED, "capturescript_regs")) {
		kgsl_free_global(KGSL_DEVICE(adreno_dev), &a6xx_capturescript);
		return;
	}

	/* Build the crash script */

	ptr = (uint64_t *)a6xx_capturescript.hostptr;

	/* For the registers, program a read command for each pair */
	for (i = 0; i < ARRAY_SIZE(_a6xx_cd_registers); i++) {
		struct cdregs *regs = &_a6xx_cd_registers[i];

		for (j = 0; j < regs->size / 2; j++) {
			unsigned int r = REG_PAIR_COUNT(regs->regs, j);
			*ptr++ = a6xx_crashdump_registers.gpuaddr + offset;
			*ptr++ = (((uint64_t) regs->regs[2 * j]) << 44) | r;
			offset += r * sizeof(unsigned int);
		}
	}

	/* Program each shader block */
	for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
		ptr += _a6xx_crashdump_init_shader(&a6xx_shader_blocks[i], ptr,
							&offset);
	}

	/* Program the capturescript for the MVC registers */
	ptr += _a6xx_crashdump_init_mvc(ptr, &offset);

	ptr += _a6xx_crashdump_init_ctx_dbgahb(ptr, &offset);

	ptr += _a6xx_crashdump_init_non_ctx_dbgahb(ptr, &offset);

	/* Two zero qwords terminate the script */
	*ptr++ = 0;
	*ptr++ = 0;
}