/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/io.h>
#include "kgsl.h"
#include "adreno.h"
#include "kgsl_snapshot.h"
#include "adreno_snapshot.h"
#include "a6xx_reg.h"
#include "adreno_a6xx.h"
#include "kgsl_gmu.h"

#define A6XX_NUM_CTXTS 2

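/*
 * All of the cluster register lists below are pairs of values: the first
 * is the start offset and the second is the end offset (inclusive), so
 * num_sets is always ARRAY_SIZE(list) / 2.
 */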
static const unsigned int a6xx_gras_cluster[] = {
	0x8000, 0x8006, 0x8010, 0x8092, 0x8094, 0x809D, 0x80A0, 0x80A6,
	0x80AF, 0x80F1, 0x8100, 0x8107, 0x8109, 0x8109, 0x8110, 0x8110,
	0x8400, 0x840B,
};

static const unsigned int a6xx_ps_cluster[] = {
	0x8800, 0x8806, 0x8809, 0x8811, 0x8818, 0x881E, 0x8820, 0x8865,
	0x8870, 0x8879, 0x8880, 0x8889, 0x8890, 0x8891, 0x8898, 0x8898,
	0x88C0, 0x88C1, 0x88D0, 0x88E3, 0x88F0, 0x88F3, 0x8900, 0x891A,
	0x8927, 0x8928, 0x8C00, 0x8C01, 0x8C17, 0x8C33, 0x9200, 0x9216,
	0x9218, 0x9236, 0x9300, 0x9306,
};

static const unsigned int a6xx_fe_cluster[] = {
	0x9300, 0x9306, 0x9800, 0x9806, 0x9B00, 0x9B07, 0xA000, 0xA009,
	0xA00E, 0xA0EF, 0xA0F8, 0xA0F8,
};

static const unsigned int a6xx_pc_vs_cluster[] = {
	0x9100, 0x9108, 0x9300, 0x9306, 0x9980, 0x9981, 0x9B00, 0x9B07,
};

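/*
 * offset0 and offset1 record where the context 0 and context 1 data for
 * each cluster lands in the crash dump buffer; both are filled in by
 * _a6xx_crashdump_init_mvc() when the capture script is built.
 */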
static struct a6xx_cluster_registers {
	unsigned int id;
	const unsigned int *regs;
	unsigned int num_sets;
	unsigned int offset0;
	unsigned int offset1;
} a6xx_clusters[] = {
	{ CP_CLUSTER_GRAS, a6xx_gras_cluster, ARRAY_SIZE(a6xx_gras_cluster)/2 },
	{ CP_CLUSTER_PS, a6xx_ps_cluster, ARRAY_SIZE(a6xx_ps_cluster)/2 },
	{ CP_CLUSTER_FE, a6xx_fe_cluster, ARRAY_SIZE(a6xx_fe_cluster)/2 },
	{ CP_CLUSTER_PC_VS, a6xx_pc_vs_cluster,
		ARRAY_SIZE(a6xx_pc_vs_cluster)/2 },
};

struct a6xx_cluster_regs_info {
	struct a6xx_cluster_registers *cluster;
	unsigned int ctxt_id;
};

static const unsigned int a6xx_sp_vs_hlsq_cluster[] = {
	0xB800, 0xB803, 0xB820, 0xB822,
};

static const unsigned int a6xx_sp_vs_sp_cluster[] = {
	0xA800, 0xA824, 0xA830, 0xA83C, 0xA840, 0xA864, 0xA870, 0xA895,
	0xA8A0, 0xA8AF, 0xA8C0, 0xA8C3,
};

static const unsigned int a6xx_hlsq_duplicate_cluster[] = {
	0xBB10, 0xBB11, 0xBB20, 0xBB29,
};

static const unsigned int a6xx_hlsq_2d_duplicate_cluster[] = {
	0xBD80, 0xBD80,
};

static const unsigned int a6xx_sp_duplicate_cluster[] = {
	0xAB00, 0xAB00, 0xAB04, 0xAB05, 0xAB10, 0xAB1B, 0xAB20, 0xAB20,
};

static const unsigned int a6xx_tp_duplicate_cluster[] = {
	0xB300, 0xB307, 0xB309, 0xB309, 0xB380, 0xB382,
};

static const unsigned int a6xx_sp_ps_hlsq_cluster[] = {
	0xB980, 0xB980, 0xB982, 0xB987, 0xB990, 0xB99B, 0xB9A0, 0xB9A2,
	0xB9C0, 0xB9C9,
};

static const unsigned int a6xx_sp_ps_hlsq_2d_cluster[] = {
	0xBD80, 0xBD80,
};

static const unsigned int a6xx_sp_ps_sp_cluster[] = {
	0xA980, 0xA9A8, 0xA9B0, 0xA9BC, 0xA9D0, 0xA9D3, 0xA9E0, 0xA9F3,
	0xAA00, 0xAA00, 0xAA30, 0xAA31,
};

static const unsigned int a6xx_sp_ps_sp_2d_cluster[] = {
	0xACC0, 0xACC0,
};

static const unsigned int a6xx_sp_ps_tp_cluster[] = {
	0xB180, 0xB183, 0xB190, 0xB191,
};

static const unsigned int a6xx_sp_ps_tp_2d_cluster[] = {
	0xB4C0, 0xB4D1,
};

static struct a6xx_cluster_dbgahb_registers {
	unsigned int id;
	unsigned int regbase;
	unsigned int statetype;
	const unsigned int *regs;
	unsigned int num_sets;
} a6xx_dbgahb_ctx_clusters[] = {
	{ CP_CLUSTER_SP_VS, 0x0002E000, 0x41, a6xx_sp_vs_hlsq_cluster,
		ARRAY_SIZE(a6xx_sp_vs_hlsq_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002A000, 0x21, a6xx_sp_vs_sp_cluster,
		ARRAY_SIZE(a6xx_sp_vs_sp_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002E000, 0x41, a6xx_hlsq_duplicate_cluster,
		ARRAY_SIZE(a6xx_hlsq_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002F000, 0x45, a6xx_hlsq_2d_duplicate_cluster,
		ARRAY_SIZE(a6xx_hlsq_2d_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002A000, 0x21, a6xx_sp_duplicate_cluster,
		ARRAY_SIZE(a6xx_sp_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002C000, 0x1, a6xx_tp_duplicate_cluster,
		ARRAY_SIZE(a6xx_tp_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002E000, 0x42, a6xx_sp_ps_hlsq_cluster,
		ARRAY_SIZE(a6xx_sp_ps_hlsq_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002F000, 0x46, a6xx_sp_ps_hlsq_2d_cluster,
		ARRAY_SIZE(a6xx_sp_ps_hlsq_2d_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002A000, 0x22, a6xx_sp_ps_sp_cluster,
		ARRAY_SIZE(a6xx_sp_ps_sp_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002B000, 0x26, a6xx_sp_ps_sp_2d_cluster,
		ARRAY_SIZE(a6xx_sp_ps_sp_2d_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002C000, 0x2, a6xx_sp_ps_tp_cluster,
		ARRAY_SIZE(a6xx_sp_ps_tp_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002D000, 0x6, a6xx_sp_ps_tp_2d_cluster,
		ARRAY_SIZE(a6xx_sp_ps_tp_2d_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002E000, 0x42, a6xx_hlsq_duplicate_cluster,
		ARRAY_SIZE(a6xx_hlsq_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002A000, 0x22, a6xx_sp_duplicate_cluster,
		ARRAY_SIZE(a6xx_sp_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002C000, 0x2, a6xx_tp_duplicate_cluster,
		ARRAY_SIZE(a6xx_tp_duplicate_cluster) / 2 },
};

struct a6xx_cluster_dbgahb_regs_info {
	struct a6xx_cluster_dbgahb_registers *cluster;
	unsigned int ctxt_id;
};

static const unsigned int a6xx_hlsq_non_ctx_registers[] = {
	0xBE00, 0xBE01, 0xBE04, 0xBE05, 0xBE08, 0xBE09, 0xBE10, 0xBE15,
	0xBE20, 0xBE23,
};

static const unsigned int a6xx_sp_non_ctx_registers[] = {
	0xAE00, 0xAE04, 0xAE0C, 0xAE0C, 0xAE0F, 0xAE2B, 0xAE30, 0xAE32,
	0xAE35, 0xAE35, 0xAE3A, 0xAE3F, 0xAE50, 0xAE52,
};

static const unsigned int a6xx_tp_non_ctx_registers[] = {
	0xB600, 0xB601, 0xB604, 0xB605, 0xB610, 0xB61B, 0xB620, 0xB623,
};

static struct a6xx_non_ctx_dbgahb_registers {
	unsigned int regbase;
	unsigned int statetype;
	const unsigned int *regs;
	unsigned int num_sets;
} a6xx_non_ctx_dbgahb[] = {
	{ 0x0002F800, 0x40, a6xx_hlsq_non_ctx_registers,
		ARRAY_SIZE(a6xx_hlsq_non_ctx_registers) / 2 },
	{ 0x0002B800, 0x20, a6xx_sp_non_ctx_registers,
		ARRAY_SIZE(a6xx_sp_non_ctx_registers) / 2 },
	{ 0x0002D800, 0x0, a6xx_tp_non_ctx_registers,
		ARRAY_SIZE(a6xx_tp_non_ctx_registers) / 2 },
};

static const unsigned int a6xx_vbif_ver_20xxxxxx_registers[] = {
	/* VBIF */
	0x3000, 0x3007, 0x300C, 0x3014, 0x3018, 0x302D, 0x3030, 0x3031,
	0x3034, 0x3036, 0x303C, 0x303D, 0x3040, 0x3040, 0x3042, 0x3042,
	0x3049, 0x3049, 0x3058, 0x3058, 0x305A, 0x3061, 0x3064, 0x3068,
	0x306C, 0x306D, 0x3080, 0x3088, 0x308B, 0x308C, 0x3090, 0x3094,
	0x3098, 0x3098, 0x309C, 0x309C, 0x30C0, 0x30C0, 0x30C8, 0x30C8,
	0x30D0, 0x30D0, 0x30D8, 0x30D8, 0x30E0, 0x30E0, 0x3100, 0x3100,
	0x3108, 0x3108, 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120,
	0x3124, 0x3125, 0x3129, 0x3129, 0x3131, 0x3131, 0x3154, 0x3154,
	0x3156, 0x3156, 0x3158, 0x3158, 0x315A, 0x315A, 0x315C, 0x315C,
	0x315E, 0x315E, 0x3160, 0x3160, 0x3162, 0x3162, 0x340C, 0x340C,
	0x3410, 0x3410, 0x3800, 0x3801,
};

static const unsigned int a6xx_gmu_registers[] = {
	/* GMU */
	0x1B400, 0x1C3FF, 0x1C400, 0x1D3FF,
};

static const struct adreno_vbif_snapshot_registers
a6xx_vbif_snapshot_registers[] = {
	{ 0x20040000, 0xFF000000, a6xx_vbif_ver_20xxxxxx_registers,
		ARRAY_SIZE(a6xx_vbif_ver_20xxxxxx_registers)/2},
};

/*
 * Set of registers to dump for A6XX on snapshot.
 * Registers in pairs - first value is the start offset, second
 * is the stop offset (inclusive)
 */

static const unsigned int a6xx_registers[] = {
	/* RBBM */
	0x0000, 0x0002, 0x0010, 0x0010, 0x0012, 0x0012, 0x0018, 0x001B,
	0x001E, 0x0032, 0x0038, 0x003C, 0x0042, 0x0042, 0x0044, 0x0044,
	0x0047, 0x0047, 0x0056, 0x0056, 0x00AD, 0x00AE, 0x00B0, 0x00FB,
	0x0100, 0x011D, 0x0200, 0x020D, 0x0210, 0x0213, 0x0218, 0x023D,
	0x0400, 0x04F9, 0x0500, 0x0500, 0x0505, 0x050B, 0x050E, 0x0511,
	0x0533, 0x0533, 0x0540, 0x0555,
	/* CP */
	0x0800, 0x0808, 0x0810, 0x0813, 0x0820, 0x0821, 0x0823, 0x0827,
	0x0830, 0x0833, 0x0840, 0x0843, 0x084F, 0x086F, 0x0880, 0x088A,
	0x08A0, 0x08AB, 0x08C0, 0x08C4, 0x08D0, 0x08DD, 0x08F0, 0x08F3,
	0x0900, 0x0903, 0x0908, 0x0911, 0x0928, 0x093E, 0x0942, 0x094D,
	0x0980, 0x0984, 0x098D, 0x0996, 0x0998, 0x099E, 0x09A0, 0x09A6,
	0x09A8, 0x09AE, 0x09B0, 0x09B1, 0x09C2, 0x09C8, 0x0A00, 0x0A03,
	/* VSC */
	0x0C00, 0x0C04, 0x0C06, 0x0C06, 0x0C10, 0x0CD9, 0x0E00, 0x0E0E,
	/* UCHE */
	0x0E10, 0x0E13, 0x0E17, 0x0E19, 0x0E1C, 0x0E2B, 0x0E30, 0x0E32,
	0x0E38, 0x0E39,
	/* GRAS */
	0x8600, 0x8601, 0x8610, 0x861B, 0x8620, 0x8620, 0x8628, 0x862B,
	0x8630, 0x8637,
	/* RB */
	0x8E01, 0x8E01, 0x8E04, 0x8E05, 0x8E07, 0x8E08, 0x8E0C, 0x8E0C,
	0x8E10, 0x8E1C, 0x8E20, 0x8E25, 0x8E28, 0x8E28, 0x8E2C, 0x8E2F,
	0x8E3B, 0x8E3E, 0x8E40, 0x8E43, 0x8E50, 0x8E5E, 0x8E70, 0x8E77,
	/* VPC */
	0x9600, 0x9604, 0x9624, 0x9637,
	/* PC */
	0x9E00, 0x9E01, 0x9E03, 0x9E0E, 0x9E11, 0x9E16, 0x9E19, 0x9E19,
	0x9E1C, 0x9E1C, 0x9E20, 0x9E23, 0x9E30, 0x9E31, 0x9E34, 0x9E34,
	0x9E70, 0x9E72, 0x9E78, 0x9E79, 0x9E80, 0x9FFF,
	/* VFD */
	0xA600, 0xA601, 0xA603, 0xA603, 0xA60A, 0xA60A, 0xA610, 0xA617,
	0xA630, 0xA630,
};

enum a6xx_debugbus_id {
	A6XX_DBGBUS_CP = 0x1,
	A6XX_DBGBUS_RBBM = 0x2,
	A6XX_DBGBUS_VBIF = 0x3,
	A6XX_DBGBUS_HLSQ = 0x4,
	A6XX_DBGBUS_UCHE = 0x5,
	A6XX_DBGBUS_DPM = 0x6,
	A6XX_DBGBUS_TESS = 0x7,
	A6XX_DBGBUS_PC = 0x8,
	A6XX_DBGBUS_VFDP = 0x9,
	A6XX_DBGBUS_VPC = 0xa,
	A6XX_DBGBUS_TSE = 0xb,
	A6XX_DBGBUS_RAS = 0xc,
	A6XX_DBGBUS_VSC = 0xd,
	A6XX_DBGBUS_COM = 0xe,
	A6XX_DBGBUS_LRZ = 0x10,
	A6XX_DBGBUS_A2D = 0x11,
	A6XX_DBGBUS_CCUFCHE = 0x12,
	A6XX_DBGBUS_GMU_CX = 0x13,
	A6XX_DBGBUS_RBP = 0x14,
	A6XX_DBGBUS_DCS = 0x15,
	A6XX_DBGBUS_RBBM_CFG = 0x16,
	A6XX_DBGBUS_CX = 0x17,
	A6XX_DBGBUS_GMU_GX = 0x18,
	A6XX_DBGBUS_TPFCHE = 0x19,
	A6XX_DBGBUS_GPC = 0x1d,
	A6XX_DBGBUS_LARC = 0x1e,
	A6XX_DBGBUS_HLSQ_SPTP = 0x1f,
	A6XX_DBGBUS_RB_0 = 0x20,
	A6XX_DBGBUS_RB_1 = 0x21,
	A6XX_DBGBUS_UCHE_WRAPPER = 0x24,
	A6XX_DBGBUS_CCU_0 = 0x28,
	A6XX_DBGBUS_CCU_1 = 0x29,
	A6XX_DBGBUS_VFD_0 = 0x38,
	A6XX_DBGBUS_VFD_1 = 0x39,
	A6XX_DBGBUS_VFD_2 = 0x3a,
	A6XX_DBGBUS_VFD_3 = 0x3b,
	A6XX_DBGBUS_SP_0 = 0x40,
	A6XX_DBGBUS_SP_1 = 0x41,
	A6XX_DBGBUS_TPL1_0 = 0x48,
	A6XX_DBGBUS_TPL1_1 = 0x49,
	A6XX_DBGBUS_TPL1_2 = 0x4a,
	A6XX_DBGBUS_TPL1_3 = 0x4b,
};

static const struct adreno_debugbus_block a6xx_dbgc_debugbus_blocks[] = {
	{ A6XX_DBGBUS_CP, 0x100, },
	{ A6XX_DBGBUS_RBBM, 0x100, },
	{ A6XX_DBGBUS_HLSQ, 0x100, },
	{ A6XX_DBGBUS_UCHE, 0x100, },
	{ A6XX_DBGBUS_DPM, 0x100, },
	{ A6XX_DBGBUS_TESS, 0x100, },
	{ A6XX_DBGBUS_PC, 0x100, },
	{ A6XX_DBGBUS_VFDP, 0x100, },
	{ A6XX_DBGBUS_VPC, 0x100, },
	{ A6XX_DBGBUS_TSE, 0x100, },
	{ A6XX_DBGBUS_RAS, 0x100, },
	{ A6XX_DBGBUS_VSC, 0x100, },
	{ A6XX_DBGBUS_COM, 0x100, },
	{ A6XX_DBGBUS_LRZ, 0x100, },
	{ A6XX_DBGBUS_A2D, 0x100, },
	{ A6XX_DBGBUS_CCUFCHE, 0x100, },
	{ A6XX_DBGBUS_RBP, 0x100, },
	{ A6XX_DBGBUS_DCS, 0x100, },
	{ A6XX_DBGBUS_RBBM_CFG, 0x100, },
	{ A6XX_DBGBUS_GMU_GX, 0x100, },
	{ A6XX_DBGBUS_TPFCHE, 0x100, },
	{ A6XX_DBGBUS_GPC, 0x100, },
	{ A6XX_DBGBUS_LARC, 0x100, },
	{ A6XX_DBGBUS_HLSQ_SPTP, 0x100, },
	{ A6XX_DBGBUS_RB_0, 0x100, },
	{ A6XX_DBGBUS_RB_1, 0x100, },
	{ A6XX_DBGBUS_UCHE_WRAPPER, 0x100, },
	{ A6XX_DBGBUS_CCU_0, 0x100, },
	{ A6XX_DBGBUS_CCU_1, 0x100, },
	{ A6XX_DBGBUS_VFD_0, 0x100, },
	{ A6XX_DBGBUS_VFD_1, 0x100, },
	{ A6XX_DBGBUS_VFD_2, 0x100, },
	{ A6XX_DBGBUS_VFD_3, 0x100, },
	{ A6XX_DBGBUS_SP_0, 0x100, },
	{ A6XX_DBGBUS_SP_1, 0x100, },
	{ A6XX_DBGBUS_TPL1_0, 0x100, },
	{ A6XX_DBGBUS_TPL1_1, 0x100, },
	{ A6XX_DBGBUS_TPL1_2, 0x100, },
	{ A6XX_DBGBUS_TPL1_3, 0x100, },
};

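/* CX DBGC register block, mapped on demand in a6xx_snapshot_debugbus() */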
static void __iomem *a6xx_cx_dbgc;
static const struct adreno_debugbus_block a6xx_cx_dbgc_debugbus_blocks[] = {
	{ A6XX_DBGBUS_VBIF, 0x100, },
	{ A6XX_DBGBUS_GMU_CX, 0x100, },
	{ A6XX_DBGBUS_CX, 0x100, },
};

#define A6XX_NUM_SHADER_BANKS 3
#define A6XX_SHADER_STATETYPE_SHIFT 8

enum a6xx_shader_obj {
	A6XX_TP0_TMO_DATA = 0x9,
	A6XX_TP0_SMO_DATA = 0xa,
	A6XX_TP0_MIPMAP_BASE_DATA = 0xb,
	A6XX_TP1_TMO_DATA = 0x19,
	A6XX_TP1_SMO_DATA = 0x1a,
	A6XX_TP1_MIPMAP_BASE_DATA = 0x1b,
	A6XX_SP_INST_DATA = 0x29,
	A6XX_SP_LB_0_DATA = 0x2a,
	A6XX_SP_LB_1_DATA = 0x2b,
	A6XX_SP_LB_2_DATA = 0x2c,
	A6XX_SP_LB_3_DATA = 0x2d,
	A6XX_SP_LB_4_DATA = 0x2e,
	A6XX_SP_LB_5_DATA = 0x2f,
	A6XX_SP_CB_BINDLESS_DATA = 0x30,
	A6XX_SP_CB_LEGACY_DATA = 0x31,
	A6XX_SP_UAV_DATA = 0x32,
	A6XX_SP_INST_TAG = 0x33,
	A6XX_SP_CB_BINDLESS_TAG = 0x34,
	A6XX_SP_TMO_UMO_TAG = 0x35,
	A6XX_SP_SMO_TAG = 0x36,
	A6XX_SP_STATE_DATA = 0x37,
	A6XX_HLSQ_CHUNK_CVS_RAM = 0x49,
	A6XX_HLSQ_CHUNK_CPS_RAM = 0x4a,
	A6XX_HLSQ_CHUNK_CVS_RAM_TAG = 0x4b,
	A6XX_HLSQ_CHUNK_CPS_RAM_TAG = 0x4c,
	A6XX_HLSQ_ICB_CVS_CB_BASE_TAG = 0x4d,
	A6XX_HLSQ_ICB_CPS_CB_BASE_TAG = 0x4e,
	A6XX_HLSQ_CVS_MISC_RAM = 0x50,
	A6XX_HLSQ_CPS_MISC_RAM = 0x51,
	A6XX_HLSQ_INST_RAM = 0x52,
	A6XX_HLSQ_GFX_CVS_CONST_RAM = 0x53,
	A6XX_HLSQ_GFX_CPS_CONST_RAM = 0x54,
	A6XX_HLSQ_CVS_MISC_RAM_TAG = 0x55,
	A6XX_HLSQ_CPS_MISC_RAM_TAG = 0x56,
	A6XX_HLSQ_INST_RAM_TAG = 0x57,
	A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG = 0x58,
	A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG = 0x59,
	A6XX_HLSQ_PWR_REST_RAM = 0x5a,
	A6XX_HLSQ_PWR_REST_TAG = 0x5b,
	A6XX_HLSQ_DATAPATH_META = 0x60,
	A6XX_HLSQ_FRONTEND_META = 0x61,
	A6XX_HLSQ_INDIRECT_META = 0x62,
	A6XX_HLSQ_BACKEND_META = 0x63
};

struct a6xx_shader_block {
	unsigned int statetype;
	unsigned int sz;
	uint64_t offset;
};

struct a6xx_shader_block_info {
	struct a6xx_shader_block *block;
	unsigned int bank;
	uint64_t offset;
};

static struct a6xx_shader_block a6xx_shader_blocks[] = {
	{A6XX_TP0_TMO_DATA, 0x200},
	{A6XX_TP0_SMO_DATA, 0x80},
	{A6XX_TP0_MIPMAP_BASE_DATA, 0x3C0},
	{A6XX_TP1_TMO_DATA, 0x200},
	{A6XX_TP1_SMO_DATA, 0x80},
	{A6XX_TP1_MIPMAP_BASE_DATA, 0x3C0},
	{A6XX_SP_INST_DATA, 0x800},
	{A6XX_SP_LB_0_DATA, 0x800},
	{A6XX_SP_LB_1_DATA, 0x800},
	{A6XX_SP_LB_2_DATA, 0x800},
	{A6XX_SP_LB_3_DATA, 0x800},
	{A6XX_SP_LB_4_DATA, 0x800},
	{A6XX_SP_LB_5_DATA, 0x200},
	{A6XX_SP_CB_BINDLESS_DATA, 0x2000},
	{A6XX_SP_CB_LEGACY_DATA, 0x280},
	{A6XX_SP_UAV_DATA, 0x80},
	{A6XX_SP_INST_TAG, 0x80},
	{A6XX_SP_CB_BINDLESS_TAG, 0x80},
	{A6XX_SP_TMO_UMO_TAG, 0x80},
	{A6XX_SP_SMO_TAG, 0x80},
	{A6XX_SP_STATE_DATA, 0x3F},
	{A6XX_HLSQ_CHUNK_CVS_RAM, 0x1C0},
	{A6XX_HLSQ_CHUNK_CPS_RAM, 0x280},
	{A6XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40},
	{A6XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40},
	{A6XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x4},
	{A6XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x4},
	{A6XX_HLSQ_CVS_MISC_RAM, 0x1C0},
	{A6XX_HLSQ_CPS_MISC_RAM, 0x580},
	{A6XX_HLSQ_INST_RAM, 0x800},
	{A6XX_HLSQ_GFX_CVS_CONST_RAM, 0x800},
	{A6XX_HLSQ_GFX_CPS_CONST_RAM, 0x800},
	{A6XX_HLSQ_CVS_MISC_RAM_TAG, 0x8},
	{A6XX_HLSQ_CPS_MISC_RAM_TAG, 0x4},
	{A6XX_HLSQ_INST_RAM_TAG, 0x80},
	{A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0xC},
	{A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x10},
	{A6XX_HLSQ_PWR_REST_RAM, 0x28},
	{A6XX_HLSQ_PWR_REST_TAG, 0x14},
	{A6XX_HLSQ_DATAPATH_META, 0x40},
	{A6XX_HLSQ_FRONTEND_META, 0x40},
	{A6XX_HLSQ_INDIRECT_META, 0x40}
};

static struct kgsl_memdesc a6xx_capturescript;
static struct kgsl_memdesc a6xx_crashdump_registers;
static bool crash_dump_valid;

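/*
 * Fallback path: read the registers directly from the CPU when the crash
 * dumper output is not available.
 */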
static size_t a6xx_legacy_snapshot_registers(struct kgsl_device *device,
		u8 *buf, size_t remain)
{
	struct kgsl_snapshot_registers regs = {
		.regs = a6xx_registers,
		.count = ARRAY_SIZE(a6xx_registers) / 2,
	};

	return kgsl_snapshot_dump_registers(device, buf, remain, &regs);
}

static struct cdregs {
	const unsigned int *regs;
	unsigned int size;
} _a6xx_cd_registers[] = {
	{ a6xx_registers, ARRAY_SIZE(a6xx_registers) },
};

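/* Number of registers in pair _i of list _a (ranges are inclusive) */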
#define REG_PAIR_COUNT(_a, _i) \
	(((_a)[(2 * (_i)) + 1] - (_a)[2 * (_i)]) + 1)

static size_t a6xx_snapshot_registers(struct kgsl_device *device, u8 *buf,
		size_t remain, void *priv)
{
	struct kgsl_snapshot_regs *header = (struct kgsl_snapshot_regs *)buf;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int *src = (unsigned int *)a6xx_crashdump_registers.hostptr;
	unsigned int i, j, k;
	unsigned int count = 0;

	if (crash_dump_valid == false)
		return a6xx_legacy_snapshot_registers(device, buf, remain);

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	remain -= sizeof(*header);

	for (i = 0; i < ARRAY_SIZE(_a6xx_cd_registers); i++) {
		struct cdregs *regs = &_a6xx_cd_registers[i];

		for (j = 0; j < regs->size / 2; j++) {
			unsigned int start = regs->regs[2 * j];
			unsigned int end = regs->regs[(2 * j) + 1];

			if (remain < ((end - start) + 1) * 8) {
				SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
				goto out;
			}

			remain -= ((end - start) + 1) * 8;

			for (k = start; k <= end; k++, count++) {
				*data++ = k;
				*data++ = *src++;
			}
		}
	}

out:
	header->count = count;

	/* Return the size of the section */
	return (count * 8) + sizeof(*header);
}

static size_t a6xx_snapshot_shader_memory(struct kgsl_device *device,
		u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_shader *header =
		(struct kgsl_snapshot_shader *) buf;
	struct a6xx_shader_block_info *info =
		(struct a6xx_shader_block_info *) priv;
	struct a6xx_shader_block *block = info->block;
	unsigned int *data = (unsigned int *) (buf + sizeof(*header));

	if (remain < SHADER_SECTION_SZ(block->sz)) {
		SNAPSHOT_ERR_NOMEM(device, "SHADER MEMORY");
		return 0;
	}

	header->type = block->statetype;
	header->index = info->bank;
	header->size = block->sz;

	memcpy(data, a6xx_crashdump_registers.hostptr + info->offset,
		block->sz);

	return SHADER_SECTION_SZ(block->sz);
}

static void a6xx_snapshot_shader(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot)
{
	unsigned int i, j;
	struct a6xx_shader_block_info info;

	/* Shader blocks can only be read by the crash dumper */
	if (crash_dump_valid == false)
		return;

	for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
		for (j = 0; j < A6XX_NUM_SHADER_BANKS; j++) {
			info.block = &a6xx_shader_blocks[i];
			info.bank = j;
			info.offset = a6xx_shader_blocks[i].offset +
				(j * a6xx_shader_blocks[i].sz);

			/* Shader working/shadow memory */
			kgsl_snapshot_add_section(device,
				KGSL_SNAPSHOT_SECTION_SHADER,
				snapshot, a6xx_snapshot_shader_memory, &info);
		}
	}
}

static void a6xx_snapshot_mempool(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot)
{
	unsigned int pool_size;
	u8 *buf = snapshot->ptr;

	/* Set the mempool size to 0 to stabilize it while dumping */
	kgsl_regread(device, A6XX_CP_MEM_POOL_SIZE, &pool_size);
	kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, 0);

	kgsl_snapshot_indexed_registers(device, snapshot,
		A6XX_CP_MEM_POOL_DBG_ADDR, A6XX_CP_MEM_POOL_DBG_DATA,
		0, 0x2060);

	/*
	 * Data at offset 0x2000 in the mempool section is the mempool size.
	 * Since we set it to 0, patch in the original size so that the data
	 * is consistent.
	 */
	if (buf < snapshot->ptr) {
		unsigned int *data;

		/* Skip over the headers */
		buf += sizeof(struct kgsl_snapshot_section_header) +
			sizeof(struct kgsl_snapshot_indexed_regs);

		data = (unsigned int *)buf + 0x2000;
		*data = pool_size;
	}

	/* Restore the saved mempool size */
	kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, pool_size);
}

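/*
 * regbase is a byte address while reg is a dword offset, so regbase / 4
 * converts the base to dwords and (reg - regbase / 4) is the dword offset
 * of the register within the block exposed through the AHB read aperture.
 */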
static inline unsigned int a6xx_read_dbgahb(struct kgsl_device *device,
		unsigned int regbase, unsigned int reg)
{
	unsigned int read_reg = A6XX_HLSQ_DBG_AHB_READ_APERTURE +
				reg - regbase / 4;
	unsigned int val;

	kgsl_regread(device, read_reg, &val);
	return val;
}

static size_t a6xx_snapshot_cluster_dbgahb(struct kgsl_device *device, u8 *buf,
		size_t remain, void *priv)
{
	struct kgsl_snapshot_mvc_regs *header =
		(struct kgsl_snapshot_mvc_regs *)buf;
	struct a6xx_cluster_dbgahb_regs_info *info =
		(struct a6xx_cluster_dbgahb_regs_info *)priv;
	struct a6xx_cluster_dbgahb_registers *cur_cluster = info->cluster;
	unsigned int read_sel;
	unsigned int data_size = 0;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	int i, j;

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	remain -= sizeof(*header);

	header->ctxt_id = info->ctxt_id;
	header->cluster_id = cur_cluster->id;

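	/*
	 * The state type for context 1 is the context 0 state type + 2;
	 * the selector is programmed into bits 15:8 of HLSQ_DBG_READ_SEL.
	 */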
	read_sel = ((cur_cluster->statetype + info->ctxt_id * 2) & 0xff) << 8;
	kgsl_regwrite(device, A6XX_HLSQ_DBG_READ_SEL, read_sel);

	for (i = 0; i < cur_cluster->num_sets; i++) {
		unsigned int start = cur_cluster->regs[2 * i];
		unsigned int end = cur_cluster->regs[2 * i + 1];

		if (remain < (end - start + 3) * 4) {
			SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
			goto out;
		}

		remain -= (end - start + 3) * 4;
		data_size += (end - start + 3) * 4;

		*data++ = start | (1 << 31);
		*data++ = end;

		for (j = start; j <= end; j++) {
			unsigned int val;

			val = a6xx_read_dbgahb(device, cur_cluster->regbase, j);
			*data++ = val;
		}
	}

out:
	return data_size + sizeof(*header);
}

static size_t a6xx_snapshot_non_ctx_dbgahb(struct kgsl_device *device, u8 *buf,
		size_t remain, void *priv)
{
	struct kgsl_snapshot_regs *header =
		(struct kgsl_snapshot_regs *)buf;
	struct a6xx_non_ctx_dbgahb_registers *regs =
		(struct a6xx_non_ctx_dbgahb_registers *)priv;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	int count = 0;
	unsigned int read_sel;
	int i, j;

	/* Figure out how many registers we are going to dump */
	for (i = 0; i < regs->num_sets; i++) {
		int start = regs->regs[i * 2];
		int end = regs->regs[i * 2 + 1];

		count += (end - start + 1);
	}

	if (remain < (count * 8) + sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	header->count = count;

	read_sel = (regs->statetype & 0xff) << 8;
	kgsl_regwrite(device, A6XX_HLSQ_DBG_READ_SEL, read_sel);

	for (i = 0; i < regs->num_sets; i++) {
		unsigned int start = regs->regs[2 * i];
		unsigned int end = regs->regs[2 * i + 1];

		for (j = start; j <= end; j++) {
			unsigned int val;

			val = a6xx_read_dbgahb(device, regs->regbase, j);
			*data++ = j;
			*data++ = val;
		}
	}
	return (count * 8) + sizeof(*header);
}

static void a6xx_snapshot_dbgahb_regs(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot)
{
	int i, j;

	for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
		struct a6xx_cluster_dbgahb_registers *cluster =
				&a6xx_dbgahb_ctx_clusters[i];
		struct a6xx_cluster_dbgahb_regs_info info;

		info.cluster = cluster;
		for (j = 0; j < A6XX_NUM_CTXTS; j++) {
			info.ctxt_id = j;

			kgsl_snapshot_add_section(device,
				KGSL_SNAPSHOT_SECTION_MVC, snapshot,
				a6xx_snapshot_cluster_dbgahb, &info);
		}
	}

	for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
		kgsl_snapshot_add_section(device,
			KGSL_SNAPSHOT_SECTION_REGS, snapshot,
			a6xx_snapshot_non_ctx_dbgahb, &a6xx_non_ctx_dbgahb[i]);
	}
}

static size_t a6xx_legacy_snapshot_mvc(struct kgsl_device *device, u8 *buf,
		size_t remain, void *priv)
{
	struct kgsl_snapshot_mvc_regs *header =
		(struct kgsl_snapshot_mvc_regs *)buf;
	struct a6xx_cluster_regs_info *info =
		(struct a6xx_cluster_regs_info *)priv;
	struct a6xx_cluster_registers *cur_cluster = info->cluster;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int ctxt = info->ctxt_id;
	unsigned int start, end, i, j, aperture_cntl = 0;
	unsigned int data_size = 0;

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
		return 0;
	}

	remain -= sizeof(*header);

	header->ctxt_id = info->ctxt_id;
	header->cluster_id = cur_cluster->id;

	/*
	 * Set the AHB control for the Host to read from the
	 * cluster/context for this iteration.
	 */
	aperture_cntl = ((cur_cluster->id & 0x7) << 8) | (ctxt << 4) | ctxt;
	kgsl_regwrite(device, A6XX_CP_APERTURE_CNTL_HOST, aperture_cntl);

	for (i = 0; i < cur_cluster->num_sets; i++) {
		start = cur_cluster->regs[2 * i];
		end = cur_cluster->regs[2 * i + 1];

		if (remain < (end - start + 3) * 4) {
			SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
			goto out;
		}

		remain -= (end - start + 3) * 4;
		data_size += (end - start + 3) * 4;

		*data++ = start | (1 << 31);
		*data++ = end;
		for (j = start; j <= end; j++) {
			unsigned int val;

			kgsl_regread(device, j, &val);
			*data++ = val;
		}
	}
out:
	return data_size + sizeof(*header);
}

static size_t a6xx_snapshot_mvc(struct kgsl_device *device, u8 *buf,
		size_t remain, void *priv)
{
	struct kgsl_snapshot_mvc_regs *header =
		(struct kgsl_snapshot_mvc_regs *)buf;
	struct a6xx_cluster_regs_info *info =
		(struct a6xx_cluster_regs_info *)priv;
	struct a6xx_cluster_registers *cluster = info->cluster;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int *src;
	int i, j;
	unsigned int start, end;
	size_t data_size = 0;

	if (crash_dump_valid == false)
		return a6xx_legacy_snapshot_mvc(device, buf, remain, info);

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
		return 0;
	}

	remain -= sizeof(*header);

	header->ctxt_id = info->ctxt_id;
	header->cluster_id = cluster->id;

	src = (unsigned int *)(a6xx_crashdump_registers.hostptr +
		(header->ctxt_id ? cluster->offset1 : cluster->offset0));

	for (i = 0; i < cluster->num_sets; i++) {
		start = cluster->regs[2 * i];
		end = cluster->regs[2 * i + 1];

		if (remain < (end - start + 3) * 4) {
			SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
			goto out;
		}

		remain -= (end - start + 3) * 4;
		data_size += (end - start + 3) * 4;

		*data++ = start | (1 << 31);
		*data++ = end;
		for (j = start; j <= end; j++)
			*data++ = *src++;
	}

out:
	return data_size + sizeof(*header);
}

static void a6xx_snapshot_mvc_regs(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot)
{
	int i, j;
	struct a6xx_cluster_regs_info info;

	for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
		struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];

		info.cluster = cluster;
		for (j = 0; j < A6XX_NUM_CTXTS; j++) {
			info.ctxt_id = j;

			kgsl_snapshot_add_section(device,
				KGSL_SNAPSHOT_SECTION_MVC, snapshot,
				a6xx_snapshot_mvc, &info);
		}
	}
}

/* a6xx_dbgc_debug_bus_read() - Read data from trace bus */
static void a6xx_dbgc_debug_bus_read(struct kgsl_device *device,
	unsigned int block_id, unsigned int index, unsigned int *val)
{
	unsigned int reg;

	reg = (block_id << A6XX_DBGC_CFG_DBGBUS_SEL_PING_BLK_SEL_SHIFT) |
		(index << A6XX_DBGC_CFG_DBGBUS_SEL_PING_INDEX_SHIFT);

	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_A, reg);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_B, reg);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_C, reg);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_D, reg);

	kgsl_regread(device, A6XX_DBGC_CFG_DBGBUS_TRACE_BUF2, val);
	val++;
	kgsl_regread(device, A6XX_DBGC_CFG_DBGBUS_TRACE_BUF1, val);
}

/* a6xx_snapshot_dbgc_debugbus_block() - Capture debug data for a gpu block */
static size_t a6xx_snapshot_dbgc_debugbus_block(struct kgsl_device *device,
	u8 *buf, size_t remain, void *priv)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct kgsl_snapshot_debugbus *header =
		(struct kgsl_snapshot_debugbus *)buf;
	struct adreno_debugbus_block *block = priv;
	int i;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int dwords;
	unsigned int block_id;
	size_t size;

	dwords = block->dwords;

	/* For a6xx each debug bus data unit is 2 DWORDS */
	size = (dwords * sizeof(unsigned int) * 2) + sizeof(*header);

	if (remain < size) {
		SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
		return 0;
	}

	header->id = block->block_id;
	header->count = dwords * 2;

	block_id = block->block_id;
	/* GMU_GX data is read using the GMU_CX block id on A630 */
	if (adreno_is_a630(adreno_dev) &&
		(block_id == A6XX_DBGBUS_GMU_GX))
		block_id = A6XX_DBGBUS_GMU_CX;

	for (i = 0; i < dwords; i++)
		a6xx_dbgc_debug_bus_read(device, block_id, i, &data[i*2]);

	return size;
}

static void _cx_dbgc_regread(unsigned int offsetwords, unsigned int *value)
{
	void __iomem *reg;

	if (WARN((offsetwords < A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) ||
		(offsetwords > A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2),
		"Read beyond CX_DBGC block: 0x%x\n", offsetwords))
		return;

	reg = a6xx_cx_dbgc +
		((offsetwords - A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) << 2);
	*value = __raw_readl(reg);

	/*
	 * ensure this read finishes before the next one.
	 * i.e. act like normal readl()
	 */
	rmb();
}

static void _cx_dbgc_regwrite(unsigned int offsetwords, unsigned int value)
{
	void __iomem *reg;

	if (WARN((offsetwords < A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) ||
		(offsetwords > A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2),
		"Write beyond CX_DBGC block: 0x%x\n", offsetwords))
		return;

	reg = a6xx_cx_dbgc +
		((offsetwords - A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) << 2);

	/*
	 * ensure previous writes post before this one,
	 * i.e. act like normal writel()
	 */
	wmb();
	__raw_writel(value, reg);
}

/* a6xx_cx_debug_bus_read() - Read data from the CX trace bus */
static void a6xx_cx_debug_bus_read(struct kgsl_device *device,
	unsigned int block_id, unsigned int index, unsigned int *val)
{
	unsigned int reg;

	reg = (block_id << A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_BLK_SEL_SHIFT) |
		(index << A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_INDEX_SHIFT);

	_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_A, reg);
	_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_B, reg);
	_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_C, reg);
	_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_D, reg);

	_cx_dbgc_regread(A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2, val);
	val++;
	_cx_dbgc_regread(A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1, val);
}

/*
 * a6xx_snapshot_cx_dbgc_debugbus_block() - Capture debug data for a gpu
 * block from the CX DBGC block
 */
static size_t a6xx_snapshot_cx_dbgc_debugbus_block(struct kgsl_device *device,
	u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_debugbus *header =
		(struct kgsl_snapshot_debugbus *)buf;
	struct adreno_debugbus_block *block = priv;
	int i;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int dwords;
	size_t size;

	dwords = block->dwords;

	/* For a6xx each debug bus data unit is 2 DWORDS */
	size = (dwords * sizeof(unsigned int) * 2) + sizeof(*header);

	if (remain < size) {
		SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
		return 0;
	}

	header->id = block->block_id;
	header->count = dwords * 2;

	for (i = 0; i < dwords; i++)
		a6xx_cx_debug_bus_read(device, block->block_id, i,
			&data[i*2]);

	return size;
}

/* a6xx_snapshot_debugbus() - Capture debug bus data */
static void a6xx_snapshot_debugbus(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot)
{
	int i;

	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_CNTLT,
		(0xf << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT_SHIFT) |
		(0x4 << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU_SHIFT) |
		(0x20 << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN_SHIFT));

	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_CNTLM,
		0xf << A6XX_DBGC_CFG_DBGBUS_CTLTM_ENABLE_SHIFT);

	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_0, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_1, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_2, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_3, 0);

	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_BYTEL_0,
		(0 << A6XX_DBGC_CFG_DBGBUS_BYTEL0_SHIFT) |
		(1 << A6XX_DBGC_CFG_DBGBUS_BYTEL1_SHIFT) |
		(2 << A6XX_DBGC_CFG_DBGBUS_BYTEL2_SHIFT) |
		(3 << A6XX_DBGC_CFG_DBGBUS_BYTEL3_SHIFT) |
		(4 << A6XX_DBGC_CFG_DBGBUS_BYTEL4_SHIFT) |
		(5 << A6XX_DBGC_CFG_DBGBUS_BYTEL5_SHIFT) |
		(6 << A6XX_DBGC_CFG_DBGBUS_BYTEL6_SHIFT) |
		(7 << A6XX_DBGC_CFG_DBGBUS_BYTEL7_SHIFT));
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_BYTEL_1,
		(8 << A6XX_DBGC_CFG_DBGBUS_BYTEL8_SHIFT) |
		(9 << A6XX_DBGC_CFG_DBGBUS_BYTEL9_SHIFT) |
		(10 << A6XX_DBGC_CFG_DBGBUS_BYTEL10_SHIFT) |
		(11 << A6XX_DBGC_CFG_DBGBUS_BYTEL11_SHIFT) |
		(12 << A6XX_DBGC_CFG_DBGBUS_BYTEL12_SHIFT) |
		(13 << A6XX_DBGC_CFG_DBGBUS_BYTEL13_SHIFT) |
		(14 << A6XX_DBGC_CFG_DBGBUS_BYTEL14_SHIFT) |
		(15 << A6XX_DBGC_CFG_DBGBUS_BYTEL15_SHIFT));

	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_0, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_1, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_2, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_3, 0);

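	/*
	 * The CX DBGC registers are not accessed through kgsl_regwrite();
	 * map just the A6XX_CX_DBGC_CFG_DBGBUS_SEL_A..TRACE_BUF2 range and
	 * program it with the _cx_dbgc_regread()/_cx_dbgc_regwrite() helpers.
	 */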
	a6xx_cx_dbgc = ioremap(device->reg_phys +
			(A6XX_CX_DBGC_CFG_DBGBUS_SEL_A << 2),
			(A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2 -
			A6XX_CX_DBGC_CFG_DBGBUS_SEL_A + 1) << 2);

	if (a6xx_cx_dbgc) {
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_CNTLT,
			(0xf << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT_SHIFT) |
			(0x4 << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU_SHIFT) |
			(0x20 << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN_SHIFT));

		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_CNTLM,
			0xf << A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE_SHIFT);

		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3, 0);

		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0,
			(0 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL0_SHIFT) |
			(1 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL1_SHIFT) |
			(2 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL2_SHIFT) |
			(3 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL3_SHIFT) |
			(4 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL4_SHIFT) |
			(5 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL5_SHIFT) |
			(6 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL6_SHIFT) |
			(7 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL7_SHIFT));
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1,
			(8 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL8_SHIFT) |
			(9 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL9_SHIFT) |
			(10 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL10_SHIFT) |
			(11 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL11_SHIFT) |
			(12 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL12_SHIFT) |
			(13 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL13_SHIFT) |
			(14 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL14_SHIFT) |
			(15 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL15_SHIFT));

		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3, 0);
	} else
		KGSL_DRV_ERR(device, "Unable to ioremap CX_DBGC_CFG block\n");

	for (i = 0; i < ARRAY_SIZE(a6xx_dbgc_debugbus_blocks); i++) {
		kgsl_snapshot_add_section(device,
			KGSL_SNAPSHOT_SECTION_DEBUGBUS,
			snapshot, a6xx_snapshot_dbgc_debugbus_block,
			(void *) &a6xx_dbgc_debugbus_blocks[i]);
	}

	if (a6xx_cx_dbgc) {
		for (i = 0; i < ARRAY_SIZE(a6xx_cx_dbgc_debugbus_blocks); i++) {
			kgsl_snapshot_add_section(device,
				KGSL_SNAPSHOT_SECTION_DEBUGBUS,
				snapshot, a6xx_snapshot_cx_dbgc_debugbus_block,
				(void *) &a6xx_cx_dbgc_debugbus_blocks[i]);
		}
		iounmap(a6xx_cx_dbgc);
	}
}

static size_t a6xx_snapshot_dump_gmu_registers(struct kgsl_device *device,
		u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_regs *header = (struct kgsl_snapshot_regs *)buf;
	struct kgsl_snapshot_registers *regs = priv;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	int count = 0, j, k;

	/* Figure out how many registers we are going to dump */
	for (j = 0; j < regs->count; j++) {
		int start = regs->regs[j * 2];
		int end = regs->regs[j * 2 + 1];

		count += (end - start + 1);
	}

	if (remain < (count * 8) + sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	for (j = 0; j < regs->count; j++) {
		unsigned int start = regs->regs[j * 2];
		unsigned int end = regs->regs[j * 2 + 1];

		for (k = start; k <= end; k++) {
			unsigned int val;

			kgsl_gmu_regread(device, k, &val);
			*data++ = k;
			*data++ = val;
		}
	}

	header->count = count;

	/* Return the size of the section */
	return (count * 8) + sizeof(*header);
}

static void a6xx_snapshot_gmu(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot)
{
	struct kgsl_snapshot_registers gmu_regs = {
		.regs = a6xx_gmu_registers,
		.count = ARRAY_SIZE(a6xx_gmu_registers) / 2,
	};

	if (!kgsl_gmu_isenabled(device))
		return;

	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
		snapshot, a6xx_snapshot_dump_gmu_registers, &gmu_regs);
}

/* a6xx_snapshot_sqe() - Dump SQE data in snapshot */
static size_t a6xx_snapshot_sqe(struct kgsl_device *device, u8 *buf,
		size_t remain, void *priv)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct kgsl_snapshot_debug *header = (struct kgsl_snapshot_debug *)buf;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	struct adreno_firmware *fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);

	if (remain < DEBUG_SECTION_SZ(1)) {
		SNAPSHOT_ERR_NOMEM(device, "SQE VERSION DEBUG");
		return 0;
	}

	/* Dump the SQE firmware version */
	header->type = SNAPSHOT_DEBUG_SQE_VERSION;
	header->size = 1;
	*data = fw->version;

	return DEBUG_SECTION_SZ(1);
}

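/*
 * Kick off the CP crash dumper: point it at the capture script, start it
 * with A6XX_CP_CRASH_DUMP_CNTL and poll bit 1 of A6XX_CP_CRASH_DUMP_STATUS
 * until it reports completion or the timeout expires.
 */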
static void _a6xx_do_crashdump(struct kgsl_device *device)
{
	unsigned long wait_time;
	unsigned int reg = 0;
	unsigned int val;

	crash_dump_valid = false;

	if (a6xx_capturescript.gpuaddr == 0 ||
		a6xx_crashdump_registers.gpuaddr == 0)
		return;

	/* If the SMMU is stalled we cannot do a crash dump */
	kgsl_regread(device, A6XX_RBBM_STATUS3, &val);
	if (val & BIT(24))
		return;

	/* Turn on APRIV so we can access the buffers */
	kgsl_regwrite(device, A6XX_CP_MISC_CNTL, 1);

	kgsl_regwrite(device, A6XX_CP_CRASH_SCRIPT_BASE_LO,
		lower_32_bits(a6xx_capturescript.gpuaddr));
	kgsl_regwrite(device, A6XX_CP_CRASH_SCRIPT_BASE_HI,
		upper_32_bits(a6xx_capturescript.gpuaddr));
	kgsl_regwrite(device, A6XX_CP_CRASH_DUMP_CNTL, 1);

	wait_time = jiffies + msecs_to_jiffies(CP_CRASH_DUMPER_TIMEOUT);
	while (!time_after(jiffies, wait_time)) {
		kgsl_regread(device, A6XX_CP_CRASH_DUMP_STATUS, &reg);
		if (reg & 0x2)
			break;
		cpu_relax();
	}

	kgsl_regwrite(device, A6XX_CP_MISC_CNTL, 0);

	if (!(reg & 0x2)) {
		KGSL_CORE_ERR("Crash dump timed out: 0x%X\n", reg);
		return;
	}

	crash_dump_valid = true;
}

/*
 * a6xx_snapshot() - A6XX GPU snapshot function
 * @adreno_dev: Device being snapshotted
 * @snapshot: Pointer to the snapshot instance
 *
 * This is where all of the A6XX specific bits and pieces are grabbed
 * into the snapshot memory
 */
void a6xx_snapshot(struct adreno_device *adreno_dev,
		struct kgsl_snapshot *snapshot)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	struct adreno_snapshot_data *snap_data = gpudev->snapshot_data;

	/* Try to run the crash dumper */
	_a6xx_do_crashdump(device);

	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
		snapshot, a6xx_snapshot_registers, NULL);

	adreno_snapshot_vbif_registers(device, snapshot,
		a6xx_vbif_snapshot_registers,
		ARRAY_SIZE(a6xx_vbif_snapshot_registers));

	/* CP_SQE indexed registers */
	kgsl_snapshot_indexed_registers(device, snapshot,
		A6XX_CP_SQE_STAT_ADDR, A6XX_CP_SQE_STAT_DATA,
		0, snap_data->sect_sizes->cp_pfp);

	/* CP_DRAW_STATE */
	kgsl_snapshot_indexed_registers(device, snapshot,
		A6XX_CP_DRAW_STATE_ADDR, A6XX_CP_DRAW_STATE_DATA,
		0, 0x100);

	/* SQE_UCODE Cache */
	kgsl_snapshot_indexed_registers(device, snapshot,
		A6XX_CP_SQE_UCODE_DBG_ADDR, A6XX_CP_SQE_UCODE_DBG_DATA,
		0, 0x6000);

	/* CP ROQ */
	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG,
		snapshot, adreno_snapshot_cp_roq,
		&snap_data->sect_sizes->roq);

	/* SQE Firmware */
	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG,
		snapshot, a6xx_snapshot_sqe, NULL);

	/* Mempool debug data */
	a6xx_snapshot_mempool(device, snapshot);

	/* Shader memory */
	a6xx_snapshot_shader(device, snapshot);

	/* MVC register section */
	a6xx_snapshot_mvc_regs(device, snapshot);

	/* registers dumped through DBG AHB */
	a6xx_snapshot_dbgahb_regs(device, snapshot);

	a6xx_snapshot_debugbus(device, snapshot);

	/* GMU TCM data dumped through AHB */
	a6xx_snapshot_gmu(device, snapshot);
}

static int _a6xx_crashdump_init_mvc(uint64_t *ptr, uint64_t *offset)
{
	int qwords = 0;
	unsigned int i, j, k;
	unsigned int count;

	for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
		struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];

		cluster->offset0 = *offset;
		for (j = 0; j < A6XX_NUM_CTXTS; j++) {

			if (j == 1)
				cluster->offset1 = *offset;

			ptr[qwords++] = (cluster->id << 8) | (j << 4) | j;
			ptr[qwords++] =
				((uint64_t)A6XX_CP_APERTURE_CNTL_HOST << 44) |
				(1 << 21) | 1;

			for (k = 0; k < cluster->num_sets; k++) {
				count = REG_PAIR_COUNT(cluster->regs, k);
				ptr[qwords++] =
					a6xx_crashdump_registers.gpuaddr + *offset;
				ptr[qwords++] =
					(((uint64_t)cluster->regs[2 * k]) << 44) |
					count;

				*offset += count * sizeof(unsigned int);
			}
		}
	}

	return qwords;
}

static int _a6xx_crashdump_init_shader(struct a6xx_shader_block *block,
		uint64_t *ptr, uint64_t *offset)
{
	int qwords = 0;
	unsigned int j;

	/* Capture each bank in the block */
	for (j = 0; j < A6XX_NUM_SHADER_BANKS; j++) {
		/* Program the aperture */
		ptr[qwords++] =
			(block->statetype << A6XX_SHADER_STATETYPE_SHIFT) | j;
		ptr[qwords++] = (((uint64_t) A6XX_HLSQ_DBG_READ_SEL << 44)) |
			(1 << 21) | 1;

		/* Read all the data in one chunk */
		ptr[qwords++] = a6xx_crashdump_registers.gpuaddr + *offset;
		ptr[qwords++] =
			(((uint64_t) A6XX_HLSQ_DBG_AHB_READ_APERTURE << 44)) |
			block->sz;

		/* Remember the offset of the first bank for easy access */
		if (j == 0)
			block->offset = *offset;

		*offset += block->sz * sizeof(unsigned int);
	}

	return qwords;
}

void a6xx_crashdump_init(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned int script_size = 0;
	unsigned int data_size = 0;
	unsigned int i, j, k;
	uint64_t *ptr;
	uint64_t offset = 0;

	if (a6xx_capturescript.gpuaddr != 0 &&
		a6xx_crashdump_registers.gpuaddr != 0)
		return;

	/*
	 * We need to allocate two buffers:
	 * 1 - the buffer to hold the capture script
	 * 2 - the buffer to hold the data
	 */

	/*
	 * To save the registers, we need 16 bytes per register pair for the
	 * script and a dword for each register in the data
	 */
	for (i = 0; i < ARRAY_SIZE(_a6xx_cd_registers); i++) {
		struct cdregs *regs = &_a6xx_cd_registers[i];

		/* Each pair needs 16 bytes (2 qwords) */
		script_size += (regs->size / 2) * 16;

		/* Each register needs a dword in the data */
		for (j = 0; j < regs->size / 2; j++)
			data_size += REG_PAIR_COUNT(regs->regs, j) *
				sizeof(unsigned int);
	}

	/*
	 * To save the shader blocks for each block in each type we need 32
	 * bytes for the script (16 bytes to program the aperture and 16 to
	 * read the data) and then a block specific number of bytes to hold
	 * the data
	 */
	for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
		script_size += 32 * A6XX_NUM_SHADER_BANKS;
		data_size += a6xx_shader_blocks[i].sz * sizeof(unsigned int) *
			A6XX_NUM_SHADER_BANKS;
	}

	/* Calculate the script and data size for MVC registers */
	for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
		struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];

		for (j = 0; j < A6XX_NUM_CTXTS; j++) {

			/* 16 bytes for programming the aperture */
			script_size += 16;

			/* Reading each pair of registers takes 16 bytes */
			script_size += 16 * cluster->num_sets;

			/* A dword per register read from the cluster list */
			for (k = 0; k < cluster->num_sets; k++)
				data_size += REG_PAIR_COUNT(cluster->regs, k) *
					sizeof(unsigned int);
		}
	}

	/* Now allocate the script and data buffers */

	/* The script buffer needs 2 extra qwords on the end */
	if (kgsl_allocate_global(device, &a6xx_capturescript,
		script_size + 16, KGSL_MEMFLAGS_GPUREADONLY,
		KGSL_MEMDESC_PRIVILEGED, "capturescript"))
		return;

	if (kgsl_allocate_global(device, &a6xx_crashdump_registers, data_size,
		0, KGSL_MEMDESC_PRIVILEGED, "capturescript_regs")) {
		kgsl_free_global(KGSL_DEVICE(adreno_dev), &a6xx_capturescript);
		return;
	}

	/* Build the crash script */
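	/*
	 * Each read entry in the script is a pair of qwords: the destination
	 * address in the data buffer, then the source register offset in
	 * bits 63:44 with the register count in the low bits. The aperture
	 * programming entries instead carry the value to write, then the
	 * target register with bit 21 set to distinguish a write from a read.
	 */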

	ptr = (uint64_t *)a6xx_capturescript.hostptr;

	/* For the registers, program a read command for each pair */
	for (i = 0; i < ARRAY_SIZE(_a6xx_cd_registers); i++) {
		struct cdregs *regs = &_a6xx_cd_registers[i];

		for (j = 0; j < regs->size / 2; j++) {
			unsigned int r = REG_PAIR_COUNT(regs->regs, j);
			*ptr++ = a6xx_crashdump_registers.gpuaddr + offset;
			*ptr++ = (((uint64_t) regs->regs[2 * j]) << 44) | r;
			offset += r * sizeof(unsigned int);
		}
	}

	/* Program each shader block */
	for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
		ptr += _a6xx_crashdump_init_shader(&a6xx_shader_blocks[i], ptr,
			&offset);
	}

	/* Program the capture script for the MVC registers */
	ptr += _a6xx_crashdump_init_mvc(ptr, &offset);

	*ptr++ = 0;
	*ptr++ = 0;
}