blob: f9965c7c8062f5d49a1278cd1d3405bf933bbe84 [file] [log] [blame]
Shrenuj Bansal41665402016-12-16 15:25:54 -08001/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#include <linux/io.h>
15#include "kgsl.h"
16#include "adreno.h"
17#include "kgsl_snapshot.h"
18#include "adreno_snapshot.h"
19#include "a6xx_reg.h"
20#include "adreno_a6xx.h"
Kyle Piefer60733aa2017-03-21 11:24:01 -070021#include "kgsl_gmu.h"
Shrenuj Bansal41665402016-12-16 15:25:54 -080022
/* Number of hardware contexts dumped for each register cluster */
#define A6XX_NUM_CTXTS 2

/*
 * Register range tables for the CP aperture (MVC) dump path.  Each table
 * is a flat list of pairs: start offset, end offset (both inclusive), so
 * a table describes ARRAY_SIZE(table) / 2 ranges.
 */
static const unsigned int a6xx_gras_cluster[] = {
	0x8000, 0x8006, 0x8010, 0x8092, 0x8094, 0x809D, 0x80A0, 0x80A6,
	0x80AF, 0x80F1, 0x8100, 0x8107, 0x8109, 0x8109, 0x8110, 0x8110,
	0x8400, 0x840B,
};

static const unsigned int a6xx_ps_cluster[] = {
	0x8800, 0x8806, 0x8809, 0x8811, 0x8818, 0x881E, 0x8820, 0x8865,
	0x8870, 0x8879, 0x8880, 0x8889, 0x8890, 0x8891, 0x8898, 0x8898,
	0x88C0, 0x88c1, 0x88D0, 0x88E3, 0x88F0, 0x88F3, 0x8900, 0x891A,
	0x8927, 0x8928, 0x8C00, 0x8C01, 0x8C17, 0x8C33, 0x9200, 0x9216,
	0x9218, 0x9236, 0x9300, 0x9306,
};

static const unsigned int a6xx_fe_cluster[] = {
	0x9300, 0x9306, 0x9800, 0x9806, 0x9B00, 0x9B07, 0xA000, 0xA009,
	0xA00E, 0xA0EF, 0xA0F8, 0xA0F8,
};

static const unsigned int a6xx_pc_vs_cluster[] = {
	0x9100, 0x9108, 0x9300, 0x9306, 0x9980, 0x9981, 0x9B00, 0x9B07,
};

/* One entry per CP register cluster that is dumped once per context */
static struct a6xx_cluster_registers {
	unsigned int id;		/* Cluster identifier (CP_CLUSTER_*) */
	const unsigned int *regs;	/* Start/end register offset pairs */
	unsigned int num_sets;		/* Number of pairs in @regs */
	unsigned int offset0;		/* Crashdump data offset, context 0
					 * (presumably filled in by the
					 * crashdump script builder, which is
					 * outside this chunk — confirm) */
	unsigned int offset1;		/* Crashdump data offset, context 1 */
} a6xx_clusters[] = {
	{ CP_CLUSTER_GRAS, a6xx_gras_cluster, ARRAY_SIZE(a6xx_gras_cluster)/2 },
	{ CP_CLUSTER_PS, a6xx_ps_cluster, ARRAY_SIZE(a6xx_ps_cluster)/2 },
	{ CP_CLUSTER_FE, a6xx_fe_cluster, ARRAY_SIZE(a6xx_fe_cluster)/2 },
	{ CP_CLUSTER_PC_VS, a6xx_pc_vs_cluster,
		ARRAY_SIZE(a6xx_pc_vs_cluster)/2 },
};

/* Parameter bundle handed to the MVC snapshot callbacks via priv */
struct a6xx_cluster_regs_info {
	struct a6xx_cluster_registers *cluster;
	unsigned int ctxt_id;
};
66
/*
 * Register range tables for the SP/TP/HLSQ clusters that are only
 * reachable through the HLSQ debug-AHB aperture.  Same pair format as
 * the CP cluster tables: start offset, end offset (inclusive).
 */
static const unsigned int a6xx_sp_vs_hlsq_cluster[] = {
	0xB800, 0xB803, 0xB820, 0xB822,
};

static const unsigned int a6xx_sp_vs_sp_cluster[] = {
	0xA800, 0xA824, 0xA830, 0xA83C, 0xA840, 0xA864, 0xA870, 0xA895,
	0xA8A0, 0xA8AF, 0xA8C0, 0xA8C3,
};

static const unsigned int a6xx_hlsq_duplicate_cluster[] = {
	0xBB10, 0xBB11, 0xBB20, 0xBB29,
};

static const unsigned int a6xx_hlsq_2d_duplicate_cluster[] = {
	0xBD80, 0xBD80,
};

static const unsigned int a6xx_sp_duplicate_cluster[] = {
	0xAB00, 0xAB00, 0xAB04, 0xAB05, 0xAB10, 0xAB1B, 0xAB20, 0xAB20,
};

static const unsigned int a6xx_tp_duplicate_cluster[] = {
	0xB300, 0xB307, 0xB309, 0xB309, 0xB380, 0xB382,
};

static const unsigned int a6xx_sp_ps_hlsq_cluster[] = {
	0xB980, 0xB980, 0xB982, 0xB987, 0xB990, 0xB99B, 0xB9A0, 0xB9A2,
	0xB9C0, 0xB9C9,
};

static const unsigned int a6xx_sp_ps_hlsq_2d_cluster[] = {
	0xBD80, 0xBD80,
};

static const unsigned int a6xx_sp_ps_sp_cluster[] = {
	0xA980, 0xA9A8, 0xA9B0, 0xA9BC, 0xA9D0, 0xA9D3, 0xA9E0, 0xA9F3,
	0xAA00, 0xAA00, 0xAA30, 0xAA31,
};

static const unsigned int a6xx_sp_ps_sp_2d_cluster[] = {
	0xACC0, 0xACC0,
};

static const unsigned int a6xx_sp_ps_tp_cluster[] = {
	0xB180, 0xB183, 0xB190, 0xB191,
};

static const unsigned int a6xx_sp_ps_tp_2d_cluster[] = {
	0xB4C0, 0xB4D1,
};

/*
 * Debug-AHB cluster descriptors.  @regbase is a byte address of the
 * aperture window; @statetype is programmed into A6XX_HLSQ_DBG_READ_SEL
 * (shifted by the context id — see a6xx_snapshot_cluster_dbgahb).
 *
 * NOTE(review): the last two entries reuse the PS statetypes (0x22, 0x2)
 * but are tagged CP_CLUSTER_SP_VS — verify the cluster ids are intended.
 */
static struct a6xx_cluster_dbgahb_registers {
	unsigned int id;		/* Cluster identifier (CP_CLUSTER_*) */
	unsigned int regbase;		/* Aperture base (byte address) */
	unsigned int statetype;		/* HLSQ_DBG_READ_SEL state type */
	const unsigned int *regs;	/* Start/end register offset pairs */
	unsigned int num_sets;		/* Number of pairs in @regs */
} a6xx_dbgahb_ctx_clusters[] = {
	{ CP_CLUSTER_SP_VS, 0x0002E000, 0x41, a6xx_sp_vs_hlsq_cluster,
		ARRAY_SIZE(a6xx_sp_vs_hlsq_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002A000, 0x21, a6xx_sp_vs_sp_cluster,
		ARRAY_SIZE(a6xx_sp_vs_sp_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002E000, 0x41, a6xx_hlsq_duplicate_cluster,
		ARRAY_SIZE(a6xx_hlsq_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002F000, 0x45, a6xx_hlsq_2d_duplicate_cluster,
		ARRAY_SIZE(a6xx_hlsq_2d_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002A000, 0x21, a6xx_sp_duplicate_cluster,
		ARRAY_SIZE(a6xx_sp_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002C000, 0x1, a6xx_tp_duplicate_cluster,
		ARRAY_SIZE(a6xx_tp_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002E000, 0x42, a6xx_sp_ps_hlsq_cluster,
		ARRAY_SIZE(a6xx_sp_ps_hlsq_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002F000, 0x46, a6xx_sp_ps_hlsq_2d_cluster,
		ARRAY_SIZE(a6xx_sp_ps_hlsq_2d_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002A000, 0x22, a6xx_sp_ps_sp_cluster,
		ARRAY_SIZE(a6xx_sp_ps_sp_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002B000, 0x26, a6xx_sp_ps_sp_2d_cluster,
		ARRAY_SIZE(a6xx_sp_ps_sp_2d_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002C000, 0x2, a6xx_sp_ps_tp_cluster,
		ARRAY_SIZE(a6xx_sp_ps_tp_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002D000, 0x6, a6xx_sp_ps_tp_2d_cluster,
		ARRAY_SIZE(a6xx_sp_ps_tp_2d_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002E000, 0x42, a6xx_hlsq_duplicate_cluster,
		ARRAY_SIZE(a6xx_hlsq_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002A000, 0x22, a6xx_sp_duplicate_cluster,
		ARRAY_SIZE(a6xx_sp_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002C000, 0x2, a6xx_tp_duplicate_cluster,
		ARRAY_SIZE(a6xx_tp_duplicate_cluster) / 2 },
};

/* Parameter bundle handed to the dbgahb snapshot callback via priv */
struct a6xx_cluster_dbgahb_regs_info {
	struct a6xx_cluster_dbgahb_registers *cluster;
	unsigned int ctxt_id;
};
161
/*
 * Non-context (single-instance) registers behind the debug AHB aperture,
 * again as start/end offset pairs (inclusive).
 */
static const unsigned int a6xx_hlsq_non_ctx_registers[] = {
	0xBE00, 0xBE01, 0xBE04, 0xBE05, 0xBE08, 0xBE09, 0xBE10, 0xBE15,
	0xBE20, 0xBE23,
};

static const unsigned int a6xx_sp_non_ctx_registers[] = {
	0xAE00, 0xAE04, 0xAE0C, 0xAE0C, 0xAE0F, 0xAE2B, 0xAE30, 0xAE32,
	0xAE35, 0xAE35, 0xAE3A, 0xAE3F, 0xAE50, 0xAE52,
};

static const unsigned int a6xx_tp_non_ctx_registers[] = {
	0xB600, 0xB601, 0xB604, 0xB605, 0xB610, 0xB61B, 0xB620, 0xB623,
};

/* Descriptors for the non-context debug-AHB register blocks */
static struct a6xx_non_ctx_dbgahb_registers {
	unsigned int regbase;		/* Aperture base (byte address) */
	unsigned int statetype;		/* HLSQ_DBG_READ_SEL state type */
	const unsigned int *regs;	/* Start/end register offset pairs */
	unsigned int num_sets;		/* Number of pairs in @regs */
} a6xx_non_ctx_dbgahb[] = {
	{ 0x0002F800, 0x40, a6xx_hlsq_non_ctx_registers,
		ARRAY_SIZE(a6xx_hlsq_non_ctx_registers) / 2 },
	{ 0x0002B800, 0x20, a6xx_sp_non_ctx_registers,
		ARRAY_SIZE(a6xx_sp_non_ctx_registers) / 2 },
	{ 0x0002D800, 0x0, a6xx_tp_non_ctx_registers,
		ARRAY_SIZE(a6xx_tp_non_ctx_registers) / 2 },
};
189
/* VBIF register ranges for VBIF version 0x20xxxxxx (start/end pairs) */
static const unsigned int a6xx_vbif_ver_20xxxxxx_registers[] = {
	/* VBIF */
	0x3000, 0x3007, 0x300C, 0x3014, 0x3018, 0x302D, 0x3030, 0x3031,
	0x3034, 0x3036, 0x303C, 0x303D, 0x3040, 0x3040, 0x3042, 0x3042,
	0x3049, 0x3049, 0x3058, 0x3058, 0x305A, 0x3061, 0x3064, 0x3068,
	0x306C, 0x306D, 0x3080, 0x3088, 0x308B, 0x308C, 0x3090, 0x3094,
	0x3098, 0x3098, 0x309C, 0x309C, 0x30C0, 0x30C0, 0x30C8, 0x30C8,
	0x30D0, 0x30D0, 0x30D8, 0x30D8, 0x30E0, 0x30E0, 0x3100, 0x3100,
	0x3108, 0x3108, 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120,
	0x3124, 0x3125, 0x3129, 0x3129, 0x3131, 0x3131, 0x3154, 0x3154,
	0x3156, 0x3156, 0x3158, 0x3158, 0x315A, 0x315A, 0x315C, 0x315C,
	0x315E, 0x315E, 0x3160, 0x3160, 0x3162, 0x3162, 0x340C, 0x340C,
	0x3410, 0x3410, 0x3800, 0x3801,
};

/* GMU register range (start/end pairs) */
static const unsigned int a6xx_gmu_registers[] = {
	/* GMU */
	0x1B400, 0x1C3FF, 0x1C400, 0x1D3FF,
};

/* VBIF range table selected by (version & mask) match */
static const struct adreno_vbif_snapshot_registers
a6xx_vbif_snapshot_registers[] = {
	{ 0x20040000, 0xFF000000, a6xx_vbif_ver_20xxxxxx_registers,
				ARRAY_SIZE(a6xx_vbif_ver_20xxxxxx_registers)/2},
};
215
/*
 * Set of registers to dump for A6XX on snapshot.
 * Registers in pairs - first value is the start offset, second
 * is the stop offset (inclusive).
 *
 * Consumed by both the legacy AHB-read path
 * (a6xx_legacy_snapshot_registers) and the crash dumper path
 * (via _a6xx_cd_registers).
 */

static const unsigned int a6xx_registers[] = {
	/* RBBM */
	0x0000, 0x0002, 0x0010, 0x0010, 0x0012, 0x0012, 0x0014, 0x0014,
	0x0018, 0x001B, 0x001e, 0x0032, 0x0038, 0x003C, 0x0042, 0x0042,
	0x0044, 0x0044, 0x0047, 0x0047, 0x0056, 0x0056, 0x00AD, 0x00AE,
	0x00B0, 0x00FB, 0x0100, 0x011D, 0x0200, 0x020D, 0x0210, 0x0213,
	0x0218, 0x023D, 0x0400, 0x04F9, 0x0500, 0x0500, 0x0505, 0x050B,
	0x050E, 0x0511, 0x0533, 0x0533, 0x0540, 0x0555,
	/* CP */
	0x0800, 0x0808, 0x0810, 0x0813, 0x0820, 0x0821, 0x0823, 0x0827,
	0x0830, 0x0833, 0x0840, 0x0843, 0x084F, 0x086F, 0x0880, 0x088A,
	0x08A0, 0x08AB, 0x08C0, 0x08C4, 0x08D0, 0x08DD, 0x08F0, 0x08F3,
	0x0900, 0x0903, 0x0908, 0x0911, 0x0928, 0x093E, 0x0942, 0x094D,
	0x0980, 0x0984, 0x098D, 0x0996, 0x0998, 0x099E, 0x09A0, 0x09A6,
	0x09A8, 0x09AE, 0x09B0, 0x09B1, 0x09C2, 0x09C8, 0x0A00, 0x0A03,
	/* VSC */
	0x0C00, 0x0C04, 0x0C06, 0x0C06, 0x0C10, 0x0CD9, 0x0E00, 0x0E0E,
	/* UCHE */
	0x0E10, 0x0E13, 0x0E17, 0x0E19, 0x0E1C, 0x0E2B, 0x0E30, 0x0E32,
	0x0E38, 0x0E39,
	/* GRAS */
	0x8600, 0x8601, 0x8604, 0x8605, 0x8610, 0x861B, 0x8620, 0x8620,
	0x8628, 0x862B, 0x8630, 0x8637,
	/* RB */
	0x8E01, 0x8E01, 0x8E04, 0x8E05, 0x8E07, 0x8E08, 0x8E0C, 0x8E0C,
	0x8E10, 0x8E1C, 0x8E20, 0x8E25, 0x8E28, 0x8E28, 0x8E2C, 0x8E2F,
	0x8E3B, 0x8E3E, 0x8E40, 0x8E43, 0x8E50, 0x8E5E, 0x8E70, 0x8E77,
	/* VPC */
	0x9600, 0x9604, 0x9624, 0x9637,
	/* PC */
	0x9E00, 0x9E01, 0x9E03, 0x9E0E, 0x9E11, 0x9E16, 0x9E19, 0x9E19,
	0x9E1C, 0x9E1C, 0x9E20, 0x9E23, 0x9E30, 0x9E31, 0x9E34, 0x9E34,
	0x9E70, 0x9E72, 0x9E78, 0x9E79, 0x9E80, 0x9FFF,
	/* VFD */
	0xA600, 0xA601, 0xA603, 0xA603, 0xA60A, 0xA60A, 0xA610, 0xA617,
	0xA630, 0xA630, 0xD200, 0xD263,
};
259
/*
 * Debugbus source block identifiers.  The id is programmed into the
 * A6XX_DBGC_CFG_DBGBUS_SEL_* registers (see a6xx_dbgc_debug_bus_read).
 * Gaps in the numbering correspond to ids not dumped here.
 */
enum a6xx_debugbus_id {
	A6XX_DBGBUS_CP           = 0x1,
	A6XX_DBGBUS_RBBM         = 0x2,
	A6XX_DBGBUS_VBIF         = 0x3,
	A6XX_DBGBUS_HLSQ         = 0x4,
	A6XX_DBGBUS_UCHE         = 0x5,
	A6XX_DBGBUS_DPM          = 0x6,
	A6XX_DBGBUS_TESS         = 0x7,
	A6XX_DBGBUS_PC           = 0x8,
	A6XX_DBGBUS_VFDP         = 0x9,
	A6XX_DBGBUS_VPC          = 0xa,
	A6XX_DBGBUS_TSE          = 0xb,
	A6XX_DBGBUS_RAS          = 0xc,
	A6XX_DBGBUS_VSC          = 0xd,
	A6XX_DBGBUS_COM          = 0xe,
	A6XX_DBGBUS_LRZ          = 0x10,
	A6XX_DBGBUS_A2D          = 0x11,
	A6XX_DBGBUS_CCUFCHE      = 0x12,
	A6XX_DBGBUS_GMU          = 0x13,
	A6XX_DBGBUS_RBP          = 0x14,
	A6XX_DBGBUS_DCS          = 0x15,
	A6XX_DBGBUS_RBBM_CFG     = 0x16,
	A6XX_DBGBUS_CX           = 0x17,
	A6XX_DBGBUS_TPFCHE       = 0x19,
	A6XX_DBGBUS_GPC          = 0x1d,
	A6XX_DBGBUS_LARC         = 0x1e,
	A6XX_DBGBUS_HLSQ_SPTP    = 0x1f,
	A6XX_DBGBUS_RB_0         = 0x20,
	A6XX_DBGBUS_RB_1         = 0x21,
	A6XX_DBGBUS_UCHE_WRAPPER = 0x24,
	A6XX_DBGBUS_CCU_0        = 0x28,
	A6XX_DBGBUS_CCU_1        = 0x29,
	A6XX_DBGBUS_VFD_0        = 0x38,
	A6XX_DBGBUS_VFD_1        = 0x39,
	A6XX_DBGBUS_VFD_2        = 0x3a,
	A6XX_DBGBUS_VFD_3        = 0x3b,
	A6XX_DBGBUS_SP_0         = 0x40,
	A6XX_DBGBUS_SP_1         = 0x41,
	A6XX_DBGBUS_TPL1_0       = 0x48,
	A6XX_DBGBUS_TPL1_1       = 0x49,
	A6XX_DBGBUS_TPL1_2       = 0x4a,
	A6XX_DBGBUS_TPL1_3       = 0x4b,
};

/* Blocks read through the GX DBGC; second field is the dword count */
static const struct adreno_debugbus_block a6xx_dbgc_debugbus_blocks[] = {
	{ A6XX_DBGBUS_CP, 0x100, },
	{ A6XX_DBGBUS_RBBM, 0x100, },
	{ A6XX_DBGBUS_HLSQ, 0x100, },
	{ A6XX_DBGBUS_UCHE, 0x100, },
	{ A6XX_DBGBUS_DPM, 0x100, },
	{ A6XX_DBGBUS_TESS, 0x100, },
	{ A6XX_DBGBUS_PC, 0x100, },
	{ A6XX_DBGBUS_VFDP, 0x100, },
	{ A6XX_DBGBUS_VPC, 0x100, },
	{ A6XX_DBGBUS_TSE, 0x100, },
	{ A6XX_DBGBUS_RAS, 0x100, },
	{ A6XX_DBGBUS_VSC, 0x100, },
	{ A6XX_DBGBUS_COM, 0x100, },
	{ A6XX_DBGBUS_LRZ, 0x100, },
	{ A6XX_DBGBUS_A2D, 0x100, },
	{ A6XX_DBGBUS_CCUFCHE, 0x100, },
	{ A6XX_DBGBUS_RBP, 0x100, },
	{ A6XX_DBGBUS_DCS, 0x100, },
	{ A6XX_DBGBUS_RBBM_CFG, 0x100, },
	{ A6XX_DBGBUS_TPFCHE, 0x100, },
	{ A6XX_DBGBUS_GPC, 0x100, },
	{ A6XX_DBGBUS_LARC, 0x100, },
	{ A6XX_DBGBUS_HLSQ_SPTP, 0x100, },
	{ A6XX_DBGBUS_RB_0, 0x100, },
	{ A6XX_DBGBUS_RB_1, 0x100, },
	{ A6XX_DBGBUS_UCHE_WRAPPER, 0x100, },
	{ A6XX_DBGBUS_CCU_0, 0x100, },
	{ A6XX_DBGBUS_CCU_1, 0x100, },
	{ A6XX_DBGBUS_VFD_0, 0x100, },
	{ A6XX_DBGBUS_VFD_1, 0x100, },
	{ A6XX_DBGBUS_VFD_2, 0x100, },
	{ A6XX_DBGBUS_VFD_3, 0x100, },
	{ A6XX_DBGBUS_SP_0, 0x100, },
	{ A6XX_DBGBUS_SP_1, 0x100, },
	{ A6XX_DBGBUS_TPL1_0, 0x100, },
	{ A6XX_DBGBUS_TPL1_1, 0x100, },
	{ A6XX_DBGBUS_TPL1_2, 0x100, },
	{ A6XX_DBGBUS_TPL1_3, 0x100, },
};
Shrenuj Bansal41665402016-12-16 15:25:54 -0800344
/*
 * MMIO mapping of the CX DBGC block, used by _cx_dbgc_regread/regwrite.
 * NOTE(review): the ioremap that populates this pointer is not visible in
 * this chunk — confirm it is mapped before the debugbus dump runs.
 */
static void __iomem *a6xx_cx_dbgc;

/* Blocks that must be read through the CX DBGC rather than the GX DBGC */
static const struct adreno_debugbus_block a6xx_cx_dbgc_debugbus_blocks[] = {
	{ A6XX_DBGBUS_VBIF, 0x100, },
	{ A6XX_DBGBUS_GMU, 0x100, },
	{ A6XX_DBGBUS_CX, 0x100, },
};
351
/* Number of banks dumped for every shader memory block */
#define A6XX_NUM_SHADER_BANKS 3
/* Shift for the statetype field in the shader read-select register */
#define A6XX_SHADER_STATETYPE_SHIFT 8

/* Statetype identifiers for the shader/HLSQ internal memories */
enum a6xx_shader_obj {
	A6XX_TP0_TMO_DATA               = 0x9,
	A6XX_TP0_SMO_DATA               = 0xa,
	A6XX_TP0_MIPMAP_BASE_DATA       = 0xb,
	A6XX_TP1_TMO_DATA               = 0x19,
	A6XX_TP1_SMO_DATA               = 0x1a,
	A6XX_TP1_MIPMAP_BASE_DATA       = 0x1b,
	A6XX_SP_INST_DATA               = 0x29,
	A6XX_SP_LB_0_DATA               = 0x2a,
	A6XX_SP_LB_1_DATA               = 0x2b,
	A6XX_SP_LB_2_DATA               = 0x2c,
	A6XX_SP_LB_3_DATA               = 0x2d,
	A6XX_SP_LB_4_DATA               = 0x2e,
	A6XX_SP_LB_5_DATA               = 0x2f,
	A6XX_SP_CB_BINDLESS_DATA        = 0x30,
	A6XX_SP_CB_LEGACY_DATA          = 0x31,
	A6XX_SP_UAV_DATA                = 0x32,
	A6XX_SP_INST_TAG                = 0x33,
	A6XX_SP_CB_BINDLESS_TAG         = 0x34,
	A6XX_SP_TMO_UMO_TAG             = 0x35,
	A6XX_SP_SMO_TAG                 = 0x36,
	A6XX_SP_STATE_DATA              = 0x37,
	A6XX_HLSQ_CHUNK_CVS_RAM         = 0x49,
	A6XX_HLSQ_CHUNK_CPS_RAM         = 0x4a,
	A6XX_HLSQ_CHUNK_CVS_RAM_TAG     = 0x4b,
	A6XX_HLSQ_CHUNK_CPS_RAM_TAG     = 0x4c,
	A6XX_HLSQ_ICB_CVS_CB_BASE_TAG   = 0x4d,
	A6XX_HLSQ_ICB_CPS_CB_BASE_TAG   = 0x4e,
	A6XX_HLSQ_CVS_MISC_RAM          = 0x50,
	A6XX_HLSQ_CPS_MISC_RAM          = 0x51,
	A6XX_HLSQ_INST_RAM              = 0x52,
	A6XX_HLSQ_GFX_CVS_CONST_RAM     = 0x53,
	A6XX_HLSQ_GFX_CPS_CONST_RAM     = 0x54,
	A6XX_HLSQ_CVS_MISC_RAM_TAG      = 0x55,
	A6XX_HLSQ_CPS_MISC_RAM_TAG      = 0x56,
	A6XX_HLSQ_INST_RAM_TAG          = 0x57,
	A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG = 0x58,
	A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG = 0x59,
	A6XX_HLSQ_PWR_REST_RAM          = 0x5a,
	A6XX_HLSQ_PWR_REST_TAG          = 0x5b,
	A6XX_HLSQ_DATAPATH_META         = 0x60,
	A6XX_HLSQ_FRONTEND_META         = 0x61,
	A6XX_HLSQ_INDIRECT_META         = 0x62,
	A6XX_HLSQ_BACKEND_META          = 0x63
};

/* One dumpable shader memory block */
struct a6xx_shader_block {
	unsigned int statetype;	/* Block id from enum a6xx_shader_obj */
	unsigned int sz;	/* Block size (dwords, per SHADER_SECTION_SZ
				 * usage in a6xx_snapshot_shader_memory) */
	uint64_t offset;	/* Offset of this block's data in the
				 * crashdump buffer */
};

/* Parameter bundle handed to the shader snapshot callback via priv */
struct a6xx_shader_block_info {
	struct a6xx_shader_block *block;
	unsigned int bank;	/* Bank index, 0..A6XX_NUM_SHADER_BANKS-1 */
	uint64_t offset;	/* Bank data offset in the crashdump buffer */
};
412
/*
 * All shader memory blocks captured by the crash dumper; sizes are in
 * dwords (see SHADER_SECTION_SZ usage in a6xx_snapshot_shader_memory).
 * The offset field is filled in at runtime.
 */
static struct a6xx_shader_block a6xx_shader_blocks[] = {
	{A6XX_TP0_TMO_DATA,               0x200},
	{A6XX_TP0_SMO_DATA,               0x80,},
	{A6XX_TP0_MIPMAP_BASE_DATA,       0x3C0},
	{A6XX_TP1_TMO_DATA,               0x200},
	{A6XX_TP1_SMO_DATA,               0x80,},
	{A6XX_TP1_MIPMAP_BASE_DATA,       0x3C0},
	{A6XX_SP_INST_DATA,               0x800},
	{A6XX_SP_LB_0_DATA,               0x800},
	{A6XX_SP_LB_1_DATA,               0x800},
	{A6XX_SP_LB_2_DATA,               0x800},
	{A6XX_SP_LB_3_DATA,               0x800},
	{A6XX_SP_LB_4_DATA,               0x800},
	{A6XX_SP_LB_5_DATA,               0x200},
	{A6XX_SP_CB_BINDLESS_DATA,        0x2000},
	{A6XX_SP_CB_LEGACY_DATA,          0x280,},
	{A6XX_SP_UAV_DATA,                0x80,},
	{A6XX_SP_INST_TAG,                0x80,},
	{A6XX_SP_CB_BINDLESS_TAG,         0x80,},
	{A6XX_SP_TMO_UMO_TAG,             0x80,},
	{A6XX_SP_SMO_TAG,                 0x80},
	{A6XX_SP_STATE_DATA,              0x3F},
	{A6XX_HLSQ_CHUNK_CVS_RAM,         0x1C0},
	{A6XX_HLSQ_CHUNK_CPS_RAM,         0x280},
	{A6XX_HLSQ_CHUNK_CVS_RAM_TAG,     0x40,},
	{A6XX_HLSQ_CHUNK_CPS_RAM_TAG,     0x40,},
	{A6XX_HLSQ_ICB_CVS_CB_BASE_TAG,   0x4,},
	{A6XX_HLSQ_ICB_CPS_CB_BASE_TAG,   0x4,},
	{A6XX_HLSQ_CVS_MISC_RAM,          0x1C0},
	{A6XX_HLSQ_CPS_MISC_RAM,          0x580},
	{A6XX_HLSQ_INST_RAM,              0x800},
	{A6XX_HLSQ_GFX_CVS_CONST_RAM,     0x800},
	{A6XX_HLSQ_GFX_CPS_CONST_RAM,     0x800},
	{A6XX_HLSQ_CVS_MISC_RAM_TAG,      0x8,},
	{A6XX_HLSQ_CPS_MISC_RAM_TAG,      0x4,},
	{A6XX_HLSQ_INST_RAM_TAG,          0x80,},
	{A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0xC,},
	{A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x10},
	{A6XX_HLSQ_PWR_REST_RAM,          0x28},
	{A6XX_HLSQ_PWR_REST_TAG,          0x14},
	{A6XX_HLSQ_DATAPATH_META,         0x40,},
	{A6XX_HLSQ_FRONTEND_META,         0x40},
	{A6XX_HLSQ_INDIRECT_META,         0x40,}
};
457
/* GPU-visible buffer holding the crash dump capture script */
static struct kgsl_memdesc a6xx_capturescript;
/* GPU-visible buffer the crash dumper writes register values into */
static struct kgsl_memdesc a6xx_crashdump_registers;
/* True when the crash dumper ran and its output can be trusted */
static bool crash_dump_valid;

/*
 * a6xx_legacy_snapshot_registers() - Dump a6xx_registers by direct AHB
 * reads; fallback used when the crash dumper output is not valid.
 */
static size_t a6xx_legacy_snapshot_registers(struct kgsl_device *device,
		u8 *buf, size_t remain)
{
	struct kgsl_snapshot_registers regs = {
		.regs = a6xx_registers,
		.count = ARRAY_SIZE(a6xx_registers) / 2,
	};

	return kgsl_snapshot_dump_registers(device, buf, remain, &regs);
}
472
/* Register lists captured through the crash dumper */
static struct cdregs {
	const unsigned int *regs;	/* Start/end offset pairs */
	unsigned int size;		/* Total entries (2 * pair count) */
} _a6xx_cd_registers[] = {
	{ a6xx_registers, ARRAY_SIZE(a6xx_registers) },
};

/* Number of registers in pair _i of range table _a (end is inclusive) */
#define REG_PAIR_COUNT(_a, _i) \
	(((_a)[(2 * (_i)) + 1] - (_a)[2 * (_i)]) + 1)
482
/*
 * a6xx_snapshot_registers() - Write the REGS snapshot section from the
 * crash dumper output, falling back to live AHB reads when the dumper
 * did not run.  Output format is (address, value) dword pairs, hence the
 * "* 8" size accounting.  Returns the section size in bytes, or 0 on
 * insufficient space.
 */
static size_t a6xx_snapshot_registers(struct kgsl_device *device, u8 *buf,
		size_t remain, void *priv)
{
	struct kgsl_snapshot_regs *header = (struct kgsl_snapshot_regs *)buf;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	/* Crash dumper wrote values in table order, so walk src linearly */
	unsigned int *src = (unsigned int *)a6xx_crashdump_registers.hostptr;
	unsigned int i, j, k;
	unsigned int count = 0;

	if (crash_dump_valid == false)
		return a6xx_legacy_snapshot_registers(device, buf, remain);

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	remain -= sizeof(*header);

	for (i = 0; i < ARRAY_SIZE(_a6xx_cd_registers); i++) {
		struct cdregs *regs = &_a6xx_cd_registers[i];

		for (j = 0; j < regs->size / 2; j++) {
			unsigned int start = regs->regs[2 * j];
			unsigned int end = regs->regs[(2 * j) + 1];

			if (remain < ((end - start) + 1) * 8) {
				SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
				goto out;
			}

			remain -= ((end - start) + 1) * 8;

			for (k = start; k <= end; k++, count++) {
				*data++ = k;
				*data++ = *src++;
			}
		}
	}

out:
	header->count = count;

	/* Return the size of the section */
	return (count * 8) + sizeof(*header);
}
529
Lynus Vaz9ad67a32017-03-10 14:55:02 +0530530static size_t a6xx_snapshot_shader_memory(struct kgsl_device *device,
531 u8 *buf, size_t remain, void *priv)
532{
533 struct kgsl_snapshot_shader *header =
534 (struct kgsl_snapshot_shader *) buf;
535 struct a6xx_shader_block_info *info =
536 (struct a6xx_shader_block_info *) priv;
537 struct a6xx_shader_block *block = info->block;
538 unsigned int *data = (unsigned int *) (buf + sizeof(*header));
539
540 if (remain < SHADER_SECTION_SZ(block->sz)) {
541 SNAPSHOT_ERR_NOMEM(device, "SHADER MEMORY");
542 return 0;
543 }
544
545 header->type = block->statetype;
546 header->index = info->bank;
547 header->size = block->sz;
548
549 memcpy(data, a6xx_crashdump_registers.hostptr + info->offset,
550 block->sz);
551
552 return SHADER_SECTION_SZ(block->sz);
553}
554
/*
 * a6xx_snapshot_shader() - Emit one SHADER section per (block, bank)
 * combination from the crash dumper output.
 */
static void a6xx_snapshot_shader(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot)
{
	unsigned int i, j;
	struct a6xx_shader_block_info info;

	/* Shader blocks can only be read by the crash dumper */
	if (crash_dump_valid == false)
		return;

	for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
		for (j = 0; j < A6XX_NUM_SHADER_BANKS; j++) {
			info.block = &a6xx_shader_blocks[i];
			info.bank = j;
			/*
			 * NOTE(review): stride is j * sz while sz is a dword
			 * count elsewhere (SHADER_SECTION_SZ); whether offset
			 * is in bytes or dwords depends on the crashdump
			 * script builder, which is not visible here — verify
			 * the units agree.
			 */
			info.offset = a6xx_shader_blocks[i].offset +
				(j * a6xx_shader_blocks[i].sz);

			/* Shader working/shadow memory */
			kgsl_snapshot_add_section(device,
				KGSL_SNAPSHOT_SECTION_SHADER,
				snapshot, a6xx_snapshot_shader_memory, &info);
		}
	}
}
579
/*
 * a6xx_snapshot_mempool() - Dump the CP memory pool debug contents as an
 * indexed-register section.
 */
static void a6xx_snapshot_mempool(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot)
{
	unsigned int pool_size;

	/* Save the mempool size, then set it to 0 to stabilize the pool
	 * while it is being dumped
	 */
	kgsl_regread(device, A6XX_CP_MEM_POOL_SIZE, &pool_size);
	kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, 0);

	kgsl_snapshot_indexed_registers(device, snapshot,
		A6XX_CP_MEM_POOL_DBG_ADDR, A6XX_CP_MEM_POOL_DBG_DATA,
		0, 0x2060);

	/* Restore the saved mempool size */
	kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, pool_size);
}
596
/*
 * a6xx_read_dbgahb() - Read one register through the HLSQ debug-AHB
 * aperture.
 *
 * @regbase: aperture base as a byte address; @reg: register offset in
 * dwords.  The aperture window starts at A6XX_HLSQ_DBG_AHB_READ_APERTURE,
 * so the read offset is reg - (regbase / 4) — the division converts the
 * byte base to dwords; the apparent missing parentheses around
 * "reg - regbase" are intentional, not a precedence bug.
 */
static inline unsigned int a6xx_read_dbgahb(struct kgsl_device *device,
				unsigned int regbase, unsigned int reg)
{
	unsigned int read_reg = A6XX_HLSQ_DBG_AHB_READ_APERTURE +
				reg - regbase / 4;
	unsigned int val;

	kgsl_regread(device, read_reg, &val);
	return val;
}
607
/*
 * a6xx_snapshot_cluster_dbgahb() - Write one MVC snapshot section for a
 * debug-AHB cluster/context pair.  Each range is emitted as a header of
 * (start | (1 << 31), end) followed by the register values, hence the
 * "+ 3" dwords of accounting per range.  Returns the section size in
 * bytes (header only, if the first range did not fit).
 */
static size_t a6xx_snapshot_cluster_dbgahb(struct kgsl_device *device, u8 *buf,
				size_t remain, void *priv)
{
	struct kgsl_snapshot_mvc_regs *header =
				(struct kgsl_snapshot_mvc_regs *)buf;
	struct a6xx_cluster_dbgahb_regs_info *info =
				(struct a6xx_cluster_dbgahb_regs_info *)priv;
	struct a6xx_cluster_dbgahb_registers *cur_cluster = info->cluster;
	unsigned int read_sel;
	unsigned int data_size = 0;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	int i, j;

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	remain -= sizeof(*header);

	header->ctxt_id = info->ctxt_id;
	header->cluster_id = cur_cluster->id;

	/* Select the statetype for this cluster/context in the aperture */
	read_sel = ((cur_cluster->statetype + info->ctxt_id * 2) & 0xff) << 8;
	kgsl_regwrite(device, A6XX_HLSQ_DBG_READ_SEL, read_sel);

	for (i = 0; i < cur_cluster->num_sets; i++) {
		unsigned int start = cur_cluster->regs[2 * i];
		unsigned int end = cur_cluster->regs[2 * i + 1];

		if (remain < (end - start + 3) * 4) {
			SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
			goto out;
		}

		remain -= (end - start + 3) * 4;
		data_size += (end - start + 3) * 4;

		/* Range marker: start with bit 31 set, then end offset */
		*data++ = start | (1 << 31);
		*data++ = end;

		for (j = start; j <= end; j++) {
			unsigned int val;

			val = a6xx_read_dbgahb(device, cur_cluster->regbase, j);
			*data++ = val;

		}
	}

out:
	return data_size + sizeof(*header);
}
661
/*
 * a6xx_snapshot_non_ctx_dbgahb() - Write a REGS snapshot section for one
 * non-context debug-AHB block.  Output is (address, value) dword pairs;
 * the whole section is sized up front, so it is all-or-nothing.
 */
static size_t a6xx_snapshot_non_ctx_dbgahb(struct kgsl_device *device, u8 *buf,
				size_t remain, void *priv)
{
	struct kgsl_snapshot_regs *header =
				(struct kgsl_snapshot_regs *)buf;
	struct a6xx_non_ctx_dbgahb_registers *regs =
				(struct a6xx_non_ctx_dbgahb_registers *)priv;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	int count = 0;
	unsigned int read_sel;
	int i, j;

	/* Figure out how many registers we are going to dump */
	for (i = 0; i < regs->num_sets; i++) {
		int start = regs->regs[i * 2];
		int end = regs->regs[i * 2 + 1];

		count += (end - start + 1);
	}

	if (remain < (count * 8) + sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	header->count = count;

	/* Select the block's statetype in the debug-AHB aperture */
	read_sel = (regs->statetype & 0xff) << 8;
	kgsl_regwrite(device, A6XX_HLSQ_DBG_READ_SEL, read_sel);

	for (i = 0; i < regs->num_sets; i++) {
		unsigned int start = regs->regs[2 * i];
		unsigned int end = regs->regs[2 * i + 1];

		for (j = start; j <= end; j++) {
			unsigned int val;

			val = a6xx_read_dbgahb(device, regs->regbase, j);
			*data++ = j;
			*data++ = val;

		}
	}
	return (count * 8) + sizeof(*header);
}
707
708static void a6xx_snapshot_dbgahb_regs(struct kgsl_device *device,
709 struct kgsl_snapshot *snapshot)
710{
711 int i, j;
712
713 for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
714 struct a6xx_cluster_dbgahb_registers *cluster =
715 &a6xx_dbgahb_ctx_clusters[i];
716 struct a6xx_cluster_dbgahb_regs_info info;
717
718 info.cluster = cluster;
719 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
720 info.ctxt_id = j;
721
722 kgsl_snapshot_add_section(device,
723 KGSL_SNAPSHOT_SECTION_MVC, snapshot,
724 a6xx_snapshot_cluster_dbgahb, &info);
725 }
726 }
727
728 for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
729 kgsl_snapshot_add_section(device,
730 KGSL_SNAPSHOT_SECTION_REGS, snapshot,
731 a6xx_snapshot_non_ctx_dbgahb, &a6xx_non_ctx_dbgahb[i]);
732 }
733}
734
/*
 * a6xx_legacy_snapshot_mvc() - Dump one CP cluster/context pair by live
 * register reads through the CP aperture; fallback when the crash dumper
 * output is not valid.  Same section layout as a6xx_snapshot_mvc.
 */
static size_t a6xx_legacy_snapshot_mvc(struct kgsl_device *device, u8 *buf,
				size_t remain, void *priv)
{
	struct kgsl_snapshot_mvc_regs *header =
					(struct kgsl_snapshot_mvc_regs *)buf;
	struct a6xx_cluster_regs_info *info =
					(struct a6xx_cluster_regs_info *)priv;
	struct a6xx_cluster_registers *cur_cluster = info->cluster;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int ctxt = info->ctxt_id;
	unsigned int start, end, i, j, aperture_cntl = 0;
	unsigned int data_size = 0;

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
		return 0;
	}

	remain -= sizeof(*header);

	header->ctxt_id = info->ctxt_id;
	header->cluster_id = cur_cluster->id;

	/*
	 * Set the AHB control for the Host to read from the
	 * cluster/context for this iteration.
	 */
	aperture_cntl = ((cur_cluster->id & 0x7) << 8) | (ctxt << 4) | ctxt;
	kgsl_regwrite(device, A6XX_CP_APERTURE_CNTL_HOST, aperture_cntl);

	for (i = 0; i < cur_cluster->num_sets; i++) {
		start = cur_cluster->regs[2 * i];
		end = cur_cluster->regs[2 * i + 1];

		/* Range header is 2 dwords, data is end-start+1 dwords */
		if (remain < (end - start + 3) * 4) {
			SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
			goto out;
		}

		remain -= (end - start + 3) * 4;
		data_size += (end - start + 3) * 4;

		*data++ = start | (1 << 31);
		*data++ = end;
		for (j = start; j <= end; j++) {
			unsigned int val;

			kgsl_regread(device, j, &val);
			*data++ = val;
		}
	}
out:
	return data_size + sizeof(*header);
}
789
/*
 * a6xx_snapshot_mvc() - Write one MVC snapshot section for a CP
 * cluster/context pair from the crash dumper output, falling back to
 * live aperture reads when the dumper did not run.  The per-context data
 * location in the crashdump buffer comes from cluster->offset0/offset1.
 */
static size_t a6xx_snapshot_mvc(struct kgsl_device *device, u8 *buf,
				size_t remain, void *priv)
{
	struct kgsl_snapshot_mvc_regs *header =
				(struct kgsl_snapshot_mvc_regs *)buf;
	struct a6xx_cluster_regs_info *info =
				(struct a6xx_cluster_regs_info *)priv;
	struct a6xx_cluster_registers *cluster = info->cluster;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int *src;
	int i, j;
	unsigned int start, end;
	size_t data_size = 0;

	if (crash_dump_valid == false)
		return a6xx_legacy_snapshot_mvc(device, buf, remain, info);

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
		return 0;
	}

	remain -= sizeof(*header);

	header->ctxt_id = info->ctxt_id;
	header->cluster_id = cluster->id;

	src = (unsigned int *)(a6xx_crashdump_registers.hostptr +
		(header->ctxt_id ? cluster->offset1 : cluster->offset0));

	for (i = 0; i < cluster->num_sets; i++) {
		start = cluster->regs[2 * i];
		end = cluster->regs[2 * i + 1];

		/* Range header is 2 dwords, data is end-start+1 dwords */
		if (remain < (end - start + 3) * 4) {
			SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
			goto out;
		}

		remain -= (end - start + 3) * 4;
		data_size += (end - start + 3) * 4;

		*data++ = start | (1 << 31);
		*data++ = end;
		for (j = start; j <= end; j++)
			*data++ = *src++;
	}

out:
	return data_size + sizeof(*header);

}
842
843static void a6xx_snapshot_mvc_regs(struct kgsl_device *device,
844 struct kgsl_snapshot *snapshot)
845{
846 int i, j;
847 struct a6xx_cluster_regs_info info;
848
849 for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
850 struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];
851
852 info.cluster = cluster;
853 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
854 info.ctxt_id = j;
855
856 kgsl_snapshot_add_section(device,
857 KGSL_SNAPSHOT_SECTION_MVC, snapshot,
858 a6xx_snapshot_mvc, &info);
859 }
860 }
861}
862
/*
 * a6xx_dbgc_debug_bus_read() - Read one debugbus data unit (2 dwords)
 * from the GX DBGC trace bus.
 * @val: points at 2 dwords of output; TRACE_BUF2 lands in val[0],
 * TRACE_BUF1 in val[1].
 */
static void a6xx_dbgc_debug_bus_read(struct kgsl_device *device,
	unsigned int block_id, unsigned int index, unsigned int *val)
{
	unsigned int reg;

	reg = (block_id << A6XX_DBGC_CFG_DBGBUS_SEL_PING_BLK_SEL_SHIFT) |
			(index << A6XX_DBGC_CFG_DBGBUS_SEL_PING_INDEX_SHIFT);

	/* All four select registers are programmed before sampling */
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_A, reg);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_B, reg);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_C, reg);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_D, reg);

	kgsl_regread(device, A6XX_DBGC_CFG_DBGBUS_TRACE_BUF2, val);
	val++;
	kgsl_regread(device, A6XX_DBGC_CFG_DBGBUS_TRACE_BUF1, val);
}
881
/* a6xx_snapshot_dbgc_debugbus_block() - Capture debug data for a gpu block */
static size_t a6xx_snapshot_dbgc_debugbus_block(struct kgsl_device *device,
	u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_debugbus *header =
		(struct kgsl_snapshot_debugbus *)buf;
	struct adreno_debugbus_block *block = priv;
	int i;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int dwords;
	size_t size;

	dwords = block->dwords;

	/* For a6xx each debug bus data unit is 2 DWORDS */
	size = (dwords * sizeof(unsigned int) * 2) + sizeof(*header);

	if (remain < size) {
		SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
		return 0;
	}

	header->id = block->block_id;
	/* count is in dwords of payload, two per bus index */
	header->count = dwords * 2;

	for (i = 0; i < dwords; i++)
		a6xx_dbgc_debug_bus_read(device, block->block_id, i,
					&data[i*2]);

	return size;
}
913
/*
 * _cx_dbgc_regread() - Read a CX DBGC register through the a6xx_cx_dbgc
 * MMIO mapping.  @offsetwords is a dword offset and must fall inside the
 * CX DBGC window (SEL_A..TRACE_BUF2); out-of-range accesses WARN and
 * leave *value untouched.
 */
static void _cx_dbgc_regread(unsigned int offsetwords, unsigned int *value)
{
	void __iomem *reg;

	if (WARN((offsetwords < A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) ||
		(offsetwords > A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2),
		"Read beyond CX_DBGC block: 0x%x\n", offsetwords))
		return;

	/* dword offset relative to the window base, scaled to bytes */
	reg = a6xx_cx_dbgc +
		((offsetwords - A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) << 2);
	*value = __raw_readl(reg);

	/*
	 * ensure this read finishes before the next one.
	 * i.e. act like normal readl()
	 */
	rmb();
}
933
/*
 * _cx_dbgc_regwrite() - Write a CX DBGC register through the a6xx_cx_dbgc
 * MMIO mapping.  Same windowing rules as _cx_dbgc_regread; out-of-range
 * accesses WARN and are dropped.
 */
static void _cx_dbgc_regwrite(unsigned int offsetwords, unsigned int value)
{
	void __iomem *reg;

	if (WARN((offsetwords < A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) ||
		(offsetwords > A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2),
		"Write beyond CX_DBGC block: 0x%x\n", offsetwords))
		return;

	/* dword offset relative to the window base, scaled to bytes */
	reg = a6xx_cx_dbgc +
		((offsetwords - A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) << 2);

	/*
	 * ensure previous writes post before this one,
	 * i.e. act like normal writel()
	 */
	wmb();
	__raw_writel(value, reg);
}
953
954/* a6xx_cx_dbgc_debug_bus_read() - Read data from trace bus */
955static void a6xx_cx_debug_bus_read(struct kgsl_device *device,
956 unsigned int block_id, unsigned int index, unsigned int *val)
957{
958 unsigned int reg;
959
960 reg = (block_id << A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_BLK_SEL_SHIFT) |
961 (index << A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_INDEX_SHIFT);
962
963 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_A, reg);
964 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_B, reg);
965 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_C, reg);
966 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_D, reg);
967
968 _cx_dbgc_regread(A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2, val);
969 val++;
970 _cx_dbgc_regread(A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1, val);
971}
972
973/*
974 * a6xx_snapshot_cx_dbgc_debugbus_block() - Capture debug data for a gpu
975 * block from the CX DBGC block
976 */
977static size_t a6xx_snapshot_cx_dbgc_debugbus_block(struct kgsl_device *device,
978 u8 *buf, size_t remain, void *priv)
979{
980 struct kgsl_snapshot_debugbus *header =
981 (struct kgsl_snapshot_debugbus *)buf;
982 struct adreno_debugbus_block *block = priv;
983 int i;
984 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
985 unsigned int dwords;
986 size_t size;
987
988 dwords = block->dwords;
989
990 /* For a6xx each debug bus data unit is 2 DWRODS */
991 size = (dwords * sizeof(unsigned int) * 2) + sizeof(*header);
992
993 if (remain < size) {
994 SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
995 return 0;
996 }
997
998 header->id = block->block_id;
999 header->count = dwords * 2;
1000
1001 for (i = 0; i < dwords; i++)
1002 a6xx_cx_debug_bus_read(device, block->block_id, i,
1003 &data[i*2]);
1004
1005 return size;
1006}
1007
/* a6xx_snapshot_debugbus() - Capture debug bus data */
static void a6xx_snapshot_debugbus(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot)
{
	int i;

	/* Program the GX DBGC trace controls: segment type, granularity
	 * and trace enable */
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_CNTLT,
		(0xf << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT_SHIFT) |
		(0x4 << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU_SHIFT) |
		(0x20 << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN_SHIFT));

	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_CNTLM,
		0xf << A6XX_DBGC_CFG_DBGBUS_CTLTM_ENABLE_SHIFT);

	/* Zero the IVTL selects */
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_0, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_1, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_2, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_3, 0);

	/* Select byte lanes 0-15 in natural order */
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_BYTEL_0,
		(0 << A6XX_DBGC_CFG_DBGBUS_BYTEL0_SHIFT) |
		(1 << A6XX_DBGC_CFG_DBGBUS_BYTEL1_SHIFT) |
		(2 << A6XX_DBGC_CFG_DBGBUS_BYTEL2_SHIFT) |
		(3 << A6XX_DBGC_CFG_DBGBUS_BYTEL3_SHIFT) |
		(4 << A6XX_DBGC_CFG_DBGBUS_BYTEL4_SHIFT) |
		(5 << A6XX_DBGC_CFG_DBGBUS_BYTEL5_SHIFT) |
		(6 << A6XX_DBGC_CFG_DBGBUS_BYTEL6_SHIFT) |
		(7 << A6XX_DBGC_CFG_DBGBUS_BYTEL7_SHIFT));
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_BYTEL_1,
		(8 << A6XX_DBGC_CFG_DBGBUS_BYTEL8_SHIFT) |
		(9 << A6XX_DBGC_CFG_DBGBUS_BYTEL9_SHIFT) |
		(10 << A6XX_DBGC_CFG_DBGBUS_BYTEL10_SHIFT) |
		(11 << A6XX_DBGC_CFG_DBGBUS_BYTEL11_SHIFT) |
		(12 << A6XX_DBGC_CFG_DBGBUS_BYTEL12_SHIFT) |
		(13 << A6XX_DBGC_CFG_DBGBUS_BYTEL13_SHIFT) |
		(14 << A6XX_DBGC_CFG_DBGBUS_BYTEL14_SHIFT) |
		(15 << A6XX_DBGC_CFG_DBGBUS_BYTEL15_SHIFT));

	/* Zero the trace masks */
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_0, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_1, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_2, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_3, 0);

	/* Map just the CX DBGC register range (SEL_A..TRACE_BUF2) out of
	 * the device register space so _cx_dbgc_regread/regwrite can
	 * access it directly */
	a6xx_cx_dbgc = ioremap(device->reg_phys +
			(A6XX_CX_DBGC_CFG_DBGBUS_SEL_A << 2),
			(A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2 -
				A6XX_CX_DBGC_CFG_DBGBUS_SEL_A + 1) << 2);

	if (a6xx_cx_dbgc) {
		/*
		 * Mirror the GX DBGC setup into the CX DBGC block.
		 * NOTE(review): the CNTLT write below reuses the GX
		 * A6XX_DBGC_*_SHIFT macros; presumably the CX field layout
		 * is identical — confirm against the A6XX_CX_DBGC register
		 * definitions.
		 */
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_CNTLT,
			(0xf << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT_SHIFT) |
			(0x4 << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU_SHIFT) |
			(0x20 << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN_SHIFT));

		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_CNTLM,
			0xf << A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE_SHIFT);

		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3, 0);

		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0,
			(0 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL0_SHIFT) |
			(1 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL1_SHIFT) |
			(2 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL2_SHIFT) |
			(3 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL3_SHIFT) |
			(4 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL4_SHIFT) |
			(5 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL5_SHIFT) |
			(6 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL6_SHIFT) |
			(7 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL7_SHIFT));
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1,
			(8 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL8_SHIFT) |
			(9 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL9_SHIFT) |
			(10 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL10_SHIFT) |
			(11 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL11_SHIFT) |
			(12 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL12_SHIFT) |
			(13 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL13_SHIFT) |
			(14 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL14_SHIFT) |
			(15 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL15_SHIFT));

		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3, 0);
	} else
		KGSL_DRV_ERR(device, "Unable to ioremap CX_DBGC_CFG block\n");

	/* Add one snapshot section per GX debug bus block */
	for (i = 0; i < ARRAY_SIZE(a6xx_dbgc_debugbus_blocks); i++) {
		kgsl_snapshot_add_section(device,
			KGSL_SNAPSHOT_SECTION_DEBUGBUS,
			snapshot, a6xx_snapshot_dbgc_debugbus_block,
			(void *) &a6xx_dbgc_debugbus_blocks[i]);
	}

	/* CX blocks are only dumped if the ioremap above succeeded */
	if (a6xx_cx_dbgc) {
		for (i = 0; i < ARRAY_SIZE(a6xx_cx_dbgc_debugbus_blocks); i++) {
			kgsl_snapshot_add_section(device,
				KGSL_SNAPSHOT_SECTION_DEBUGBUS,
				snapshot, a6xx_snapshot_cx_dbgc_debugbus_block,
				(void *) &a6xx_cx_dbgc_debugbus_blocks[i]);
		}
		iounmap(a6xx_cx_dbgc);
	}
}
1113
Kyle Piefer60733aa2017-03-21 11:24:01 -07001114static size_t a6xx_snapshot_dump_gmu_registers(struct kgsl_device *device,
1115 u8 *buf, size_t remain, void *priv)
1116{
1117 struct kgsl_snapshot_regs *header = (struct kgsl_snapshot_regs *)buf;
1118 struct kgsl_snapshot_registers *regs = priv;
1119 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
1120 int count = 0, j, k;
1121
1122 /* Figure out how many registers we are going to dump */
1123 for (j = 0; j < regs->count; j++) {
1124 int start = regs->regs[j * 2];
1125 int end = regs->regs[j * 2 + 1];
1126
1127 count += (end - start + 1);
1128 }
1129
1130 if (remain < (count * 8) + sizeof(*header)) {
1131 SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
1132 return 0;
1133 }
1134
1135 for (j = 0; j < regs->count; j++) {
1136 unsigned int start = regs->regs[j * 2];
1137 unsigned int end = regs->regs[j * 2 + 1];
1138
1139 for (k = start; k <= end; k++) {
1140 unsigned int val;
1141
1142 kgsl_gmu_regread(device, k, &val);
1143 *data++ = k;
1144 *data++ = val;
1145 }
1146 }
1147
1148 header->count = count;
1149
1150 /* Return the size of the section */
1151 return (count * 8) + sizeof(*header);
1152}
1153
1154static void a6xx_snapshot_gmu(struct kgsl_device *device,
1155 struct kgsl_snapshot *snapshot)
1156{
1157 struct kgsl_snapshot_registers gmu_regs = {
1158 .regs = a6xx_gmu_registers,
1159 .count = ARRAY_SIZE(a6xx_gmu_registers) / 2,
1160 };
1161
1162 if (!kgsl_gmu_isenabled(device))
1163 return;
1164
1165 kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
1166 snapshot, a6xx_snapshot_dump_gmu_registers, &gmu_regs);
1167}
1168
/*
 * _a6xx_do_crashdump() - Run the CP crash dump script on the hardware
 * @device: Device pointer
 *
 * Point the CP crash dumper at the capture script built by
 * a6xx_crashdump_init() and poll for completion.  crash_dump_valid is
 * set only if the dumper finishes within the timeout, so callers know
 * whether the data in a6xx_crashdump_registers can be trusted.
 */
static void _a6xx_do_crashdump(struct kgsl_device *device)
{
	unsigned long wait_time;
	unsigned int reg = 0;
	unsigned int val;

	crash_dump_valid = false;

	/* Bail if the script/data buffers were never allocated */
	if (a6xx_capturescript.gpuaddr == 0 ||
		a6xx_crashdump_registers.gpuaddr == 0)
		return;

	/* If the SMMU is stalled we cannot do a crash dump */
	kgsl_regread(device, A6XX_RBBM_STATUS3, &val);
	if (val & BIT(24))
		return;

	/* Turn on APRIV so we can access the buffers */
	kgsl_regwrite(device, A6XX_CP_MISC_CNTL, 1);

	/* Program the script address and kick off the dumper */
	kgsl_regwrite(device, A6XX_CP_CRASH_SCRIPT_BASE_LO,
			lower_32_bits(a6xx_capturescript.gpuaddr));
	kgsl_regwrite(device, A6XX_CP_CRASH_SCRIPT_BASE_HI,
			upper_32_bits(a6xx_capturescript.gpuaddr));
	kgsl_regwrite(device, A6XX_CP_CRASH_DUMP_CNTL, 1);

	/* Poll the status register for the completion bit (bit 1) */
	wait_time = jiffies + msecs_to_jiffies(CP_CRASH_DUMPER_TIMEOUT);
	while (!time_after(jiffies, wait_time)) {
		kgsl_regread(device, A6XX_CP_CRASH_DUMP_STATUS, &reg);
		if (reg & 0x2)
			break;
		cpu_relax();
	}

	/* Turn APRIV back off */
	kgsl_regwrite(device, A6XX_CP_MISC_CNTL, 0);

	if (!(reg & 0x2)) {
		KGSL_CORE_ERR("Crash dump timed out: 0x%X\n", reg);
		return;
	}

	crash_dump_valid = true;
}
1212
/*
 * a6xx_snapshot() - A6XX GPU snapshot function
 * @adreno_dev: Device being snapshotted
 * @snapshot: Pointer to the snapshot instance
 *
 * This is where all of the A6XX specific bits and pieces are grabbed
 * into the snapshot memory
 */
void a6xx_snapshot(struct adreno_device *adreno_dev,
		struct kgsl_snapshot *snapshot)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	struct adreno_snapshot_data *snap_data = gpudev->snapshot_data;

	/*
	 * Try to run the crash dumper first so its results are available
	 * to the register snapshot callbacks below.
	 */
	_a6xx_do_crashdump(device);

	/* General GPU registers */
	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
		snapshot, a6xx_snapshot_registers, NULL);

	/* VBIF registers */
	adreno_snapshot_vbif_registers(device, snapshot,
		a6xx_vbif_snapshot_registers,
		ARRAY_SIZE(a6xx_vbif_snapshot_registers));

	/* CP_SQE indexed registers (count from the per-target sizes) */
	kgsl_snapshot_indexed_registers(device, snapshot,
		A6XX_CP_SQE_STAT_ADDR, A6XX_CP_SQE_STAT_DATA,
		0, snap_data->sect_sizes->cp_pfp);

	/* CP_DRAW_STATE */
	kgsl_snapshot_indexed_registers(device, snapshot,
		A6XX_CP_DRAW_STATE_ADDR, A6XX_CP_DRAW_STATE_DATA,
		0, 0x100);

	/* SQE_UCODE Cache */
	kgsl_snapshot_indexed_registers(device, snapshot,
		A6XX_CP_SQE_UCODE_DBG_ADDR, A6XX_CP_SQE_UCODE_DBG_DATA,
		0, 0x6000);

	/* CP ROQ */
	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG,
		snapshot, adreno_snapshot_cp_roq,
		&snap_data->sect_sizes->roq);

	/* Mempool debug data */
	a6xx_snapshot_mempool(device, snapshot);

	/* Shader memory */
	a6xx_snapshot_shader(device, snapshot);

	/* MVC register section */
	a6xx_snapshot_mvc_regs(device, snapshot);

	/* registers dumped through DBG AHB */
	a6xx_snapshot_dbgahb_regs(device, snapshot);

	/* Debug bus (GX and CX DBGC) */
	a6xx_snapshot_debugbus(device, snapshot);

	/* GMU TCM data dumped through AHB */
	a6xx_snapshot_gmu(device, snapshot);
}
1275
/*
 * _a6xx_crashdump_init_mvc() - Emit crash dump script commands for the
 * MVC register clusters
 * @ptr: Position in the capture script to write commands at
 * @offset: Running byte offset into a6xx_crashdump_registers; advanced
 * as data space is consumed
 *
 * For every cluster and each of its contexts: first emit a command
 * qword pair that programs the aperture for that (cluster, context),
 * then one qword pair per register range telling the dumper where in
 * the data buffer to store the values it reads.  The per-context data
 * offsets are recorded in the cluster struct so the snapshot parser
 * can locate them later.  Returns the number of qwords written.
 */
static int _a6xx_crashdump_init_mvc(uint64_t *ptr, uint64_t *offset)
{
	int qwords = 0;
	unsigned int i, j, k;
	unsigned int count;

	for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
		struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];

		cluster->offset0 = *offset;
		for (j = 0; j < A6XX_NUM_CTXTS; j++) {

			/* Remember where context 1's data will start */
			if (j == 1)
				cluster->offset1 = *offset;

			/* Aperture select: value, then target register in
			 * the high bits with the write marker/count */
			ptr[qwords++] = (cluster->id << 8) | (j << 4) | j;
			ptr[qwords++] =
				((uint64_t)A6XX_CP_APERTURE_CNTL_HOST << 44) |
				(1 << 21) | 1;

			/* One read command per register range: destination
			 * GPU address, then start register and dword count */
			for (k = 0; k < cluster->num_sets; k++) {
				count = REG_PAIR_COUNT(cluster->regs, k);
				ptr[qwords++] =
				a6xx_crashdump_registers.gpuaddr + *offset;
				ptr[qwords++] =
				(((uint64_t)cluster->regs[2 * k]) << 44) |
					count;

				*offset += count * sizeof(unsigned int);
			}
		}
	}

	return qwords;
}
1311
Lynus Vaz9ad67a32017-03-10 14:55:02 +05301312static int _a6xx_crashdump_init_shader(struct a6xx_shader_block *block,
1313 uint64_t *ptr, uint64_t *offset)
1314{
1315 int qwords = 0;
1316 unsigned int j;
1317
1318 /* Capture each bank in the block */
1319 for (j = 0; j < A6XX_NUM_SHADER_BANKS; j++) {
1320 /* Program the aperture */
1321 ptr[qwords++] =
1322 (block->statetype << A6XX_SHADER_STATETYPE_SHIFT) | j;
1323 ptr[qwords++] = (((uint64_t) A6XX_HLSQ_DBG_READ_SEL << 44)) |
1324 (1 << 21) | 1;
1325
1326 /* Read all the data in one chunk */
1327 ptr[qwords++] = a6xx_crashdump_registers.gpuaddr + *offset;
1328 ptr[qwords++] =
1329 (((uint64_t) A6XX_HLSQ_DBG_AHB_READ_APERTURE << 44)) |
1330 block->sz;
1331
1332 /* Remember the offset of the first bank for easy access */
1333 if (j == 0)
1334 block->offset = *offset;
1335
1336 *offset += block->sz * sizeof(unsigned int);
1337 }
1338
1339 return qwords;
1340}
1341
/*
 * a6xx_crashdump_init() - Allocate and build the CP crash dump script
 * @adreno_dev: Adreno device pointer
 *
 * Size and allocate the capture script and data buffers, then fill the
 * script with read commands covering the AHB register list, the shader
 * blocks and the MVC clusters.  Subsequent calls return early once the
 * buffers exist.  The sizing pass below must mirror the build pass
 * exactly or the script will overrun its buffer.
 */
void a6xx_crashdump_init(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned int script_size = 0;
	unsigned int data_size = 0;
	unsigned int i, j, k;
	uint64_t *ptr;
	uint64_t offset = 0;

	/* Already initialized */
	if (a6xx_capturescript.gpuaddr != 0 &&
		a6xx_crashdump_registers.gpuaddr != 0)
		return;

	/*
	 * We need to allocate two buffers:
	 * 1 - the buffer to hold the draw script
	 * 2 - the buffer to hold the data
	 */

	/*
	 * To save the registers, we need 16 bytes per register pair for the
	 * script and a dword for each register in the data
	 */
	for (i = 0; i < ARRAY_SIZE(_a6xx_cd_registers); i++) {
		struct cdregs *regs = &_a6xx_cd_registers[i];

		/* Each pair needs 16 bytes (2 qwords) */
		script_size += (regs->size / 2) * 16;

		/* Each register needs a dword in the data */
		for (j = 0; j < regs->size / 2; j++)
			data_size += REG_PAIR_COUNT(regs->regs, j) *
				sizeof(unsigned int);

	}

	/*
	 * To save the shader blocks for each block in each type we need 32
	 * bytes for the script (16 bytes to program the aperture and 16 to
	 * read the data) and then a block specific number of bytes to hold
	 * the data
	 */
	for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
		script_size += 32 * A6XX_NUM_SHADER_BANKS;
		data_size += a6xx_shader_blocks[i].sz * sizeof(unsigned int) *
			A6XX_NUM_SHADER_BANKS;
	}

	/* Calculate the script and data size for MVC registers */
	for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
		struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];

		for (j = 0; j < A6XX_NUM_CTXTS; j++) {

			/* 16 bytes for programming the aperture */
			script_size += 16;

			/* Reading each pair of registers takes 16 bytes */
			script_size += 16 * cluster->num_sets;

			/* A dword per register read from the cluster list */
			for (k = 0; k < cluster->num_sets; k++)
				data_size += REG_PAIR_COUNT(cluster->regs, k) *
					sizeof(unsigned int);
		}
	}

	/* Now allocate the script and data buffers */

	/* The script buffer needs 2 extra qwords on the end (terminator) */
	if (kgsl_allocate_global(device, &a6xx_capturescript,
		script_size + 16, KGSL_MEMFLAGS_GPUREADONLY,
		KGSL_MEMDESC_PRIVILEGED, "capturescript"))
		return;

	/* Free the script buffer if the data buffer allocation fails */
	if (kgsl_allocate_global(device, &a6xx_crashdump_registers, data_size,
		0, KGSL_MEMDESC_PRIVILEGED, "capturescript_regs")) {
		kgsl_free_global(KGSL_DEVICE(adreno_dev), &a6xx_capturescript);
		return;
	}

	/* Build the crash script */

	ptr = (uint64_t *)a6xx_capturescript.hostptr;

	/* For the registers, program a read command for each pair */
	for (i = 0; i < ARRAY_SIZE(_a6xx_cd_registers); i++) {
		struct cdregs *regs = &_a6xx_cd_registers[i];

		for (j = 0; j < regs->size / 2; j++) {
			unsigned int r = REG_PAIR_COUNT(regs->regs, j);
			*ptr++ = a6xx_crashdump_registers.gpuaddr + offset;
			*ptr++ = (((uint64_t) regs->regs[2 * j]) << 44) | r;
			offset += r * sizeof(unsigned int);
		}
	}

	/* Program each shader block */
	for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
		ptr += _a6xx_crashdump_init_shader(&a6xx_shader_blocks[i], ptr,
							&offset);
	}

	/* Program the capturescript for the MVC registers */
	ptr += _a6xx_crashdump_init_mvc(ptr, &offset);

	/* Two zero qwords terminate the script */
	*ptr++ = 0;
	*ptr++ = 0;
}