blob: 589417fe9e983689ab33c4beb0a2d60e50d9786e [file] [log] [blame]
Shrenuj Bansal41665402016-12-16 15:25:54 -08001/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#include <linux/io.h>
15#include "kgsl.h"
16#include "adreno.h"
17#include "kgsl_snapshot.h"
18#include "adreno_snapshot.h"
19#include "a6xx_reg.h"
20#include "adreno_a6xx.h"
Kyle Piefer60733aa2017-03-21 11:24:01 -070021#include "kgsl_gmu.h"
Shrenuj Bansal41665402016-12-16 15:25:54 -080022
/* Number of hardware contexts tracked per context-clustered register block */
#define A6XX_NUM_CTXTS 2
/* VBIF sub-block instance counts used when walking the VBIF debug bus */
#define A6XX_NUM_AXI_ARB_BLOCKS 2
#define A6XX_NUM_XIN_AXI_BLOCKS 5
#define A6XX_NUM_XIN_CORE_BLOCKS 4
Shrenuj Bansal41665402016-12-16 15:25:54 -080027
/* GRAS cluster context registers: start/end pairs, both ends inclusive */
static const unsigned int a6xx_gras_cluster[] = {
	0x8000, 0x8006, 0x8010, 0x8092, 0x8094, 0x809D, 0x80A0, 0x80A6,
	0x80AF, 0x80F1, 0x8100, 0x8107, 0x8109, 0x8109, 0x8110, 0x8110,
	0x8400, 0x840B,
};
33
/*
 * PS cluster registers visible through the RB RAC aperture
 * (selected via _a6xx_rb_rac_aperture); start/end pairs, inclusive.
 */
static const unsigned int a6xx_ps_cluster_rac[] = {
	0x8800, 0x8806, 0x8809, 0x8811, 0x8818, 0x881E, 0x8820, 0x8865,
	0x8870, 0x8879, 0x8880, 0x8889, 0x8890, 0x8891, 0x8898, 0x8898,
	0x88C0, 0x88C1, 0x88D0, 0x88E3, 0x8900, 0x890C, 0x890F, 0x891A,
	0x8C00, 0x8C01, 0x8C08, 0x8C10, 0x8C17, 0x8C1F, 0x8C26, 0x8C33,
};
40
/*
 * PS cluster registers visible through the RB RBP aperture
 * (selected via _a6xx_rb_rbp_aperture); start/end pairs, inclusive.
 */
static const unsigned int a6xx_ps_cluster_rbp[] = {
	0x88F0, 0x88F3, 0x890D, 0x890E, 0x8927, 0x8928, 0x8BF0, 0x8BF1,
	0x8C02, 0x8C07, 0x8C11, 0x8C16, 0x8C20, 0x8C25,
};
45
/* PS cluster registers that need no aperture select; start/end pairs */
static const unsigned int a6xx_ps_cluster[] = {
	0x9200, 0x9216, 0x9218, 0x9236, 0x9300, 0x9306,
};
49
/* FE (front end) cluster context registers; start/end pairs, inclusive */
static const unsigned int a6xx_fe_cluster[] = {
	0x9300, 0x9306, 0x9800, 0x9806, 0x9B00, 0x9B07, 0xA000, 0xA009,
	0xA00E, 0xA0EF, 0xA0F8, 0xA0F8,
};
54
/* PC_VS cluster context registers; start/end pairs, inclusive */
static const unsigned int a6xx_pc_vs_cluster[] = {
	0x9100, 0x9108, 0x9300, 0x9306, 0x9980, 0x9981, 0x9B00, 0x9B07,
};
58
/*
 * Aperture select descriptor: writing @val into @host_reg (for direct AHB
 * reads) or @cd_reg (for crash dumper reads) chooses which RB sub-block
 * (RAC or RBP) is exposed through the shared register range.
 */
static const struct sel_reg {
	unsigned int host_reg;	/* select register written by the host */
	unsigned int cd_reg;	/* select register written via crash dumper */
	unsigned int val;	/* value that selects the sub-block */
} _a6xx_rb_rac_aperture = {
	.host_reg = A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST,
	.cd_reg = A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD,
	.val = 0x0,
},
_a6xx_rb_rbp_aperture = {
	.host_reg = A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST,
	.cd_reg = A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD,
	.val = 0x9,
};
73
/*
 * Per-cluster register dump descriptors. num_sets is the number of
 * start/end pairs (hence ARRAY_SIZE/2). offset0/offset1 are filled in
 * later with the crash dumper buffer offsets for context 0/1.
 */
static struct a6xx_cluster_registers {
	unsigned int id;		/* CP_CLUSTER_* identifier */
	const unsigned int *regs;	/* start/end register pairs */
	unsigned int num_sets;		/* number of pairs in @regs */
	const struct sel_reg *sel;	/* optional aperture select, or NULL */
	unsigned int offset0;		/* crashdump buffer offset, ctxt 0 */
	unsigned int offset1;		/* crashdump buffer offset, ctxt 1 */
} a6xx_clusters[] = {
	{ CP_CLUSTER_GRAS, a6xx_gras_cluster, ARRAY_SIZE(a6xx_gras_cluster)/2,
		NULL },
	{ CP_CLUSTER_PS, a6xx_ps_cluster_rac, ARRAY_SIZE(a6xx_ps_cluster_rac)/2,
		&_a6xx_rb_rac_aperture },
	{ CP_CLUSTER_PS, a6xx_ps_cluster_rbp, ARRAY_SIZE(a6xx_ps_cluster_rbp)/2,
		&_a6xx_rb_rbp_aperture },
	{ CP_CLUSTER_PS, a6xx_ps_cluster, ARRAY_SIZE(a6xx_ps_cluster)/2,
		NULL },
	{ CP_CLUSTER_FE, a6xx_fe_cluster, ARRAY_SIZE(a6xx_fe_cluster)/2,
		NULL },
	{ CP_CLUSTER_PC_VS, a6xx_pc_vs_cluster,
		ARRAY_SIZE(a6xx_pc_vs_cluster)/2, NULL },
};
95
/* Pairs a cluster descriptor with the context id being dumped */
struct a6xx_cluster_regs_info {
	struct a6xx_cluster_registers *cluster;
	unsigned int ctxt_id;	/* 0 or 1 (A6XX_NUM_CTXTS) */
};
100
/*
 * SP/TP/HLSQ context-clustered register tables read through the DBGAHB
 * aperture. All tables are start/end dword-offset pairs, inclusive.
 * "duplicate" tables are shared ranges dumped once per owning cluster.
 */
static const unsigned int a6xx_sp_vs_hlsq_cluster[] = {
	0xB800, 0xB803, 0xB820, 0xB822,
};

static const unsigned int a6xx_sp_vs_sp_cluster[] = {
	0xA800, 0xA824, 0xA830, 0xA83C, 0xA840, 0xA864, 0xA870, 0xA895,
	0xA8A0, 0xA8AF, 0xA8C0, 0xA8C3,
};

static const unsigned int a6xx_hlsq_duplicate_cluster[] = {
	0xBB10, 0xBB11, 0xBB20, 0xBB29,
};

static const unsigned int a6xx_hlsq_2d_duplicate_cluster[] = {
	0xBD80, 0xBD80,
};

static const unsigned int a6xx_sp_duplicate_cluster[] = {
	0xAB00, 0xAB00, 0xAB04, 0xAB05, 0xAB10, 0xAB1B, 0xAB20, 0xAB20,
};

static const unsigned int a6xx_tp_duplicate_cluster[] = {
	0xB300, 0xB307, 0xB309, 0xB309, 0xB380, 0xB382,
};

static const unsigned int a6xx_sp_ps_hlsq_cluster[] = {
	0xB980, 0xB980, 0xB982, 0xB987, 0xB990, 0xB99B, 0xB9A0, 0xB9A2,
	0xB9C0, 0xB9C9,
};

static const unsigned int a6xx_sp_ps_hlsq_2d_cluster[] = {
	0xBD80, 0xBD80,
};

static const unsigned int a6xx_sp_ps_sp_cluster[] = {
	0xA980, 0xA9A8, 0xA9B0, 0xA9BC, 0xA9D0, 0xA9D3, 0xA9E0, 0xA9F3,
	0xAA00, 0xAA00, 0xAA30, 0xAA31,
};

static const unsigned int a6xx_sp_ps_sp_2d_cluster[] = {
	0xACC0, 0xACC0,
};

static const unsigned int a6xx_sp_ps_tp_cluster[] = {
	0xB180, 0xB183, 0xB190, 0xB191,
};

static const unsigned int a6xx_sp_ps_tp_2d_cluster[] = {
	0xB4C0, 0xB4D1,
};
151
/*
 * DBGAHB context cluster descriptors. regbase is the byte-addressed base
 * of the aperture window; statetype selects the hardware state machine to
 * read. offset0/offset1 are filled in later with crash dumper buffer
 * offsets for context 0/1.
 */
static struct a6xx_cluster_dbgahb_registers {
	unsigned int id;		/* CP_CLUSTER_* identifier */
	unsigned int regbase;		/* aperture base (byte address) */
	unsigned int statetype;		/* HLSQ state type selector */
	const unsigned int *regs;	/* start/end register pairs */
	unsigned int num_sets;		/* number of pairs in @regs */
	unsigned int offset0;		/* crashdump buffer offset, ctxt 0 */
	unsigned int offset1;		/* crashdump buffer offset, ctxt 1 */
} a6xx_dbgahb_ctx_clusters[] = {
	{ CP_CLUSTER_SP_VS, 0x0002E000, 0x41, a6xx_sp_vs_hlsq_cluster,
		ARRAY_SIZE(a6xx_sp_vs_hlsq_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002A000, 0x21, a6xx_sp_vs_sp_cluster,
		ARRAY_SIZE(a6xx_sp_vs_sp_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002E000, 0x41, a6xx_hlsq_duplicate_cluster,
		ARRAY_SIZE(a6xx_hlsq_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002F000, 0x45, a6xx_hlsq_2d_duplicate_cluster,
		ARRAY_SIZE(a6xx_hlsq_2d_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002A000, 0x21, a6xx_sp_duplicate_cluster,
		ARRAY_SIZE(a6xx_sp_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002C000, 0x1, a6xx_tp_duplicate_cluster,
		ARRAY_SIZE(a6xx_tp_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002E000, 0x42, a6xx_sp_ps_hlsq_cluster,
		ARRAY_SIZE(a6xx_sp_ps_hlsq_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002F000, 0x46, a6xx_sp_ps_hlsq_2d_cluster,
		ARRAY_SIZE(a6xx_sp_ps_hlsq_2d_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002A000, 0x22, a6xx_sp_ps_sp_cluster,
		ARRAY_SIZE(a6xx_sp_ps_sp_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002B000, 0x26, a6xx_sp_ps_sp_2d_cluster,
		ARRAY_SIZE(a6xx_sp_ps_sp_2d_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002C000, 0x2, a6xx_sp_ps_tp_cluster,
		ARRAY_SIZE(a6xx_sp_ps_tp_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002D000, 0x6, a6xx_sp_ps_tp_2d_cluster,
		ARRAY_SIZE(a6xx_sp_ps_tp_2d_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002E000, 0x42, a6xx_hlsq_duplicate_cluster,
		ARRAY_SIZE(a6xx_hlsq_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002A000, 0x22, a6xx_sp_duplicate_cluster,
		ARRAY_SIZE(a6xx_sp_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002C000, 0x2, a6xx_tp_duplicate_cluster,
		ARRAY_SIZE(a6xx_tp_duplicate_cluster) / 2 },
};
192
/* Pairs a DBGAHB cluster descriptor with the context id being dumped */
struct a6xx_cluster_dbgahb_regs_info {
	struct a6xx_cluster_dbgahb_registers *cluster;
	unsigned int ctxt_id;	/* 0 or 1 */
};
197
/*
 * Non-context (global) DBGAHB register tables and their descriptors.
 * These registers are not banked per context; offset is filled in later
 * with the crash dumper buffer offset.
 */
static const unsigned int a6xx_hlsq_non_ctx_registers[] = {
	0xBE00, 0xBE01, 0xBE04, 0xBE05, 0xBE08, 0xBE09, 0xBE10, 0xBE15,
	0xBE20, 0xBE23,
};

static const unsigned int a6xx_sp_non_ctx_registers[] = {
	0xAE00, 0xAE04, 0xAE0C, 0xAE0C, 0xAE0F, 0xAE2B, 0xAE30, 0xAE32,
	0xAE35, 0xAE35, 0xAE3A, 0xAE3F, 0xAE50, 0xAE52,
};

static const unsigned int a6xx_tp_non_ctx_registers[] = {
	0xB600, 0xB601, 0xB604, 0xB605, 0xB610, 0xB61B, 0xB620, 0xB623,
};

static struct a6xx_non_ctx_dbgahb_registers {
	unsigned int regbase;		/* aperture base (byte address) */
	unsigned int statetype;		/* HLSQ state type selector */
	const unsigned int *regs;	/* start/end register pairs */
	unsigned int num_sets;		/* number of pairs in @regs */
	unsigned int offset;		/* crashdump buffer offset */
} a6xx_non_ctx_dbgahb[] = {
	{ 0x0002F800, 0x40, a6xx_hlsq_non_ctx_registers,
		ARRAY_SIZE(a6xx_hlsq_non_ctx_registers) / 2 },
	{ 0x0002B800, 0x20, a6xx_sp_non_ctx_registers,
		ARRAY_SIZE(a6xx_sp_non_ctx_registers) / 2 },
	{ 0x0002D800, 0x0, a6xx_tp_non_ctx_registers,
		ARRAY_SIZE(a6xx_tp_non_ctx_registers) / 2 },
};
226
/* VBIF registers for VBIF version 0x20xxxxxx; start/end pairs, inclusive */
static const unsigned int a6xx_vbif_ver_20xxxxxx_registers[] = {
	/* VBIF */
	0x3000, 0x3007, 0x300C, 0x3014, 0x3018, 0x302D, 0x3030, 0x3031,
	0x3034, 0x3036, 0x303C, 0x303D, 0x3040, 0x3040, 0x3042, 0x3042,
	0x3049, 0x3049, 0x3058, 0x3058, 0x305A, 0x3061, 0x3064, 0x3068,
	0x306C, 0x306D, 0x3080, 0x3088, 0x308B, 0x308C, 0x3090, 0x3094,
	0x3098, 0x3098, 0x309C, 0x309C, 0x30C0, 0x30C0, 0x30C8, 0x30C8,
	0x30D0, 0x30D0, 0x30D8, 0x30D8, 0x30E0, 0x30E0, 0x3100, 0x3100,
	0x3108, 0x3108, 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120,
	0x3124, 0x3125, 0x3129, 0x3129, 0x3131, 0x3131, 0x3154, 0x3154,
	0x3156, 0x3156, 0x3158, 0x3158, 0x315A, 0x315A, 0x315C, 0x315C,
	0x315E, 0x315E, 0x3160, 0x3160, 0x3162, 0x3162, 0x340C, 0x340C,
	0x3410, 0x3410, 0x3800, 0x3801,
};
241
/* GBIF registers (targets that use GBIF instead of VBIF); start/end pairs */
static const unsigned int a6xx_gbif_registers[] = {
	/* GBIF */
	0x3C00, 0X3C0B, 0X3C40, 0X3C47, 0X3CC0, 0X3CD1,
};
246
/* GMU registers in the GX power domain; start/end pairs, inclusive */
static const unsigned int a6xx_gmu_gx_registers[] = {
	/* GMU GX */
	0x1A800, 0x1A800, 0x1A810, 0x1A813, 0x1A816, 0x1A816, 0x1A818, 0x1A81B,
	0x1A81E, 0x1A81E, 0x1A820, 0x1A823, 0x1A826, 0x1A826, 0x1A828, 0x1A82B,
	0x1A82E, 0x1A82E, 0x1A830, 0x1A833, 0x1A836, 0x1A836, 0x1A838, 0x1A83B,
	0x1A83E, 0x1A83E, 0x1A840, 0x1A843, 0x1A846, 0x1A846, 0x1A880, 0x1A884,
	0x1A900, 0x1A92B, 0x1A940, 0x1A940,
};
255
/* GMU CX-domain registers (TCM, CX, RSCC, AO, GPU CC); start/end pairs */
static const unsigned int a6xx_gmu_registers[] = {
	/* GMU TCM */
	0x1B400, 0x1C3FF, 0x1C400, 0x1D3FF,
	/* GMU CX */
	0x1F400, 0x1F407, 0x1F410, 0x1F412, 0x1F500, 0x1F500, 0x1F507, 0x1F50A,
	0x1F800, 0x1F804, 0x1F807, 0x1F808, 0x1F80B, 0x1F80C, 0x1F80F, 0x1F81C,
	0x1F824, 0x1F82A, 0x1F82D, 0x1F830, 0x1F840, 0x1F853, 0x1F887, 0x1F889,
	0x1F8A0, 0x1F8A2, 0x1F8A4, 0x1F8AF, 0x1F8C0, 0x1F8C3, 0x1F8D0, 0x1F8D0,
	0x1F8E4, 0x1F8E4, 0x1F8E8, 0x1F8EC, 0x1F900, 0x1F903, 0x1F940, 0x1F940,
	0x1F942, 0x1F944, 0x1F94C, 0x1F94D, 0x1F94F, 0x1F951, 0x1F954, 0x1F954,
	0x1F957, 0x1F958, 0x1F95D, 0x1F95D, 0x1F962, 0x1F962, 0x1F964, 0x1F965,
	0x1F980, 0x1F986, 0x1F990, 0x1F99E, 0x1F9C0, 0x1F9C0, 0x1F9C5, 0x1F9CC,
	0x1F9E0, 0x1F9E2, 0x1F9F0, 0x1F9F0, 0x1FA00, 0x1FA01,
	/* GPU RSCC */
	0x2348C, 0x2348C, 0x23501, 0x23502, 0x23740, 0x23742, 0x23744, 0x23747,
	0x2374C, 0x23787, 0x237EC, 0x237EF, 0x237F4, 0x2382F, 0x23894, 0x23897,
	0x2389C, 0x238D7, 0x2393C, 0x2393F, 0x23944, 0x2397F,
	/* GMU AO */
	0x23B00, 0x23B16, 0x23C00, 0x23C00,
	/* GPU CC */
	0x24000, 0x24012, 0x24040, 0x24052, 0x24400, 0x24404, 0x24407, 0x2440B,
	0x24415, 0x2441C, 0x2441E, 0x2442D, 0x2443C, 0x2443D, 0x2443F, 0x24440,
	0x24442, 0x24449, 0x24458, 0x2445A, 0x24540, 0x2455E, 0x24800, 0x24802,
	0x24C00, 0x24C02, 0x25400, 0x25402, 0x25800, 0x25802, 0x25C00, 0x25C02,
	0x26000, 0x26002,
	/* GPU CC ACD */
	0x26400, 0x26416, 0x26420, 0x26427,
};
284
/* RB registers behind the RAC aperture; start/end pairs, inclusive */
static const unsigned int a6xx_rb_rac_registers[] = {
	0x8E04, 0x8E05, 0x8E07, 0x8E08, 0x8E10, 0x8E1C, 0x8E20, 0x8E25,
	0x8E28, 0x8E28, 0x8E2C, 0x8E2F, 0x8E50, 0x8E52,
};

/* RB registers behind the RBP aperture; start/end pairs, inclusive */
static const unsigned int a6xx_rb_rbp_registers[] = {
	0x8E01, 0x8E01, 0x8E0C, 0x8E0C, 0x8E3B, 0x8E3E, 0x8E40, 0x8E43,
	0x8E53, 0x8E5F, 0x8E70, 0x8E77,
};
294
/* Maps a VBIF version (value/mask match) to the register list to dump */
static const struct adreno_vbif_snapshot_registers
a6xx_vbif_snapshot_registers[] = {
	{ 0x20040000, 0xFF000000, a6xx_vbif_ver_20xxxxxx_registers,
		ARRAY_SIZE(a6xx_vbif_ver_20xxxxxx_registers)/2},
};
300
301/*
302 * Set of registers to dump for A6XX on snapshot.
303 * Registers in pairs - first value is the start offset, second
304 * is the stop offset (inclusive)
305 */
306
/*
 * Set of registers to dump for A6XX on snapshot.
 * Registers in pairs - first value is the start offset, second
 * is the stop offset (inclusive).
 */
static const unsigned int a6xx_registers[] = {
	/* RBBM */
	0x0000, 0x0002, 0x0010, 0x0010, 0x0012, 0x0012, 0x0018, 0x001B,
	0x001e, 0x0032, 0x0038, 0x003C, 0x0042, 0x0042, 0x0044, 0x0044,
	0x0047, 0x0047, 0x0056, 0x0056, 0x00AD, 0x00AE, 0x00B0, 0x00FB,
	0x0100, 0x011D, 0x0200, 0x020D, 0x0218, 0x023D, 0x0400, 0x04F9,
	0x0500, 0x0500, 0x0505, 0x050B, 0x050E, 0x0511, 0x0533, 0x0533,
	0x0540, 0x0555,
	/* CP */
	0x0800, 0x0808, 0x0810, 0x0813, 0x0820, 0x0821, 0x0823, 0x0824,
	0x0826, 0x0827, 0x0830, 0x0833, 0x0840, 0x0843, 0x084F, 0x086F,
	0x0880, 0x088A, 0x08A0, 0x08AB, 0x08C0, 0x08C4, 0x08D0, 0x08DD,
	0x08F0, 0x08F3, 0x0900, 0x0903, 0x0908, 0x0911, 0x0928, 0x093E,
	0x0942, 0x094D, 0x0980, 0x0984, 0x098D, 0x0996, 0x0998, 0x099E,
	0x09A0, 0x09A6, 0x09A8, 0x09AE, 0x09B0, 0x09B1, 0x09C2, 0x09C8,
	0x0A00, 0x0A03,
	/* VSC */
	0x0C00, 0x0C04, 0x0C06, 0x0C06, 0x0C10, 0x0CD9, 0x0E00, 0x0E0E,
	/* UCHE */
	0x0E10, 0x0E13, 0x0E17, 0x0E19, 0x0E1C, 0x0E2B, 0x0E30, 0x0E32,
	0x0E38, 0x0E39,
	/* GRAS */
	0x8600, 0x8601, 0x8610, 0x861B, 0x8620, 0x8620, 0x8628, 0x862B,
	0x8630, 0x8637,
	/* VPC */
	0x9600, 0x9604, 0x9624, 0x9637,
	/* PC */
	0x9E00, 0x9E01, 0x9E03, 0x9E0E, 0x9E11, 0x9E16, 0x9E19, 0x9E19,
	0x9E1C, 0x9E1C, 0x9E20, 0x9E23, 0x9E30, 0x9E31, 0x9E34, 0x9E34,
	0x9E70, 0x9E72, 0x9E78, 0x9E79, 0x9E80, 0x9FFF,
	/* VFD */
	0xA600, 0xA601, 0xA603, 0xA603, 0xA60A, 0xA60A, 0xA610, 0xA617,
	0xA630, 0xA630,
};
341
Lynus Vaz030473e2017-06-22 17:33:06 +0530342/*
343 * Set of registers to dump for A6XX before actually triggering crash dumper.
344 * Registers in pairs - first value is the start offset, second
345 * is the stop offset (inclusive)
346 */
/*
 * Registers dumped BEFORE the crash dumper is triggered, so basic GPU
 * status is captured even if the crash dumper itself hangs.
 * Registers in pairs - start offset, stop offset (inclusive).
 */
static const unsigned int a6xx_pre_crashdumper_registers[] = {
	/* RBBM: RBBM_STATUS - RBBM_STATUS3 */
	0x210, 0x213,
	/* CP: CP_STATUS_1 */
	0x825, 0x825,
};
353
/* Hardware block ids selectable on the A6XX debug bus */
enum a6xx_debugbus_id {
	A6XX_DBGBUS_CP = 0x1,
	A6XX_DBGBUS_RBBM = 0x2,
	A6XX_DBGBUS_VBIF = 0x3,
	A6XX_DBGBUS_HLSQ = 0x4,
	A6XX_DBGBUS_UCHE = 0x5,
	A6XX_DBGBUS_DPM = 0x6,
	A6XX_DBGBUS_TESS = 0x7,
	A6XX_DBGBUS_PC = 0x8,
	A6XX_DBGBUS_VFDP = 0x9,
	A6XX_DBGBUS_VPC = 0xa,
	A6XX_DBGBUS_TSE = 0xb,
	A6XX_DBGBUS_RAS = 0xc,
	A6XX_DBGBUS_VSC = 0xd,
	A6XX_DBGBUS_COM = 0xe,
	A6XX_DBGBUS_LRZ = 0x10,
	A6XX_DBGBUS_A2D = 0x11,
	A6XX_DBGBUS_CCUFCHE = 0x12,
	A6XX_DBGBUS_GMU_CX = 0x13,
	A6XX_DBGBUS_RBP = 0x14,
	A6XX_DBGBUS_DCS = 0x15,
	A6XX_DBGBUS_RBBM_CFG = 0x16,
	A6XX_DBGBUS_CX = 0x17,
	A6XX_DBGBUS_GMU_GX = 0x18,
	A6XX_DBGBUS_TPFCHE = 0x19,
	A6XX_DBGBUS_GPC = 0x1d,
	A6XX_DBGBUS_LARC = 0x1e,
	A6XX_DBGBUS_HLSQ_SPTP = 0x1f,
	A6XX_DBGBUS_RB_0 = 0x20,
	A6XX_DBGBUS_RB_1 = 0x21,
	A6XX_DBGBUS_UCHE_WRAPPER = 0x24,
	A6XX_DBGBUS_CCU_0 = 0x28,
	A6XX_DBGBUS_CCU_1 = 0x29,
	A6XX_DBGBUS_VFD_0 = 0x38,
	A6XX_DBGBUS_VFD_1 = 0x39,
	A6XX_DBGBUS_VFD_2 = 0x3a,
	A6XX_DBGBUS_VFD_3 = 0x3b,
	A6XX_DBGBUS_SP_0 = 0x40,
	A6XX_DBGBUS_SP_1 = 0x41,
	A6XX_DBGBUS_TPL1_0 = 0x48,
	A6XX_DBGBUS_TPL1_1 = 0x49,
	A6XX_DBGBUS_TPL1_2 = 0x4a,
	A6XX_DBGBUS_TPL1_3 = 0x4b,
};
398
/* Debug bus blocks read through the GX-side DBGC; each dumps 0x100 dwords */
static const struct adreno_debugbus_block a6xx_dbgc_debugbus_blocks[] = {
	{ A6XX_DBGBUS_CP, 0x100, },
	{ A6XX_DBGBUS_RBBM, 0x100, },
	{ A6XX_DBGBUS_HLSQ, 0x100, },
	{ A6XX_DBGBUS_UCHE, 0x100, },
	{ A6XX_DBGBUS_DPM, 0x100, },
	{ A6XX_DBGBUS_TESS, 0x100, },
	{ A6XX_DBGBUS_PC, 0x100, },
	{ A6XX_DBGBUS_VFDP, 0x100, },
	{ A6XX_DBGBUS_VPC, 0x100, },
	{ A6XX_DBGBUS_TSE, 0x100, },
	{ A6XX_DBGBUS_RAS, 0x100, },
	{ A6XX_DBGBUS_VSC, 0x100, },
	{ A6XX_DBGBUS_COM, 0x100, },
	{ A6XX_DBGBUS_LRZ, 0x100, },
	{ A6XX_DBGBUS_A2D, 0x100, },
	{ A6XX_DBGBUS_CCUFCHE, 0x100, },
	{ A6XX_DBGBUS_RBP, 0x100, },
	{ A6XX_DBGBUS_DCS, 0x100, },
	{ A6XX_DBGBUS_RBBM_CFG, 0x100, },
	{ A6XX_DBGBUS_GMU_GX, 0x100, },
	{ A6XX_DBGBUS_TPFCHE, 0x100, },
	{ A6XX_DBGBUS_GPC, 0x100, },
	{ A6XX_DBGBUS_LARC, 0x100, },
	{ A6XX_DBGBUS_HLSQ_SPTP, 0x100, },
	{ A6XX_DBGBUS_RB_0, 0x100, },
	{ A6XX_DBGBUS_RB_1, 0x100, },
	{ A6XX_DBGBUS_UCHE_WRAPPER, 0x100, },
	{ A6XX_DBGBUS_CCU_0, 0x100, },
	{ A6XX_DBGBUS_CCU_1, 0x100, },
	{ A6XX_DBGBUS_VFD_0, 0x100, },
	{ A6XX_DBGBUS_VFD_1, 0x100, },
	{ A6XX_DBGBUS_VFD_2, 0x100, },
	{ A6XX_DBGBUS_VFD_3, 0x100, },
	{ A6XX_DBGBUS_SP_0, 0x100, },
	{ A6XX_DBGBUS_SP_1, 0x100, },
	{ A6XX_DBGBUS_TPL1_0, 0x100, },
	{ A6XX_DBGBUS_TPL1_1, 0x100, },
	{ A6XX_DBGBUS_TPL1_2, 0x100, },
	{ A6XX_DBGBUS_TPL1_3, 0x100, },
};
Shrenuj Bansal41665402016-12-16 15:25:54 -0800440
/* VBIF debug bus block (dumped via its own per-sub-block protocol) */
static const struct adreno_debugbus_block a6xx_vbif_debugbus_blocks = {
	A6XX_DBGBUS_VBIF, 0x100,
};
444
/* Debug bus blocks read through the CX-side DBGC */
static const struct adreno_debugbus_block a6xx_cx_dbgc_debugbus_blocks[] = {
	{ A6XX_DBGBUS_GMU_CX, 0x100, },
	{ A6XX_DBGBUS_CX, 0x100, },
};
449
Lynus Vaz9ad67a32017-03-10 14:55:02 +0530450#define A6XX_NUM_SHADER_BANKS 3
451#define A6XX_SHADER_STATETYPE_SHIFT 8
452
/* State type ids for the shader memories readable via the HLSQ aperture */
enum a6xx_shader_obj {
	A6XX_TP0_TMO_DATA = 0x9,
	A6XX_TP0_SMO_DATA = 0xa,
	A6XX_TP0_MIPMAP_BASE_DATA = 0xb,
	A6XX_TP1_TMO_DATA = 0x19,
	A6XX_TP1_SMO_DATA = 0x1a,
	A6XX_TP1_MIPMAP_BASE_DATA = 0x1b,
	A6XX_SP_INST_DATA = 0x29,
	A6XX_SP_LB_0_DATA = 0x2a,
	A6XX_SP_LB_1_DATA = 0x2b,
	A6XX_SP_LB_2_DATA = 0x2c,
	A6XX_SP_LB_3_DATA = 0x2d,
	A6XX_SP_LB_4_DATA = 0x2e,
	A6XX_SP_LB_5_DATA = 0x2f,
	A6XX_SP_CB_BINDLESS_DATA = 0x30,
	A6XX_SP_CB_LEGACY_DATA = 0x31,
	A6XX_SP_UAV_DATA = 0x32,
	A6XX_SP_INST_TAG = 0x33,
	A6XX_SP_CB_BINDLESS_TAG = 0x34,
	A6XX_SP_TMO_UMO_TAG = 0x35,
	A6XX_SP_SMO_TAG = 0x36,
	A6XX_SP_STATE_DATA = 0x37,
	A6XX_HLSQ_CHUNK_CVS_RAM = 0x49,
	A6XX_HLSQ_CHUNK_CPS_RAM = 0x4a,
	A6XX_HLSQ_CHUNK_CVS_RAM_TAG = 0x4b,
	A6XX_HLSQ_CHUNK_CPS_RAM_TAG = 0x4c,
	A6XX_HLSQ_ICB_CVS_CB_BASE_TAG = 0x4d,
	A6XX_HLSQ_ICB_CPS_CB_BASE_TAG = 0x4e,
	A6XX_HLSQ_CVS_MISC_RAM = 0x50,
	A6XX_HLSQ_CPS_MISC_RAM = 0x51,
	A6XX_HLSQ_INST_RAM = 0x52,
	A6XX_HLSQ_GFX_CVS_CONST_RAM = 0x53,
	A6XX_HLSQ_GFX_CPS_CONST_RAM = 0x54,
	A6XX_HLSQ_CVS_MISC_RAM_TAG = 0x55,
	A6XX_HLSQ_CPS_MISC_RAM_TAG = 0x56,
	A6XX_HLSQ_INST_RAM_TAG = 0x57,
	A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG = 0x58,
	A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG = 0x59,
	A6XX_HLSQ_PWR_REST_RAM = 0x5a,
	A6XX_HLSQ_PWR_REST_TAG = 0x5b,
	A6XX_HLSQ_DATAPATH_META = 0x60,
	A6XX_HLSQ_FRONTEND_META = 0x61,
	A6XX_HLSQ_INDIRECT_META = 0x62,
	A6XX_HLSQ_BACKEND_META = 0x63
};
498
/* Describes one shader memory: its state type id and size in dwords */
struct a6xx_shader_block {
	unsigned int statetype;	/* enum a6xx_shader_obj value */
	unsigned int sz;	/* size of one bank, in dwords */
	uint64_t offset;	/* crashdump buffer offset (filled in later) */
};

/* One (block, bank) pair being dumped, with its crashdump buffer offset */
struct a6xx_shader_block_info {
	struct a6xx_shader_block *block;
	unsigned int bank;	/* 0 .. A6XX_NUM_SHADER_BANKS - 1 */
	uint64_t offset;	/* offset of this bank's data in the buffer */
};
510
/* All shader memories to capture; sizes are per-bank, in dwords */
static struct a6xx_shader_block a6xx_shader_blocks[] = {
	{A6XX_TP0_TMO_DATA, 0x200},
	{A6XX_TP0_SMO_DATA, 0x80,},
	{A6XX_TP0_MIPMAP_BASE_DATA, 0x3C0},
	{A6XX_TP1_TMO_DATA, 0x200},
	{A6XX_TP1_SMO_DATA, 0x80,},
	{A6XX_TP1_MIPMAP_BASE_DATA, 0x3C0},
	{A6XX_SP_INST_DATA, 0x800},
	{A6XX_SP_LB_0_DATA, 0x800},
	{A6XX_SP_LB_1_DATA, 0x800},
	{A6XX_SP_LB_2_DATA, 0x800},
	{A6XX_SP_LB_3_DATA, 0x800},
	{A6XX_SP_LB_4_DATA, 0x800},
	{A6XX_SP_LB_5_DATA, 0x200},
	{A6XX_SP_CB_BINDLESS_DATA, 0x2000},
	{A6XX_SP_CB_LEGACY_DATA, 0x280,},
	{A6XX_SP_UAV_DATA, 0x80,},
	{A6XX_SP_INST_TAG, 0x80,},
	{A6XX_SP_CB_BINDLESS_TAG, 0x80,},
	{A6XX_SP_TMO_UMO_TAG, 0x80,},
	{A6XX_SP_SMO_TAG, 0x80},
	{A6XX_SP_STATE_DATA, 0x3F},
	{A6XX_HLSQ_CHUNK_CVS_RAM, 0x1C0},
	{A6XX_HLSQ_CHUNK_CPS_RAM, 0x280},
	{A6XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40,},
	{A6XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40,},
	{A6XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x4,},
	{A6XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x4,},
	{A6XX_HLSQ_CVS_MISC_RAM, 0x1C0},
	{A6XX_HLSQ_CPS_MISC_RAM, 0x580},
	{A6XX_HLSQ_INST_RAM, 0x800},
	{A6XX_HLSQ_GFX_CVS_CONST_RAM, 0x800},
	{A6XX_HLSQ_GFX_CPS_CONST_RAM, 0x800},
	{A6XX_HLSQ_CVS_MISC_RAM_TAG, 0x8,},
	{A6XX_HLSQ_CPS_MISC_RAM_TAG, 0x4,},
	{A6XX_HLSQ_INST_RAM_TAG, 0x80,},
	{A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0xC,},
	{A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x10},
	{A6XX_HLSQ_PWR_REST_RAM, 0x28},
	{A6XX_HLSQ_PWR_REST_TAG, 0x14},
	{A6XX_HLSQ_DATAPATH_META, 0x40,},
	{A6XX_HLSQ_FRONTEND_META, 0x40},
	{A6XX_HLSQ_INDIRECT_META, 0x40,}
};
555
/* GPU-visible buffer holding the crash dumper script */
static struct kgsl_memdesc a6xx_capturescript;
/* GPU-visible buffer the crash dumper writes register values into */
static struct kgsl_memdesc a6xx_crashdump_registers;
/* True when the crash dumper ran and its output can be trusted */
static bool crash_dump_valid;
559
/*
 * Top-level register lists to snapshot. count is the number of start/end
 * pairs; offset is filled in later with the crash dumper buffer offset.
 */
static struct reg_list {
	const unsigned int *regs;	/* start/end register pairs */
	unsigned int count;		/* number of pairs in @regs */
	const struct sel_reg *sel;	/* optional aperture select, or NULL */
	uint64_t offset;		/* crashdump buffer offset */
} a6xx_reg_list[] = {
	{ a6xx_registers, ARRAY_SIZE(a6xx_registers) / 2, NULL },
	{ a6xx_rb_rac_registers, ARRAY_SIZE(a6xx_rb_rac_registers) / 2,
		&_a6xx_rb_rac_aperture },
	{ a6xx_rb_rbp_registers, ARRAY_SIZE(a6xx_rb_rbp_registers) / 2,
		&_a6xx_rb_rbp_aperture },
};
572
/* Number of registers in the _i'th start/end pair of array _a (inclusive) */
#define REG_PAIR_COUNT(_a, _i) \
	(((_a)[(2 * (_i)) + 1] - (_a)[2 * (_i)]) + 1)
575
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -0600576static size_t a6xx_legacy_snapshot_registers(struct kgsl_device *device,
Lynus Vaz96de8522017-09-13 20:17:03 +0530577 u8 *buf, size_t remain, struct reg_list *regs)
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -0600578{
Lynus Vaz96de8522017-09-13 20:17:03 +0530579 struct kgsl_snapshot_registers snapshot_regs = {
580 .regs = regs->regs,
581 .count = regs->count,
582 };
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -0600583
Lynus Vaz96de8522017-09-13 20:17:03 +0530584 if (regs->sel)
585 kgsl_regwrite(device, regs->sel->host_reg, regs->sel->val);
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -0600586
Lynus Vaz96de8522017-09-13 20:17:03 +0530587 return kgsl_snapshot_dump_registers(device, buf, remain,
588 &snapshot_regs);
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -0600589}
590
/*
 * a6xx_snapshot_registers() - Write a register section from the crash
 * dumper output buffer, falling back to direct AHB reads if the dumper
 * did not run.
 * @device: KGSL device being snapshotted
 * @buf: Destination buffer for this snapshot section
 * @remain: Bytes remaining in the snapshot buffer
 * @priv: struct reg_list describing which registers to emit
 *
 * Output format is (address, value) dword pairs, 8 bytes per register.
 * Return: Number of bytes written to @buf (0 on no space for the header).
 */
static size_t a6xx_snapshot_registers(struct kgsl_device *device, u8 *buf,
		size_t remain, void *priv)
{
	struct kgsl_snapshot_regs *header = (struct kgsl_snapshot_regs *)buf;
	struct reg_list *regs = (struct reg_list *)priv;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int *src;
	unsigned int j, k;
	unsigned int count = 0;

	/* No valid crash dumper data: read the registers over AHB instead */
	if (crash_dump_valid == false)
		return a6xx_legacy_snapshot_registers(device, buf, remain,
				regs);

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	/* Values were dumped by the CP at regs->offset in the CD buffer */
	src = (unsigned int *)(a6xx_crashdump_registers.hostptr + regs->offset);
	remain -= sizeof(*header);

	for (j = 0; j < regs->count; j++) {
		unsigned int start = regs->regs[2 * j];
		unsigned int end = regs->regs[(2 * j) + 1];

		/* Each register needs 8 bytes: address dword + value dword */
		if (remain < ((end - start) + 1) * 8) {
			SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
			goto out;
		}

		remain -= ((end - start) + 1) * 8;

		for (k = start; k <= end; k++, count++) {
			*data++ = k;
			*data++ = *src++;
		}
	}

out:
	header->count = count;

	/* Return the size of the section */
	return (count * 8) + sizeof(*header);
}
636
Lynus Vaz030473e2017-06-22 17:33:06 +0530637static size_t a6xx_snapshot_pre_crashdump_regs(struct kgsl_device *device,
638 u8 *buf, size_t remain, void *priv)
639{
640 struct kgsl_snapshot_registers pre_cdregs = {
641 .regs = a6xx_pre_crashdumper_registers,
642 .count = ARRAY_SIZE(a6xx_pre_crashdumper_registers)/2,
643 };
644
645 return kgsl_snapshot_dump_registers(device, buf, remain, &pre_cdregs);
646}
647
Lynus Vaz9ad67a32017-03-10 14:55:02 +0530648static size_t a6xx_snapshot_shader_memory(struct kgsl_device *device,
649 u8 *buf, size_t remain, void *priv)
650{
651 struct kgsl_snapshot_shader *header =
652 (struct kgsl_snapshot_shader *) buf;
653 struct a6xx_shader_block_info *info =
654 (struct a6xx_shader_block_info *) priv;
655 struct a6xx_shader_block *block = info->block;
656 unsigned int *data = (unsigned int *) (buf + sizeof(*header));
657
658 if (remain < SHADER_SECTION_SZ(block->sz)) {
659 SNAPSHOT_ERR_NOMEM(device, "SHADER MEMORY");
660 return 0;
661 }
662
663 header->type = block->statetype;
664 header->index = info->bank;
665 header->size = block->sz;
666
667 memcpy(data, a6xx_crashdump_registers.hostptr + info->offset,
Lynus Vaz24f75eb2017-11-22 11:25:04 +0530668 block->sz * sizeof(unsigned int));
Lynus Vaz9ad67a32017-03-10 14:55:02 +0530669
670 return SHADER_SECTION_SZ(block->sz);
671}
672
673static void a6xx_snapshot_shader(struct kgsl_device *device,
674 struct kgsl_snapshot *snapshot)
675{
676 unsigned int i, j;
677 struct a6xx_shader_block_info info;
678
679 /* Shader blocks can only be read by the crash dumper */
680 if (crash_dump_valid == false)
681 return;
682
683 for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
684 for (j = 0; j < A6XX_NUM_SHADER_BANKS; j++) {
685 info.block = &a6xx_shader_blocks[i];
686 info.bank = j;
687 info.offset = a6xx_shader_blocks[i].offset +
688 (j * a6xx_shader_blocks[i].sz);
689
690 /* Shader working/shadow memory */
691 kgsl_snapshot_add_section(device,
692 KGSL_SNAPSHOT_SECTION_SHADER,
693 snapshot, a6xx_snapshot_shader_memory, &info);
694 }
695 }
696}
697
/*
 * a6xx_snapshot_mempool() - Dump the CP mempool via its indexed debug
 * registers.
 * @device: KGSL device being snapshotted
 * @snapshot: Snapshot being built
 *
 * The mempool size register is zeroed for the duration of the dump and
 * restored afterwards; the saved size is patched back into the dumped
 * data so the section stays self-consistent.
 */
static void a6xx_snapshot_mempool(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot)
{
	unsigned int pool_size;
	u8 *buf = snapshot->ptr;

	/* Set the mempool size to 0 to stabilize it while dumping */
	kgsl_regread(device, A6XX_CP_MEM_POOL_SIZE, &pool_size);
	kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, 0);

	kgsl_snapshot_indexed_registers(device, snapshot,
			A6XX_CP_MEM_POOL_DBG_ADDR, A6XX_CP_MEM_POOL_DBG_DATA,
			0, 0x2060);

	/*
	 * Data at offset 0x2000 in the mempool section is the mempool size.
	 * Since we set it to 0, patch in the original size so that the data
	 * is consistent.
	 */
	if (buf < snapshot->ptr) {
		unsigned int *data;

		/* Skip over the headers */
		buf += sizeof(struct kgsl_snapshot_section_header) +
				sizeof(struct kgsl_snapshot_indexed_regs);

		/* 0x2000 is a dword index into the dumped data */
		data = (unsigned int *)buf + 0x2000;
		*data = pool_size;
	}

	/* Restore the saved mempool size */
	kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, pool_size);
}
731
Lynus Vaz461e2382017-01-16 19:35:41 +0530732static inline unsigned int a6xx_read_dbgahb(struct kgsl_device *device,
733 unsigned int regbase, unsigned int reg)
734{
735 unsigned int read_reg = A6XX_HLSQ_DBG_AHB_READ_APERTURE +
736 reg - regbase / 4;
737 unsigned int val;
738
739 kgsl_regread(device, read_reg, &val);
740 return val;
741}
742
/*
 * a6xx_legacy_snapshot_cluster_dbgahb() - Dump a DBGAHB context cluster
 * with direct aperture reads (fallback when the crash dumper data is
 * invalid and legacy snapshots are enabled).
 * @device: KGSL device being snapshotted
 * @buf: Destination buffer for this MVC section
 * @remain: Bytes remaining in the snapshot buffer
 * @priv: struct a6xx_cluster_dbgahb_regs_info for the cluster/context
 *
 * Each register range is emitted as: start (with bit 31 set as a range
 * marker), end, then one value per register - hence the (end - start + 3)
 * dword accounting.
 *
 * Return: Number of bytes written to @buf.
 */
static size_t a6xx_legacy_snapshot_cluster_dbgahb(struct kgsl_device *device,
		u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_mvc_regs *header =
		(struct kgsl_snapshot_mvc_regs *)buf;
	struct a6xx_cluster_dbgahb_regs_info *info =
		(struct a6xx_cluster_dbgahb_regs_info *)priv;
	struct a6xx_cluster_dbgahb_registers *cur_cluster = info->cluster;
	unsigned int read_sel;
	unsigned int data_size = 0;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	int i, j;

	/* This slow path is only taken when legacy snapshots are requested */
	if (!device->snapshot_legacy)
		return 0;

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	remain -= sizeof(*header);

	header->ctxt_id = info->ctxt_id;
	header->cluster_id = cur_cluster->id;

	/* Select statetype + context in bits 15:8 of the read select */
	read_sel = ((cur_cluster->statetype + info->ctxt_id * 2) & 0xff) << 8;
	kgsl_regwrite(device, A6XX_HLSQ_DBG_READ_SEL, read_sel);

	for (i = 0; i < cur_cluster->num_sets; i++) {
		unsigned int start = cur_cluster->regs[2 * i];
		unsigned int end = cur_cluster->regs[2 * i + 1];

		/* 2 dwords of range header + one dword per register */
		if (remain < (end - start + 3) * 4) {
			SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
			goto out;
		}

		remain -= (end - start + 3) * 4;
		data_size += (end - start + 3) * 4;

		/* Bit 31 marks this entry as a start-of-range */
		*data++ = start | (1 << 31);
		*data++ = end;

		for (j = start; j <= end; j++) {
			unsigned int val;

			val = a6xx_read_dbgahb(device, cur_cluster->regbase, j);
			*data++ = val;

		}
	}

out:
	return data_size + sizeof(*header);
}
799
/*
 * a6xx_snapshot_cluster_dbgahb() - Dump one context cluster of DBG AHB
 * registers using the data captured by the CP crash dumper
 * @device: Device being snapshotted
 * @buf: Destination snapshot buffer
 * @remain: Number of bytes left in the snapshot buffer
 * @priv: Pointer to a struct a6xx_cluster_dbgahb_regs_info describing the
 * cluster and context to dump
 *
 * Falls back to a6xx_legacy_snapshot_cluster_dbgahb() when the crash dumper
 * did not run successfully.
 *
 * Return: number of bytes written (header plus register data).
 */
static size_t a6xx_snapshot_cluster_dbgahb(struct kgsl_device *device, u8 *buf,
				size_t remain, void *priv)
{
	struct kgsl_snapshot_mvc_regs *header =
				(struct kgsl_snapshot_mvc_regs *)buf;
	struct a6xx_cluster_dbgahb_regs_info *info =
				(struct a6xx_cluster_dbgahb_regs_info *)priv;
	struct a6xx_cluster_dbgahb_registers *cluster = info->cluster;
	unsigned int data_size = 0;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	int i, j;
	unsigned int *src;


	/* If the crash dumper output is unusable, read registers directly */
	if (crash_dump_valid == false)
		return a6xx_legacy_snapshot_cluster_dbgahb(device, buf, remain,
				info);

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	remain -= sizeof(*header);

	header->ctxt_id = info->ctxt_id;
	header->cluster_id = cluster->id;

	/* The dumper wrote each context's data at its own capture offset */
	src = (unsigned int *)(a6xx_crashdump_registers.hostptr +
		(header->ctxt_id ? cluster->offset1 : cluster->offset0));

	for (i = 0; i < cluster->num_sets; i++) {
		unsigned int start;
		unsigned int end;

		start = cluster->regs[2 * i];
		end = cluster->regs[2 * i + 1];

		/* Each range costs 2 metadata dwords plus its payload */
		if (remain < (end - start + 3) * 4) {
			SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
			goto out;
		}

		remain -= (end - start + 3) * 4;
		data_size += (end - start + 3) * 4;

		/* Bit 31 tags this entry as a start/end range descriptor */
		*data++ = start | (1 << 31);
		*data++ = end;
		for (j = start; j <= end; j++)
			*data++ = *src++;
	}
out:
	return data_size + sizeof(*header);
}
854
/*
 * a6xx_legacy_snapshot_non_ctx_dbgahb() - Dump a block of non-context DBG AHB
 * registers by reading the hardware directly (fallback path)
 * @device: Device being snapshotted
 * @buf: Destination snapshot buffer
 * @remain: Number of bytes left in the snapshot buffer
 * @priv: Pointer to a struct a6xx_non_ctx_dbgahb_registers for the block
 *
 * Return: number of bytes written, or 0 if skipped or out of space.
 */
static size_t a6xx_legacy_snapshot_non_ctx_dbgahb(struct kgsl_device *device,
				u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_regs *header =
				(struct kgsl_snapshot_regs *)buf;
	struct a6xx_non_ctx_dbgahb_registers *regs =
				(struct a6xx_non_ctx_dbgahb_registers *)priv;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	int count = 0;
	unsigned int read_sel;
	int i, j;

	/* Direct register reads are only done when legacy mode is enabled */
	if (!device->snapshot_legacy)
		return 0;

	/* Figure out how many registers we are going to dump */
	for (i = 0; i < regs->num_sets; i++) {
		int start = regs->regs[i * 2];
		int end = regs->regs[i * 2 + 1];

		count += (end - start + 1);
	}

	/* Each register is emitted as an (address, value) pair of dwords */
	if (remain < (count * 8) + sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	header->count = count;

	/* Select the statetype to expose via the read aperture */
	read_sel = (regs->statetype & 0xff) << 8;
	kgsl_regwrite(device, A6XX_HLSQ_DBG_READ_SEL, read_sel);

	for (i = 0; i < regs->num_sets; i++) {
		unsigned int start = regs->regs[2 * i];
		unsigned int end = regs->regs[2 * i + 1];

		for (j = start; j <= end; j++) {
			unsigned int val;

			val = a6xx_read_dbgahb(device, regs->regbase, j);
			*data++ = j;
			*data++ = val;

		}
	}
	return (count * 8) + sizeof(*header);
}
903
/*
 * a6xx_snapshot_non_ctx_dbgahb() - Dump a block of non-context DBG AHB
 * registers from the crash dumper capture buffer
 * @device: Device being snapshotted
 * @buf: Destination snapshot buffer
 * @remain: Number of bytes left in the snapshot buffer
 * @priv: Pointer to a struct a6xx_non_ctx_dbgahb_registers for the block
 *
 * Falls back to a6xx_legacy_snapshot_non_ctx_dbgahb() when the crash dumper
 * did not run successfully.
 *
 * Return: number of bytes written as (address, value) dword pairs plus the
 * header.
 */
static size_t a6xx_snapshot_non_ctx_dbgahb(struct kgsl_device *device, u8 *buf,
				size_t remain, void *priv)
{
	struct kgsl_snapshot_regs *header =
				(struct kgsl_snapshot_regs *)buf;
	struct a6xx_non_ctx_dbgahb_registers *regs =
				(struct a6xx_non_ctx_dbgahb_registers *)priv;
	unsigned int count = 0;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int i, k;
	unsigned int *src;

	/* If the crash dumper output is unusable, read registers directly */
	if (crash_dump_valid == false)
		return a6xx_legacy_snapshot_non_ctx_dbgahb(device, buf, remain,
				regs);

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	remain -= sizeof(*header);

	/* Captured data for this block starts at regs->offset */
	src = (unsigned int *)(a6xx_crashdump_registers.hostptr + regs->offset);

	for (i = 0; i < regs->num_sets; i++) {
		unsigned int start;
		unsigned int end;

		start = regs->regs[2 * i];
		end = regs->regs[(2 * i) + 1];

		/* 8 bytes per register: address dword + value dword */
		if (remain < (end - start + 1) * 8) {
			SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
			goto out;
		}

		remain -= ((end - start) + 1) * 8;

		for (k = start; k <= end; k++, count++) {
			*data++ = k;
			*data++ = *src++;
		}
	}
out:
	/* count reflects only the pairs actually written, even on overflow */
	header->count = count;

	/* Return the size of the section */
	return (count * 8) + sizeof(*header);
}
954
Lynus Vaz461e2382017-01-16 19:35:41 +0530955static void a6xx_snapshot_dbgahb_regs(struct kgsl_device *device,
956 struct kgsl_snapshot *snapshot)
957{
958 int i, j;
959
960 for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
961 struct a6xx_cluster_dbgahb_registers *cluster =
962 &a6xx_dbgahb_ctx_clusters[i];
963 struct a6xx_cluster_dbgahb_regs_info info;
964
965 info.cluster = cluster;
966 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
967 info.ctxt_id = j;
968
969 kgsl_snapshot_add_section(device,
970 KGSL_SNAPSHOT_SECTION_MVC, snapshot,
971 a6xx_snapshot_cluster_dbgahb, &info);
972 }
973 }
Harshdeep Dhattfbf71e62017-12-07 14:03:08 -0700974
975 for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
976 kgsl_snapshot_add_section(device,
977 KGSL_SNAPSHOT_SECTION_REGS, snapshot,
978 a6xx_snapshot_non_ctx_dbgahb, &a6xx_non_ctx_dbgahb[i]);
979 }
Lynus Vaz461e2382017-01-16 19:35:41 +0530980}
981
/*
 * a6xx_legacy_snapshot_mvc() - Dump one MVC register cluster by reading the
 * hardware directly through the CP host aperture (fallback path used when
 * the crash dumper output is not available)
 * @device: Device being snapshotted
 * @buf: Destination snapshot buffer
 * @remain: Number of bytes left in the snapshot buffer
 * @priv: Pointer to a struct a6xx_cluster_regs_info describing the cluster
 * and context to dump
 *
 * Return: number of bytes written (header plus register data).
 */
static size_t a6xx_legacy_snapshot_mvc(struct kgsl_device *device, u8 *buf,
				size_t remain, void *priv)
{
	struct kgsl_snapshot_mvc_regs *header =
				(struct kgsl_snapshot_mvc_regs *)buf;
	struct a6xx_cluster_regs_info *info =
				(struct a6xx_cluster_regs_info *)priv;
	struct a6xx_cluster_registers *cur_cluster = info->cluster;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int ctxt = info->ctxt_id;
	unsigned int start, end, i, j, aperture_cntl = 0;
	unsigned int data_size = 0;

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
		return 0;
	}

	remain -= sizeof(*header);

	header->ctxt_id = info->ctxt_id;
	header->cluster_id = cur_cluster->id;

	/*
	 * Set the AHB control for the Host to read from the
	 * cluster/context for this iteration.
	 */
	aperture_cntl = ((cur_cluster->id & 0x7) << 8) | (ctxt << 4) | ctxt;
	kgsl_regwrite(device, A6XX_CP_APERTURE_CNTL_HOST, aperture_cntl);

	/* Program the cluster's select register, if it has one */
	if (cur_cluster->sel)
		kgsl_regwrite(device, cur_cluster->sel->host_reg,
			cur_cluster->sel->val);

	for (i = 0; i < cur_cluster->num_sets; i++) {
		start = cur_cluster->regs[2 * i];
		end = cur_cluster->regs[2 * i + 1];

		/* Each range costs 2 metadata dwords plus its payload */
		if (remain < (end - start + 3) * 4) {
			SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
			goto out;
		}

		remain -= (end - start + 3) * 4;
		data_size += (end - start + 3) * 4;

		/* Bit 31 tags this entry as a start/end range descriptor */
		*data++ = start | (1 << 31);
		*data++ = end;
		for (j = start; j <= end; j++) {
			unsigned int val;

			kgsl_regread(device, j, &val);
			*data++ = val;
		}
	}
out:
	return data_size + sizeof(*header);
}
1040
/*
 * a6xx_snapshot_mvc() - Dump one MVC register cluster from the crash dumper
 * capture buffer
 * @device: Device being snapshotted
 * @buf: Destination snapshot buffer
 * @remain: Number of bytes left in the snapshot buffer
 * @priv: Pointer to a struct a6xx_cluster_regs_info describing the cluster
 * and context to dump
 *
 * Falls back to a6xx_legacy_snapshot_mvc() when the crash dumper did not
 * run successfully.
 *
 * Return: number of bytes written (header plus register data).
 */
static size_t a6xx_snapshot_mvc(struct kgsl_device *device, u8 *buf,
				size_t remain, void *priv)
{
	struct kgsl_snapshot_mvc_regs *header =
				(struct kgsl_snapshot_mvc_regs *)buf;
	struct a6xx_cluster_regs_info *info =
				(struct a6xx_cluster_regs_info *)priv;
	struct a6xx_cluster_registers *cluster = info->cluster;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int *src;
	int i, j;
	unsigned int start, end;
	size_t data_size = 0;

	/* If the crash dumper output is unusable, read registers directly */
	if (crash_dump_valid == false)
		return a6xx_legacy_snapshot_mvc(device, buf, remain, info);

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
		return 0;
	}

	remain -= sizeof(*header);

	header->ctxt_id = info->ctxt_id;
	header->cluster_id = cluster->id;

	/* The dumper wrote each context's data at its own capture offset */
	src = (unsigned int *)(a6xx_crashdump_registers.hostptr +
		(header->ctxt_id ? cluster->offset1 : cluster->offset0));

	for (i = 0; i < cluster->num_sets; i++) {
		start = cluster->regs[2 * i];
		end = cluster->regs[2 * i + 1];

		/* Each range costs 2 metadata dwords plus its payload */
		if (remain < (end - start + 3) * 4) {
			SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
			goto out;
		}

		remain -= (end - start + 3) * 4;
		data_size += (end - start + 3) * 4;

		/* Bit 31 tags this entry as a start/end range descriptor */
		*data++ = start | (1 << 31);
		*data++ = end;
		for (j = start; j <= end; j++)
			*data++ = *src++;
	}

out:
	return data_size + sizeof(*header);

}
1093
1094static void a6xx_snapshot_mvc_regs(struct kgsl_device *device,
1095 struct kgsl_snapshot *snapshot)
1096{
1097 int i, j;
1098 struct a6xx_cluster_regs_info info;
1099
1100 for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
1101 struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];
1102
1103 info.cluster = cluster;
1104 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
1105 info.ctxt_id = j;
1106
1107 kgsl_snapshot_add_section(device,
1108 KGSL_SNAPSHOT_SECTION_MVC, snapshot,
1109 a6xx_snapshot_mvc, &info);
1110 }
1111 }
1112}
1113
Lynus Vaz20c81272017-02-10 16:22:12 +05301114/* a6xx_dbgc_debug_bus_read() - Read data from trace bus */
1115static void a6xx_dbgc_debug_bus_read(struct kgsl_device *device,
1116 unsigned int block_id, unsigned int index, unsigned int *val)
1117{
1118 unsigned int reg;
1119
1120 reg = (block_id << A6XX_DBGC_CFG_DBGBUS_SEL_PING_BLK_SEL_SHIFT) |
1121 (index << A6XX_DBGC_CFG_DBGBUS_SEL_PING_INDEX_SHIFT);
1122
1123 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_A, reg);
1124 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_B, reg);
1125 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_C, reg);
1126 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_D, reg);
1127
Shrenuj Bansald4508ba2017-05-11 15:59:37 -07001128 /*
1129 * There needs to be a delay of 1 us to ensure enough time for correct
1130 * data is funneled into the trace buffer
1131 */
1132 udelay(1);
1133
Lynus Vaz20c81272017-02-10 16:22:12 +05301134 kgsl_regread(device, A6XX_DBGC_CFG_DBGBUS_TRACE_BUF2, val);
1135 val++;
1136 kgsl_regread(device, A6XX_DBGC_CFG_DBGBUS_TRACE_BUF1, val);
1137}
1138
/*
 * a6xx_snapshot_dbgc_debugbus_block() - Capture debug data for a gpu block
 * @device: Device being snapshotted
 * @buf: Destination snapshot buffer
 * @remain: Number of bytes left in the snapshot buffer
 * @priv: Pointer to a struct adreno_debugbus_block describing the block
 *
 * Return: number of bytes written (header plus 2 dwords per bus index),
 * or 0 if the section did not fit.
 */
static size_t a6xx_snapshot_dbgc_debugbus_block(struct kgsl_device *device,
	u8 *buf, size_t remain, void *priv)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct kgsl_snapshot_debugbus *header =
		(struct kgsl_snapshot_debugbus *)buf;
	struct adreno_debugbus_block *block = priv;
	int i;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int dwords;
	unsigned int block_id;
	size_t size;

	dwords = block->dwords;

	/* For a6xx each debug bus data unit is 2 DWORDS */
	size = (dwords * sizeof(unsigned int) * 2) + sizeof(*header);

	if (remain < size) {
		SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
		return 0;
	}

	header->id = block->block_id;
	header->count = dwords * 2;

	block_id = block->block_id;
	/* GMU_GX data is read using the GMU_CX block id on A630 */
	if (adreno_is_a630(adreno_dev) &&
		(block_id == A6XX_DBGBUS_GMU_GX))
		block_id = A6XX_DBGBUS_GMU_CX;

	/* Each index yields a 2-dword unit written at data[i*2], data[i*2+1] */
	for (i = 0; i < dwords; i++)
		a6xx_dbgc_debug_bus_read(device, block_id, i, &data[i*2]);

	return size;
}
1177
/* a6xx_snapshot_vbif_debugbus_block() - Capture debug data for VBIF block */
static size_t a6xx_snapshot_vbif_debugbus_block(struct kgsl_device *device,
			u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_debugbus *header =
		(struct kgsl_snapshot_debugbus *)buf;
	struct adreno_debugbus_block *block = priv;
	int i, j;
	/*
	 * Total number of VBIF data words considering 3 sections:
	 * 2 arbiter blocks of 16 words
	 * 5 AXI XIN blocks of 18 dwords each
	 * 4 core clock side XIN blocks of 12 dwords each
	 */
	unsigned int dwords = (16 * A6XX_NUM_AXI_ARB_BLOCKS) +
			(18 * A6XX_NUM_XIN_AXI_BLOCKS) +
			(12 * A6XX_NUM_XIN_CORE_BLOCKS);
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	size_t size;
	unsigned int reg_clk;

	size = (dwords * sizeof(unsigned int)) + sizeof(*header);

	if (remain < size) {
		SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
		return 0;
	}
	header->id = block->block_id;
	header->count = dwords;

	/* Force the test bus clock on; the saved value is restored at exit */
	kgsl_regread(device, A6XX_VBIF_CLKON, &reg_clk);
	kgsl_regwrite(device, A6XX_VBIF_CLKON, reg_clk |
			(A6XX_VBIF_CLKON_FORCE_ON_TESTBUS_MASK <<
			A6XX_VBIF_CLKON_FORCE_ON_TESTBUS_SHIFT));
	kgsl_regwrite(device, A6XX_VBIF_TEST_BUS1_CTRL0, 0);
	kgsl_regwrite(device, A6XX_VBIF_TEST_BUS_OUT_CTRL,
			(A6XX_VBIF_TEST_BUS_OUT_CTRL_EN_MASK <<
			A6XX_VBIF_TEST_BUS_OUT_CTRL_EN_SHIFT));

	/* AXI arbiter blocks: selected via the high bits of BUS2_CTRL0 */
	for (i = 0; i < A6XX_NUM_AXI_ARB_BLOCKS; i++) {
		kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL0,
			(1 << (i + 16)));
		for (j = 0; j < 16; j++) {
			kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL1,
				((j & A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL_MASK)
				<< A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL_SHIFT));
			kgsl_regread(device, A6XX_VBIF_TEST_BUS_OUT,
				data);
			data++;
		}
	}

	/* XIN blocks AXI side */
	for (i = 0; i < A6XX_NUM_XIN_AXI_BLOCKS; i++) {
		kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL0, 1 << i);
		for (j = 0; j < 18; j++) {
			kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL1,
				((j & A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL_MASK)
				<< A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL_SHIFT));
			kgsl_regread(device, A6XX_VBIF_TEST_BUS_OUT,
				data);
			data++;
		}
	}
	kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL0, 0);

	/* XIN blocks core clock side */
	for (i = 0; i < A6XX_NUM_XIN_CORE_BLOCKS; i++) {
		kgsl_regwrite(device, A6XX_VBIF_TEST_BUS1_CTRL0, 1 << i);
		for (j = 0; j < 12; j++) {
			kgsl_regwrite(device, A6XX_VBIF_TEST_BUS1_CTRL1,
				((j & A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL_MASK)
				<< A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL_SHIFT));
			kgsl_regread(device, A6XX_VBIF_TEST_BUS_OUT,
				data);
			data++;
		}
	}
	/* restore the clock of VBIF */
	kgsl_regwrite(device, A6XX_VBIF_CLKON, reg_clk);
	return size;
}
1260
Lynus Vazff24c972017-03-07 19:27:46 +05301261/* a6xx_cx_dbgc_debug_bus_read() - Read data from trace bus */
1262static void a6xx_cx_debug_bus_read(struct kgsl_device *device,
1263 unsigned int block_id, unsigned int index, unsigned int *val)
1264{
1265 unsigned int reg;
1266
1267 reg = (block_id << A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_BLK_SEL_SHIFT) |
1268 (index << A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_INDEX_SHIFT);
1269
Lynus Vaz9fdc1d22017-09-21 22:06:14 +05301270 adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_SEL_A, reg);
1271 adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_SEL_B, reg);
1272 adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_SEL_C, reg);
1273 adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_SEL_D, reg);
Lynus Vazff24c972017-03-07 19:27:46 +05301274
Shrenuj Bansald4508ba2017-05-11 15:59:37 -07001275 /*
1276 * There needs to be a delay of 1 us to ensure enough time for correct
1277 * data is funneled into the trace buffer
1278 */
1279 udelay(1);
1280
Lynus Vaz9fdc1d22017-09-21 22:06:14 +05301281 adreno_cx_dbgc_regread(device, A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2, val);
Lynus Vazff24c972017-03-07 19:27:46 +05301282 val++;
Lynus Vaz9fdc1d22017-09-21 22:06:14 +05301283 adreno_cx_dbgc_regread(device, A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1, val);
Lynus Vazff24c972017-03-07 19:27:46 +05301284}
1285
/*
 * a6xx_snapshot_cx_dbgc_debugbus_block() - Capture debug data for a gpu
 * block from the CX DBGC block
 * @device: Device being snapshotted
 * @buf: Destination snapshot buffer
 * @remain: Number of bytes left in the snapshot buffer
 * @priv: Pointer to a struct adreno_debugbus_block describing the block
 *
 * Return: number of bytes written (header plus 2 dwords per bus index),
 * or 0 if the section did not fit.
 */
static size_t a6xx_snapshot_cx_dbgc_debugbus_block(struct kgsl_device *device,
	u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_debugbus *header =
		(struct kgsl_snapshot_debugbus *)buf;
	struct adreno_debugbus_block *block = priv;
	int i;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int dwords;
	size_t size;

	dwords = block->dwords;

	/* For a6xx each debug bus data unit is 2 DWORDS */
	size = (dwords * sizeof(unsigned int) * 2) + sizeof(*header);

	if (remain < size) {
		SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
		return 0;
	}

	header->id = block->block_id;
	header->count = dwords * 2;

	/* Each index yields a 2-dword unit written at data[i*2], data[i*2+1] */
	for (i = 0; i < dwords; i++)
		a6xx_cx_debug_bus_read(device, block->block_id, i,
			&data[i*2]);

	return size;
}
1320
Lynus Vaz20c81272017-02-10 16:22:12 +05301321/* a6xx_snapshot_debugbus() - Capture debug bus data */
1322static void a6xx_snapshot_debugbus(struct kgsl_device *device,
1323 struct kgsl_snapshot *snapshot)
1324{
1325 int i;
Rajesh Kemisetti77b82ed2017-09-24 20:42:41 +05301326 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
Lynus Vaz20c81272017-02-10 16:22:12 +05301327
1328 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_CNTLT,
1329 (0xf << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT_SHIFT) |
Shrenuj Bansald4508ba2017-05-11 15:59:37 -07001330 (0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU_SHIFT) |
1331 (0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN_SHIFT));
Lynus Vaz20c81272017-02-10 16:22:12 +05301332
1333 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_CNTLM,
1334 0xf << A6XX_DBGC_CFG_DBGBUS_CTLTM_ENABLE_SHIFT);
1335
1336 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_0, 0);
1337 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_1, 0);
1338 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_2, 0);
1339 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_3, 0);
1340
1341 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_BYTEL_0,
1342 (0 << A6XX_DBGC_CFG_DBGBUS_BYTEL0_SHIFT) |
1343 (1 << A6XX_DBGC_CFG_DBGBUS_BYTEL1_SHIFT) |
1344 (2 << A6XX_DBGC_CFG_DBGBUS_BYTEL2_SHIFT) |
1345 (3 << A6XX_DBGC_CFG_DBGBUS_BYTEL3_SHIFT) |
1346 (4 << A6XX_DBGC_CFG_DBGBUS_BYTEL4_SHIFT) |
1347 (5 << A6XX_DBGC_CFG_DBGBUS_BYTEL5_SHIFT) |
1348 (6 << A6XX_DBGC_CFG_DBGBUS_BYTEL6_SHIFT) |
1349 (7 << A6XX_DBGC_CFG_DBGBUS_BYTEL7_SHIFT));
1350 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_BYTEL_1,
1351 (8 << A6XX_DBGC_CFG_DBGBUS_BYTEL8_SHIFT) |
1352 (9 << A6XX_DBGC_CFG_DBGBUS_BYTEL9_SHIFT) |
1353 (10 << A6XX_DBGC_CFG_DBGBUS_BYTEL10_SHIFT) |
1354 (11 << A6XX_DBGC_CFG_DBGBUS_BYTEL11_SHIFT) |
1355 (12 << A6XX_DBGC_CFG_DBGBUS_BYTEL12_SHIFT) |
1356 (13 << A6XX_DBGC_CFG_DBGBUS_BYTEL13_SHIFT) |
1357 (14 << A6XX_DBGC_CFG_DBGBUS_BYTEL14_SHIFT) |
1358 (15 << A6XX_DBGC_CFG_DBGBUS_BYTEL15_SHIFT));
1359
1360 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_0, 0);
1361 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_1, 0);
1362 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_2, 0);
1363 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_3, 0);
1364
Lynus Vaz9fdc1d22017-09-21 22:06:14 +05301365 adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_CNTLT,
Lynus Vazff24c972017-03-07 19:27:46 +05301366 (0xf << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT_SHIFT) |
Shrenuj Bansald4508ba2017-05-11 15:59:37 -07001367 (0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU_SHIFT) |
1368 (0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN_SHIFT));
Lynus Vazff24c972017-03-07 19:27:46 +05301369
Lynus Vaz9fdc1d22017-09-21 22:06:14 +05301370 adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_CNTLM,
1371 0xf << A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE_SHIFT);
Lynus Vazff24c972017-03-07 19:27:46 +05301372
Lynus Vaz9fdc1d22017-09-21 22:06:14 +05301373 adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0, 0);
1374 adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1, 0);
1375 adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2, 0);
1376 adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3, 0);
Lynus Vazff24c972017-03-07 19:27:46 +05301377
Lynus Vaz9fdc1d22017-09-21 22:06:14 +05301378 adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0,
1379 (0 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL0_SHIFT) |
1380 (1 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL1_SHIFT) |
1381 (2 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL2_SHIFT) |
1382 (3 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL3_SHIFT) |
1383 (4 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL4_SHIFT) |
1384 (5 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL5_SHIFT) |
1385 (6 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL6_SHIFT) |
1386 (7 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL7_SHIFT));
1387 adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1,
1388 (8 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL8_SHIFT) |
1389 (9 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL9_SHIFT) |
1390 (10 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL10_SHIFT) |
1391 (11 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL11_SHIFT) |
1392 (12 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL12_SHIFT) |
1393 (13 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL13_SHIFT) |
1394 (14 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL14_SHIFT) |
1395 (15 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL15_SHIFT));
Lynus Vazff24c972017-03-07 19:27:46 +05301396
Lynus Vaz9fdc1d22017-09-21 22:06:14 +05301397 adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0, 0);
1398 adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1, 0);
1399 adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2, 0);
1400 adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3, 0);
Lynus Vazff24c972017-03-07 19:27:46 +05301401
Lynus Vaz20c81272017-02-10 16:22:12 +05301402 for (i = 0; i < ARRAY_SIZE(a6xx_dbgc_debugbus_blocks); i++) {
1403 kgsl_snapshot_add_section(device,
1404 KGSL_SNAPSHOT_SECTION_DEBUGBUS,
1405 snapshot, a6xx_snapshot_dbgc_debugbus_block,
1406 (void *) &a6xx_dbgc_debugbus_blocks[i]);
1407 }
Rajesh Kemisettib36bb492017-11-20 10:49:27 +05301408 /*
1409 * GBIF has same debugbus as of other GPU blocks hence fall back to
1410 * default path if GPU uses GBIF.
1411 * GBIF uses exactly same ID as of VBIF so use it as it is.
1412 */
1413 if (adreno_has_gbif(adreno_dev))
Rajesh Kemisetti77b82ed2017-09-24 20:42:41 +05301414 kgsl_snapshot_add_section(device,
Rajesh Kemisettib36bb492017-11-20 10:49:27 +05301415 KGSL_SNAPSHOT_SECTION_DEBUGBUS,
1416 snapshot, a6xx_snapshot_dbgc_debugbus_block,
1417 (void *) &a6xx_vbif_debugbus_blocks);
1418 else
1419 kgsl_snapshot_add_section(device,
1420 KGSL_SNAPSHOT_SECTION_DEBUGBUS,
1421 snapshot, a6xx_snapshot_vbif_debugbus_block,
1422 (void *) &a6xx_vbif_debugbus_blocks);
Lynus Vazdaac540732017-07-27 14:23:35 +05301423
Lynus Vaz9fdc1d22017-09-21 22:06:14 +05301424 /* Dump the CX debugbus data if the block exists */
1425 if (adreno_is_cx_dbgc_register(device, A6XX_CX_DBGC_CFG_DBGBUS_SEL_A)) {
Lynus Vazff24c972017-03-07 19:27:46 +05301426 for (i = 0; i < ARRAY_SIZE(a6xx_cx_dbgc_debugbus_blocks); i++) {
1427 kgsl_snapshot_add_section(device,
1428 KGSL_SNAPSHOT_SECTION_DEBUGBUS,
1429 snapshot, a6xx_snapshot_cx_dbgc_debugbus_block,
1430 (void *) &a6xx_cx_dbgc_debugbus_blocks[i]);
Rajesh Kemisettib36bb492017-11-20 10:49:27 +05301431 /*
1432 * Get debugbus for GBIF CX part if GPU has GBIF block
1433 * GBIF uses exactly same ID as of VBIF so use
1434 * it as it is.
1435 */
1436 if (adreno_has_gbif(adreno_dev))
1437 kgsl_snapshot_add_section(device,
1438 KGSL_SNAPSHOT_SECTION_DEBUGBUS,
1439 snapshot,
1440 a6xx_snapshot_cx_dbgc_debugbus_block,
1441 (void *) &a6xx_vbif_debugbus_blocks);
Lynus Vazff24c972017-03-07 19:27:46 +05301442 }
Lynus Vazff24c972017-03-07 19:27:46 +05301443 }
Lynus Vaz20c81272017-02-10 16:22:12 +05301444}
1445
Carter Cooperb88b7082017-09-14 09:03:26 -06001446/*
1447 * a6xx_snapshot_gmu() - A6XX GMU snapshot function
1448 * @adreno_dev: Device being snapshotted
1449 * @snapshot: Pointer to the snapshot instance
1450 *
1451 * This is where all of the A6XX GMU specific bits and pieces are grabbed
1452 * into the snapshot memory
1453 */
1454void a6xx_snapshot_gmu(struct adreno_device *adreno_dev,
Kyle Piefer60733aa2017-03-21 11:24:01 -07001455 struct kgsl_snapshot *snapshot)
1456{
Carter Cooperb88b7082017-09-14 09:03:26 -06001457 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
George Shen1d447b02017-07-12 13:40:28 -07001458 struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
George Sheneb93bd32017-10-11 15:52:53 -07001459 unsigned int val;
George Shen1d447b02017-07-12 13:40:28 -07001460
Kyle Piefer60733aa2017-03-21 11:24:01 -07001461 if (!kgsl_gmu_isenabled(device))
1462 return;
1463
Lynus Vazd37f1d82017-05-24 16:39:15 +05301464 adreno_snapshot_registers(device, snapshot, a6xx_gmu_registers,
1465 ARRAY_SIZE(a6xx_gmu_registers) / 2);
George Shen1d447b02017-07-12 13:40:28 -07001466
George Sheneb93bd32017-10-11 15:52:53 -07001467 if (gpudev->gx_is_on(adreno_dev)) {
1468 /* Set fence to ALLOW mode so registers can be read */
1469 kgsl_regwrite(device, A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
1470 kgsl_regread(device, A6XX_GMU_AO_AHB_FENCE_CTRL, &val);
1471
1472 KGSL_DRV_ERR(device, "set FENCE to ALLOW mode:%x\n", val);
George Shen1d447b02017-07-12 13:40:28 -07001473 adreno_snapshot_registers(device, snapshot,
1474 a6xx_gmu_gx_registers,
1475 ARRAY_SIZE(a6xx_gmu_gx_registers) / 2);
George Sheneb93bd32017-10-11 15:52:53 -07001476 }
Kyle Piefer60733aa2017-03-21 11:24:01 -07001477}
1478
Lynus Vaz85150052017-02-21 17:57:48 +05301479/* a6xx_snapshot_sqe() - Dump SQE data in snapshot */
1480static size_t a6xx_snapshot_sqe(struct kgsl_device *device, u8 *buf,
1481 size_t remain, void *priv)
1482{
1483 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
1484 struct kgsl_snapshot_debug *header = (struct kgsl_snapshot_debug *)buf;
1485 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
1486 struct adreno_firmware *fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);
1487
1488 if (remain < DEBUG_SECTION_SZ(1)) {
1489 SNAPSHOT_ERR_NOMEM(device, "SQE VERSION DEBUG");
1490 return 0;
1491 }
1492
1493 /* Dump the SQE firmware version */
1494 header->type = SNAPSHOT_DEBUG_SQE_VERSION;
1495 header->size = 1;
1496 *data = fw->version;
1497
1498 return DEBUG_SECTION_SZ(1);
1499}
1500
/*
 * _a6xx_do_crashdump() - Run the CP crash dump script and wait for it
 * @device: Device pointer
 *
 * Points the CP at the prebuilt capture script (a6xx_capturescript), which
 * makes the CP copy the selected registers into a6xx_crashdump_registers,
 * then polls for completion. On success crash_dump_valid is set so that
 * the snapshot callbacks consume the captured data instead of reading the
 * hardware directly.
 */
static void _a6xx_do_crashdump(struct kgsl_device *device)
{
	unsigned long wait_time;
	unsigned int reg = 0;
	unsigned int val;

	crash_dump_valid = false;

	if (!device->snapshot_crashdumper)
		return;
	/* Both capture buffers must have been allocated at init time */
	if (a6xx_capturescript.gpuaddr == 0 ||
		a6xx_crashdump_registers.gpuaddr == 0)
		return;

	/* IF the SMMU is stalled we cannot do a crash dump */
	kgsl_regread(device, A6XX_RBBM_STATUS3, &val);
	if (val & BIT(24))
		return;

	/* Turn on APRIV so we can access the buffers */
	kgsl_regwrite(device, A6XX_CP_MISC_CNTL, 1);

	kgsl_regwrite(device, A6XX_CP_CRASH_SCRIPT_BASE_LO,
			lower_32_bits(a6xx_capturescript.gpuaddr));
	kgsl_regwrite(device, A6XX_CP_CRASH_SCRIPT_BASE_HI,
			upper_32_bits(a6xx_capturescript.gpuaddr));
	kgsl_regwrite(device, A6XX_CP_CRASH_DUMP_CNTL, 1);

	/* Poll for the dumper's completion bit until the timeout expires */
	wait_time = jiffies + msecs_to_jiffies(CP_CRASH_DUMPER_TIMEOUT);
	while (!time_after(jiffies, wait_time)) {
		kgsl_regread(device, A6XX_CP_CRASH_DUMP_STATUS, &reg);
		if (reg & 0x2)
			break;
		cpu_relax();
	}

	/* Turn APRIV back off */
	kgsl_regwrite(device, A6XX_CP_MISC_CNTL, 0);

	if (!(reg & 0x2)) {
		KGSL_CORE_ERR("Crash dump timed out: 0x%X\n", reg);
		return;
	}

	crash_dump_valid = true;
}
1546
/*
 * a6xx_snapshot() - A6XX GPU snapshot function
 * @adreno_dev: Device being snapshotted
 * @snapshot: Pointer to the snapshot instance
 *
 * This is where all of the A6XX specific bits and pieces are grabbed
 * into the snapshot memory
 */
void a6xx_snapshot(struct adreno_device *adreno_dev,
		struct kgsl_snapshot *snapshot)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	struct adreno_snapshot_data *snap_data = gpudev->snapshot_data;
	bool sptprac_on;
	unsigned int i;

	/* GMU TCM data dumped through AHB */
	a6xx_snapshot_gmu(adreno_dev, snapshot);

	sptprac_on = gpudev->sptprac_is_on(adreno_dev);

	/* Return if the GX is off */
	if (!gpudev->gx_is_on(adreno_dev))
		return;

	/* Dump the registers which get affected by crash dumper trigger */
	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
		snapshot, a6xx_snapshot_pre_crashdump_regs, NULL);

	/* Dump vbif registers as well which get affected by crash dumper */
	if (!adreno_has_gbif(adreno_dev))
		adreno_snapshot_vbif_registers(device, snapshot,
			a6xx_vbif_snapshot_registers,
			ARRAY_SIZE(a6xx_vbif_snapshot_registers));
	else
		adreno_snapshot_registers(device, snapshot,
			a6xx_gbif_registers,
			ARRAY_SIZE(a6xx_gbif_registers) / 2);

	/* Try to run the crash dumper (needs SPTPRAC power) */
	if (sptprac_on)
		_a6xx_do_crashdump(device);

	/* General register sections; use crashdump data when it is valid */
	for (i = 0; i < ARRAY_SIZE(a6xx_reg_list); i++) {
		kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
			snapshot, a6xx_snapshot_registers, &a6xx_reg_list[i]);
	}

	/* CP_SQE indexed registers */
	kgsl_snapshot_indexed_registers(device, snapshot,
		A6XX_CP_SQE_STAT_ADDR, A6XX_CP_SQE_STAT_DATA,
		0, snap_data->sect_sizes->cp_pfp);

	/* CP_DRAW_STATE */
	kgsl_snapshot_indexed_registers(device, snapshot,
		A6XX_CP_DRAW_STATE_ADDR, A6XX_CP_DRAW_STATE_DATA,
		0, 0x100);

	/* SQE_UCODE Cache */
	kgsl_snapshot_indexed_registers(device, snapshot,
		A6XX_CP_SQE_UCODE_DBG_ADDR, A6XX_CP_SQE_UCODE_DBG_DATA,
		0, 0x6000);

	/* CP ROQ */
	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG,
		snapshot, adreno_snapshot_cp_roq,
		&snap_data->sect_sizes->roq);

	/* SQE Firmware */
	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG,
		snapshot, a6xx_snapshot_sqe, NULL);

	/* Mempool debug data */
	a6xx_snapshot_mempool(device, snapshot);

	/* These blocks are only reachable while SPTPRAC is powered */
	if (sptprac_on) {
		/* Shader memory */
		a6xx_snapshot_shader(device, snapshot);

		/* MVC register section */
		a6xx_snapshot_mvc_regs(device, snapshot);

		/* registers dumped through DBG AHB */
		a6xx_snapshot_dbgahb_regs(device, snapshot);
	}

	a6xx_snapshot_debugbus(device, snapshot);

}
1637
1638static int _a6xx_crashdump_init_mvc(uint64_t *ptr, uint64_t *offset)
1639{
1640 int qwords = 0;
1641 unsigned int i, j, k;
1642 unsigned int count;
1643
1644 for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
1645 struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];
1646
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -06001647 if (cluster->sel) {
1648 ptr[qwords++] = cluster->sel->val;
1649 ptr[qwords++] = ((uint64_t)cluster->sel->cd_reg << 44) |
1650 (1 << 21) | 1;
1651 }
1652
Shrenuj Bansal41665402016-12-16 15:25:54 -08001653 cluster->offset0 = *offset;
1654 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
1655
1656 if (j == 1)
1657 cluster->offset1 = *offset;
1658
1659 ptr[qwords++] = (cluster->id << 8) | (j << 4) | j;
1660 ptr[qwords++] =
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -06001661 ((uint64_t)A6XX_CP_APERTURE_CNTL_CD << 44) |
Shrenuj Bansal41665402016-12-16 15:25:54 -08001662 (1 << 21) | 1;
1663
1664 for (k = 0; k < cluster->num_sets; k++) {
1665 count = REG_PAIR_COUNT(cluster->regs, k);
1666 ptr[qwords++] =
1667 a6xx_crashdump_registers.gpuaddr + *offset;
1668 ptr[qwords++] =
1669 (((uint64_t)cluster->regs[2 * k]) << 44) |
1670 count;
1671
1672 *offset += count * sizeof(unsigned int);
1673 }
1674 }
1675 }
1676
1677 return qwords;
1678}
1679
Lynus Vaz9ad67a32017-03-10 14:55:02 +05301680static int _a6xx_crashdump_init_shader(struct a6xx_shader_block *block,
1681 uint64_t *ptr, uint64_t *offset)
1682{
1683 int qwords = 0;
1684 unsigned int j;
1685
1686 /* Capture each bank in the block */
1687 for (j = 0; j < A6XX_NUM_SHADER_BANKS; j++) {
1688 /* Program the aperture */
1689 ptr[qwords++] =
1690 (block->statetype << A6XX_SHADER_STATETYPE_SHIFT) | j;
1691 ptr[qwords++] = (((uint64_t) A6XX_HLSQ_DBG_READ_SEL << 44)) |
1692 (1 << 21) | 1;
1693
1694 /* Read all the data in one chunk */
1695 ptr[qwords++] = a6xx_crashdump_registers.gpuaddr + *offset;
1696 ptr[qwords++] =
1697 (((uint64_t) A6XX_HLSQ_DBG_AHB_READ_APERTURE << 44)) |
1698 block->sz;
1699
1700 /* Remember the offset of the first bank for easy access */
1701 if (j == 0)
1702 block->offset = *offset;
1703
1704 *offset += block->sz * sizeof(unsigned int);
1705 }
1706
1707 return qwords;
1708}
1709
Lynus Vaz1e258612017-04-27 21:35:22 +05301710static int _a6xx_crashdump_init_ctx_dbgahb(uint64_t *ptr, uint64_t *offset)
1711{
1712 int qwords = 0;
1713 unsigned int i, j, k;
1714 unsigned int count;
1715
1716 for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
1717 struct a6xx_cluster_dbgahb_registers *cluster =
1718 &a6xx_dbgahb_ctx_clusters[i];
1719
1720 cluster->offset0 = *offset;
1721
1722 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
1723 if (j == 1)
1724 cluster->offset1 = *offset;
1725
1726 /* Program the aperture */
1727 ptr[qwords++] =
1728 ((cluster->statetype + j * 2) & 0xff) << 8;
1729 ptr[qwords++] =
1730 (((uint64_t)A6XX_HLSQ_DBG_READ_SEL << 44)) |
1731 (1 << 21) | 1;
1732
1733 for (k = 0; k < cluster->num_sets; k++) {
1734 unsigned int start = cluster->regs[2 * k];
1735
1736 count = REG_PAIR_COUNT(cluster->regs, k);
1737 ptr[qwords++] =
1738 a6xx_crashdump_registers.gpuaddr + *offset;
1739 ptr[qwords++] =
1740 (((uint64_t)(A6XX_HLSQ_DBG_AHB_READ_APERTURE +
1741 start - cluster->regbase / 4) << 44)) |
1742 count;
1743
1744 *offset += count * sizeof(unsigned int);
1745 }
1746 }
1747 }
1748 return qwords;
1749}
1750
Harshdeep Dhattfbf71e62017-12-07 14:03:08 -07001751static int _a6xx_crashdump_init_non_ctx_dbgahb(uint64_t *ptr, uint64_t *offset)
1752{
1753 int qwords = 0;
1754 unsigned int i, k;
1755 unsigned int count;
1756
1757 for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
1758 struct a6xx_non_ctx_dbgahb_registers *regs =
1759 &a6xx_non_ctx_dbgahb[i];
1760
1761 regs->offset = *offset;
1762
1763 /* Program the aperture */
1764 ptr[qwords++] = (regs->statetype & 0xff) << 8;
1765 ptr[qwords++] = (((uint64_t)A6XX_HLSQ_DBG_READ_SEL << 44)) |
1766 (1 << 21) | 1;
1767
1768 for (k = 0; k < regs->num_sets; k++) {
1769 unsigned int start = regs->regs[2 * k];
1770
1771 count = REG_PAIR_COUNT(regs->regs, k);
1772 ptr[qwords++] =
1773 a6xx_crashdump_registers.gpuaddr + *offset;
1774 ptr[qwords++] =
1775 (((uint64_t)(A6XX_HLSQ_DBG_AHB_READ_APERTURE +
1776 start - regs->regbase / 4) << 44)) |
1777 count;
1778
1779 *offset += count * sizeof(unsigned int);
1780 }
1781 }
1782 return qwords;
1783}
1784
Shrenuj Bansal41665402016-12-16 15:25:54 -08001785void a6xx_crashdump_init(struct adreno_device *adreno_dev)
1786{
1787 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
1788 unsigned int script_size = 0;
1789 unsigned int data_size = 0;
1790 unsigned int i, j, k;
1791 uint64_t *ptr;
1792 uint64_t offset = 0;
1793
1794 if (a6xx_capturescript.gpuaddr != 0 &&
1795 a6xx_crashdump_registers.gpuaddr != 0)
1796 return;
1797
1798 /*
1799 * We need to allocate two buffers:
1800 * 1 - the buffer to hold the draw script
1801 * 2 - the buffer to hold the data
1802 */
1803
1804 /*
1805 * To save the registers, we need 16 bytes per register pair for the
1806 * script and a dword for each register in the data
1807 */
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -06001808 for (i = 0; i < ARRAY_SIZE(a6xx_reg_list); i++) {
1809 struct reg_list *regs = &a6xx_reg_list[i];
1810
1811 /* 16 bytes for programming the aperture */
1812 if (regs->sel)
1813 script_size += 16;
Shrenuj Bansal41665402016-12-16 15:25:54 -08001814
1815 /* Each pair needs 16 bytes (2 qwords) */
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -06001816 script_size += regs->count * 16;
Shrenuj Bansal41665402016-12-16 15:25:54 -08001817
1818 /* Each register needs a dword in the data */
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -06001819 for (j = 0; j < regs->count; j++)
Shrenuj Bansal41665402016-12-16 15:25:54 -08001820 data_size += REG_PAIR_COUNT(regs->regs, j) *
1821 sizeof(unsigned int);
1822
1823 }
1824
Lynus Vaz9ad67a32017-03-10 14:55:02 +05301825 /*
1826 * To save the shader blocks for each block in each type we need 32
1827 * bytes for the script (16 bytes to program the aperture and 16 to
1828 * read the data) and then a block specific number of bytes to hold
1829 * the data
1830 */
1831 for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
1832 script_size += 32 * A6XX_NUM_SHADER_BANKS;
1833 data_size += a6xx_shader_blocks[i].sz * sizeof(unsigned int) *
1834 A6XX_NUM_SHADER_BANKS;
1835 }
1836
Shrenuj Bansal41665402016-12-16 15:25:54 -08001837 /* Calculate the script and data size for MVC registers */
1838 for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
1839 struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];
1840
1841 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
1842
1843 /* 16 bytes for programming the aperture */
1844 script_size += 16;
1845
1846 /* Reading each pair of registers takes 16 bytes */
1847 script_size += 16 * cluster->num_sets;
1848
1849 /* A dword per register read from the cluster list */
1850 for (k = 0; k < cluster->num_sets; k++)
1851 data_size += REG_PAIR_COUNT(cluster->regs, k) *
1852 sizeof(unsigned int);
1853 }
1854 }
1855
Lynus Vaz1e258612017-04-27 21:35:22 +05301856 /* Calculate the script and data size for debug AHB registers */
1857 for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
1858 struct a6xx_cluster_dbgahb_registers *cluster =
1859 &a6xx_dbgahb_ctx_clusters[i];
1860
1861 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
1862
1863 /* 16 bytes for programming the aperture */
1864 script_size += 16;
1865
1866 /* Reading each pair of registers takes 16 bytes */
1867 script_size += 16 * cluster->num_sets;
1868
1869 /* A dword per register read from the cluster list */
1870 for (k = 0; k < cluster->num_sets; k++)
1871 data_size += REG_PAIR_COUNT(cluster->regs, k) *
1872 sizeof(unsigned int);
1873 }
1874 }
1875
Harshdeep Dhattfbf71e62017-12-07 14:03:08 -07001876 /*
1877 * Calculate the script and data size for non context debug
1878 * AHB registers
1879 */
1880 for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
1881 struct a6xx_non_ctx_dbgahb_registers *regs =
1882 &a6xx_non_ctx_dbgahb[i];
1883
1884 /* 16 bytes for programming the aperture */
1885 script_size += 16;
1886
1887 /* Reading each pair of registers takes 16 bytes */
1888 script_size += 16 * regs->num_sets;
1889
1890 /* A dword per register read from the cluster list */
1891 for (k = 0; k < regs->num_sets; k++)
1892 data_size += REG_PAIR_COUNT(regs->regs, k) *
1893 sizeof(unsigned int);
1894 }
1895
Shrenuj Bansal41665402016-12-16 15:25:54 -08001896 /* Now allocate the script and data buffers */
1897
1898 /* The script buffers needs 2 extra qwords on the end */
1899 if (kgsl_allocate_global(device, &a6xx_capturescript,
1900 script_size + 16, KGSL_MEMFLAGS_GPUREADONLY,
1901 KGSL_MEMDESC_PRIVILEGED, "capturescript"))
1902 return;
1903
1904 if (kgsl_allocate_global(device, &a6xx_crashdump_registers, data_size,
1905 0, KGSL_MEMDESC_PRIVILEGED, "capturescript_regs")) {
1906 kgsl_free_global(KGSL_DEVICE(adreno_dev), &a6xx_capturescript);
1907 return;
1908 }
1909
1910 /* Build the crash script */
1911
1912 ptr = (uint64_t *)a6xx_capturescript.hostptr;
1913
1914 /* For the registers, program a read command for each pair */
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -06001915 for (i = 0; i < ARRAY_SIZE(a6xx_reg_list); i++) {
1916 struct reg_list *regs = &a6xx_reg_list[i];
Shrenuj Bansal41665402016-12-16 15:25:54 -08001917
Lynus Vaz1bba57b2017-09-26 11:55:04 +05301918 regs->offset = offset;
1919
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -06001920 /* Program the SEL_CNTL_CD register appropriately */
1921 if (regs->sel) {
1922 *ptr++ = regs->sel->val;
1923 *ptr++ = (((uint64_t)regs->sel->cd_reg << 44)) |
1924 (1 << 21) | 1;
1925 }
1926
1927 for (j = 0; j < regs->count; j++) {
Shrenuj Bansal41665402016-12-16 15:25:54 -08001928 unsigned int r = REG_PAIR_COUNT(regs->regs, j);
1929 *ptr++ = a6xx_crashdump_registers.gpuaddr + offset;
1930 *ptr++ = (((uint64_t) regs->regs[2 * j]) << 44) | r;
1931 offset += r * sizeof(unsigned int);
1932 }
1933 }
1934
Lynus Vaz9ad67a32017-03-10 14:55:02 +05301935 /* Program each shader block */
1936 for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
1937 ptr += _a6xx_crashdump_init_shader(&a6xx_shader_blocks[i], ptr,
1938 &offset);
1939 }
1940
Shrenuj Bansal41665402016-12-16 15:25:54 -08001941 /* Program the capturescript for the MVC regsiters */
1942 ptr += _a6xx_crashdump_init_mvc(ptr, &offset);
1943
Lynus Vaz1e258612017-04-27 21:35:22 +05301944 ptr += _a6xx_crashdump_init_ctx_dbgahb(ptr, &offset);
1945
Harshdeep Dhattfbf71e62017-12-07 14:03:08 -07001946 ptr += _a6xx_crashdump_init_non_ctx_dbgahb(ptr, &offset);
1947
Shrenuj Bansal41665402016-12-16 15:25:54 -08001948 *ptr++ = 0;
1949 *ptr++ = 0;
1950}