blob: ed0129f5705311be9bafb76dbd702837ad4b5bb1 [file] [log] [blame]
/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
13
14#include <linux/io.h>
15#include "kgsl.h"
16#include "adreno.h"
17#include "kgsl_snapshot.h"
18#include "adreno_snapshot.h"
19#include "a6xx_reg.h"
20#include "adreno_a6xx.h"
Kyle Piefer60733aa2017-03-21 11:24:01 -070021#include "kgsl_gmu.h"
Shrenuj Bansal41665402016-12-16 15:25:54 -080022
/* Number of hardware context banks dumped per context-banked cluster */
#define A6XX_NUM_CTXTS 2

/*
 * MVC register clusters. Each table lists inclusive (start, end) register
 * pairs, so num_sets below is ARRAY_SIZE()/2.
 */
static const unsigned int a6xx_gras_cluster[] = {
	0x8000, 0x8006, 0x8010, 0x8092, 0x8094, 0x809D, 0x80A0, 0x80A6,
	0x80AF, 0x80F1, 0x8100, 0x8107, 0x8109, 0x8109, 0x8110, 0x8110,
	0x8400, 0x840B,
};

static const unsigned int a6xx_ps_cluster[] = {
	0x8800, 0x8806, 0x8809, 0x8811, 0x8818, 0x881E, 0x8820, 0x8865,
	0x8870, 0x8879, 0x8880, 0x8889, 0x8890, 0x8891, 0x8898, 0x8898,
	0x88C0, 0x88c1, 0x88D0, 0x88E3, 0x88F0, 0x88F3, 0x8900, 0x891A,
	0x8927, 0x8928, 0x8C00, 0x8C01, 0x8C17, 0x8C33, 0x9200, 0x9216,
	0x9218, 0x9236, 0x9300, 0x9306,
};

static const unsigned int a6xx_fe_cluster[] = {
	0x9300, 0x9306, 0x9800, 0x9806, 0x9B00, 0x9B07, 0xA000, 0xA009,
	0xA00E, 0xA0EF, 0xA0F8, 0xA0F8,
};

static const unsigned int a6xx_pc_vs_cluster[] = {
	0x9100, 0x9108, 0x9300, 0x9306, 0x9980, 0x9981, 0x9B00, 0x9B07,
};

/*
 * Per-cluster dump descriptors.
 * @id: CP cluster identifier written into the snapshot section header
 * @regs: (start, end) pair table above
 * @num_sets: number of pairs in @regs
 * @offset0/@offset1: offsets into the crash dumper output buffer for
 *	context 0 / context 1 data (filled in when the capture script is
 *	built; consumed by the MVC snapshot callbacks)
 */
static struct a6xx_cluster_registers {
	unsigned int id;
	const unsigned int *regs;
	unsigned int num_sets;
	unsigned int offset0;
	unsigned int offset1;
} a6xx_clusters[] = {
	{ CP_CLUSTER_GRAS, a6xx_gras_cluster, ARRAY_SIZE(a6xx_gras_cluster)/2 },
	{ CP_CLUSTER_PS, a6xx_ps_cluster, ARRAY_SIZE(a6xx_ps_cluster)/2 },
	{ CP_CLUSTER_FE, a6xx_fe_cluster, ARRAY_SIZE(a6xx_fe_cluster)/2 },
	{ CP_CLUSTER_PC_VS, a6xx_pc_vs_cluster,
		ARRAY_SIZE(a6xx_pc_vs_cluster)/2 },
};

/* Pairs a cluster descriptor with the context bank being dumped */
struct a6xx_cluster_regs_info {
	struct a6xx_cluster_registers *cluster;
	unsigned int ctxt_id;
};
66
/*
 * Register clusters read through the HLSQ debug AHB aperture, as inclusive
 * (start, end) dword-offset pairs.
 */
static const unsigned int a6xx_sp_vs_hlsq_cluster[] = {
	0xB800, 0xB803, 0xB820, 0xB822,
};

static const unsigned int a6xx_sp_vs_sp_cluster[] = {
	0xA800, 0xA824, 0xA830, 0xA83C, 0xA840, 0xA864, 0xA870, 0xA895,
	0xA8A0, 0xA8AF, 0xA8C0, 0xA8C3,
};

static const unsigned int a6xx_hlsq_duplicate_cluster[] = {
	0xBB10, 0xBB11, 0xBB20, 0xBB29,
};

static const unsigned int a6xx_hlsq_2d_duplicate_cluster[] = {
	0xBD80, 0xBD80,
};

static const unsigned int a6xx_sp_duplicate_cluster[] = {
	0xAB00, 0xAB00, 0xAB04, 0xAB05, 0xAB10, 0xAB1B, 0xAB20, 0xAB20,
};

static const unsigned int a6xx_tp_duplicate_cluster[] = {
	0xB300, 0xB307, 0xB309, 0xB309, 0xB380, 0xB382,
};

static const unsigned int a6xx_sp_ps_hlsq_cluster[] = {
	0xB980, 0xB980, 0xB982, 0xB987, 0xB990, 0xB99B, 0xB9A0, 0xB9A2,
	0xB9C0, 0xB9C9,
};

static const unsigned int a6xx_sp_ps_hlsq_2d_cluster[] = {
	0xBD80, 0xBD80,
};

static const unsigned int a6xx_sp_ps_sp_cluster[] = {
	0xA980, 0xA9A8, 0xA9B0, 0xA9BC, 0xA9D0, 0xA9D3, 0xA9E0, 0xA9F3,
	0xAA00, 0xAA00, 0xAA30, 0xAA31,
};

static const unsigned int a6xx_sp_ps_sp_2d_cluster[] = {
	0xACC0, 0xACC0,
};

static const unsigned int a6xx_sp_ps_tp_cluster[] = {
	0xB180, 0xB183, 0xB190, 0xB191,
};

static const unsigned int a6xx_sp_ps_tp_2d_cluster[] = {
	0xB4C0, 0xB4D1,
};

/*
 * Context-banked clusters behind the HLSQ debug AHB aperture.
 * @regbase: byte address of the aperture window (e.g. 0x0002E000 maps to
 *	dword offset 0xB800 — see a6xx_read_dbgahb())
 * @statetype: value programmed into A6XX_HLSQ_DBG_READ_SEL (plus the
 *	context id) to select the bank
 * @offset0/@offset1: per-context offsets into the crash dumper output
 *	buffer, consumed by a6xx_snapshot_cluster_dbgahb()
 */
static struct a6xx_cluster_dbgahb_registers {
	unsigned int id;
	unsigned int regbase;
	unsigned int statetype;
	const unsigned int *regs;
	unsigned int num_sets;
	unsigned int offset0;
	unsigned int offset1;
} a6xx_dbgahb_ctx_clusters[] = {
	{ CP_CLUSTER_SP_VS, 0x0002E000, 0x41, a6xx_sp_vs_hlsq_cluster,
		ARRAY_SIZE(a6xx_sp_vs_hlsq_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002A000, 0x21, a6xx_sp_vs_sp_cluster,
		ARRAY_SIZE(a6xx_sp_vs_sp_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002E000, 0x41, a6xx_hlsq_duplicate_cluster,
		ARRAY_SIZE(a6xx_hlsq_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002F000, 0x45, a6xx_hlsq_2d_duplicate_cluster,
		ARRAY_SIZE(a6xx_hlsq_2d_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002A000, 0x21, a6xx_sp_duplicate_cluster,
		ARRAY_SIZE(a6xx_sp_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002C000, 0x1, a6xx_tp_duplicate_cluster,
		ARRAY_SIZE(a6xx_tp_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002E000, 0x42, a6xx_sp_ps_hlsq_cluster,
		ARRAY_SIZE(a6xx_sp_ps_hlsq_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002F000, 0x46, a6xx_sp_ps_hlsq_2d_cluster,
		ARRAY_SIZE(a6xx_sp_ps_hlsq_2d_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002A000, 0x22, a6xx_sp_ps_sp_cluster,
		ARRAY_SIZE(a6xx_sp_ps_sp_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002B000, 0x26, a6xx_sp_ps_sp_2d_cluster,
		ARRAY_SIZE(a6xx_sp_ps_sp_2d_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002C000, 0x2, a6xx_sp_ps_tp_cluster,
		ARRAY_SIZE(a6xx_sp_ps_tp_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002D000, 0x6, a6xx_sp_ps_tp_2d_cluster,
		ARRAY_SIZE(a6xx_sp_ps_tp_2d_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002E000, 0x42, a6xx_hlsq_duplicate_cluster,
		ARRAY_SIZE(a6xx_hlsq_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002A000, 0x22, a6xx_sp_duplicate_cluster,
		ARRAY_SIZE(a6xx_sp_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002C000, 0x2, a6xx_tp_duplicate_cluster,
		ARRAY_SIZE(a6xx_tp_duplicate_cluster) / 2 },
};

/* Pairs a debug-AHB cluster descriptor with the context bank being dumped */
struct a6xx_cluster_dbgahb_regs_info {
	struct a6xx_cluster_dbgahb_registers *cluster;
	unsigned int ctxt_id;
};
163
/*
 * Non-context-banked registers behind the HLSQ debug AHB aperture,
 * as inclusive (start, end) dword-offset pairs.
 */
static const unsigned int a6xx_hlsq_non_ctx_registers[] = {
	0xBE00, 0xBE01, 0xBE04, 0xBE05, 0xBE08, 0xBE09, 0xBE10, 0xBE15,
	0xBE20, 0xBE23,
};

static const unsigned int a6xx_sp_non_ctx_registers[] = {
	0xAE00, 0xAE04, 0xAE0C, 0xAE0C, 0xAE0F, 0xAE2B, 0xAE30, 0xAE32,
	0xAE35, 0xAE35, 0xAE3A, 0xAE3F, 0xAE50, 0xAE52,
};

static const unsigned int a6xx_tp_non_ctx_registers[] = {
	0xB600, 0xB601, 0xB604, 0xB605, 0xB610, 0xB61B, 0xB620, 0xB623,
};

/*
 * Non-context debug-AHB groups. Fields mirror
 * a6xx_cluster_dbgahb_registers but with a single crash-dump buffer
 * @offset since there is no per-context banking.
 */
static struct a6xx_non_ctx_dbgahb_registers {
	unsigned int regbase;
	unsigned int statetype;
	const unsigned int *regs;
	unsigned int num_sets;
	unsigned int offset;
} a6xx_non_ctx_dbgahb[] = {
	{ 0x0002F800, 0x40, a6xx_hlsq_non_ctx_registers,
		ARRAY_SIZE(a6xx_hlsq_non_ctx_registers) / 2 },
	{ 0x0002B800, 0x20, a6xx_sp_non_ctx_registers,
		ARRAY_SIZE(a6xx_sp_non_ctx_registers) / 2 },
	{ 0x0002D800, 0x0, a6xx_tp_non_ctx_registers,
		ARRAY_SIZE(a6xx_tp_non_ctx_registers) / 2 },
};
192
/*
 * VBIF registers for VBIF version 0x20xxxxxx, as inclusive
 * (start, end) pairs.
 */
static const unsigned int a6xx_vbif_ver_20xxxxxx_registers[] = {
	/* VBIF */
	0x3000, 0x3007, 0x300C, 0x3014, 0x3018, 0x302D, 0x3030, 0x3031,
	0x3034, 0x3036, 0x303C, 0x303D, 0x3040, 0x3040, 0x3042, 0x3042,
	0x3049, 0x3049, 0x3058, 0x3058, 0x305A, 0x3061, 0x3064, 0x3068,
	0x306C, 0x306D, 0x3080, 0x3088, 0x308B, 0x308C, 0x3090, 0x3094,
	0x3098, 0x3098, 0x309C, 0x309C, 0x30C0, 0x30C0, 0x30C8, 0x30C8,
	0x30D0, 0x30D0, 0x30D8, 0x30D8, 0x30E0, 0x30E0, 0x3100, 0x3100,
	0x3108, 0x3108, 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120,
	0x3124, 0x3125, 0x3129, 0x3129, 0x3131, 0x3131, 0x3154, 0x3154,
	0x3156, 0x3156, 0x3158, 0x3158, 0x315A, 0x315A, 0x315C, 0x315C,
	0x315E, 0x315E, 0x3160, 0x3160, 0x3162, 0x3162, 0x340C, 0x340C,
	0x3410, 0x3410, 0x3800, 0x3801,
};

/* GMU register ranges, as inclusive (start, end) pairs */
static const unsigned int a6xx_gmu_registers[] = {
	/* GMU GX */
	0x1A800, 0x1A800, 0x1A810, 0x1A813, 0x1A816, 0x1A816, 0x1A818, 0x1A81B,
	0x1A81E, 0x1A81E, 0x1A820, 0x1A823, 0x1A826, 0x1A826, 0x1A828, 0x1A82B,
	0x1A82E, 0x1A82E, 0x1A830, 0x1A833, 0x1A836, 0x1A836, 0x1A838, 0x1A83B,
	0x1A83E, 0x1A83E, 0x1A840, 0x1A843, 0x1A846, 0x1A846, 0x1A880, 0x1A884,
	0x1A900, 0x1A92B, 0x1A940, 0x1A940,
	/* GMU TCM */
	0x1B400, 0x1C3FF, 0x1C400, 0x1D3FF,
	/* GMU CX */
	0x1F400, 0x1F407, 0x1F410, 0x1F412, 0x1F500, 0x1F500, 0x1F507, 0x1F50A,
	0x1F800, 0x1F804, 0x1F807, 0x1F808, 0x1F80B, 0x1F80C, 0x1F80F, 0x1F81C,
	0x1F824, 0x1F82A, 0x1F82D, 0x1F830, 0x1F840, 0x1F853, 0x1F887, 0x1F889,
	0x1F8A0, 0x1F8A2, 0x1F8A4, 0x1F8AF, 0x1F8C0, 0x1F8C3, 0x1F8D0, 0x1F8D0,
	0x1F8E4, 0x1F8E4, 0x1F8E8, 0x1F8EC, 0x1F900, 0x1F903, 0x1F940, 0x1F940,
	0x1F942, 0x1F944, 0x1F94C, 0x1F94D, 0x1F94F, 0x1F951, 0x1F954, 0x1F954,
	0x1F957, 0x1F958, 0x1F95D, 0x1F95D, 0x1F962, 0x1F962, 0x1F964, 0x1F965,
	0x1F980, 0x1F986, 0x1F990, 0x1F99E, 0x1F9C0, 0x1F9C0, 0x1F9C5, 0x1F9CC,
	0x1F9E0, 0x1F9E2, 0x1F9F0, 0x1F9F0, 0x1FA00, 0x1FA03,
	/* GPU RSCC */
	0x23740, 0x23742, 0x23744, 0x23747, 0x2374C, 0x23787, 0x237EC, 0x237EF,
	0x237F4, 0x2382F, 0x23894, 0x23897, 0x2389C, 0x238D7, 0x2393C, 0x2393F,
	0x23944, 0x2397F,
	/* GMU AO */
	0x23B00, 0x23B16, 0x23C00, 0x23C00,
	/* GPU CC */
	0x24000, 0x24012, 0x24040, 0x24052, 0x24400, 0x24404, 0x24407, 0x2440B,
	0x24415, 0x2441C, 0x2441E, 0x2442D, 0x2443C, 0x2443D, 0x2443F, 0x24440,
	0x24442, 0x24449, 0x24458, 0x2445A, 0x24540, 0x2455E, 0x24800, 0x24802,
	0x24C00, 0x24C02, 0x25400, 0x25402, 0x25800, 0x25802, 0x25C00, 0x25C02,
	0x26000, 0x26002,
	/* GPU CC ACD */
	0x26400, 0x26416, 0x26420, 0x26427,
};

/*
 * Selects the VBIF register list by matching the VBIF version register
 * against (version, mask) — here 0x20xxxxxx via mask 0xFF000000.
 */
static const struct adreno_vbif_snapshot_registers
a6xx_vbif_snapshot_registers[] = {
	{ 0x20040000, 0xFF000000, a6xx_vbif_ver_20xxxxxx_registers,
				ARRAY_SIZE(a6xx_vbif_ver_20xxxxxx_registers)/2},
};
248
/*
 * Set of registers to dump for A6XX on snapshot.
 * Registers in pairs - first value is the start offset, second
 * is the stop offset (inclusive)
 */

static const unsigned int a6xx_registers[] = {
	/* RBBM */
	0x0000, 0x0002, 0x0010, 0x0010, 0x0012, 0x0012, 0x0018, 0x001B,
	0x001e, 0x0032, 0x0038, 0x003C, 0x0042, 0x0042, 0x0044, 0x0044,
	0x0047, 0x0047, 0x0056, 0x0056, 0x00AD, 0x00AE, 0x00B0, 0x00FB,
	0x0100, 0x011D, 0x0200, 0x020D, 0x0218, 0x023D, 0x0400, 0x04F9,
	0x0500, 0x0500, 0x0505, 0x050B, 0x050E, 0x0511, 0x0533, 0x0533,
	0x0540, 0x0555,
	/* CP */
	0x0800, 0x0808, 0x0810, 0x0813, 0x0820, 0x0821, 0x0823, 0x0824,
	0x0826, 0x0827, 0x0830, 0x0833, 0x0840, 0x0843, 0x084F, 0x086F,
	0x0880, 0x088A, 0x08A0, 0x08AB, 0x08C0, 0x08C4, 0x08D0, 0x08DD,
	0x08F0, 0x08F3, 0x0900, 0x0903, 0x0908, 0x0911, 0x0928, 0x093E,
	0x0942, 0x094D, 0x0980, 0x0984, 0x098D, 0x0996, 0x0998, 0x099E,
	0x09A0, 0x09A6, 0x09A8, 0x09AE, 0x09B0, 0x09B1, 0x09C2, 0x09C8,
	0x0A00, 0x0A03,
	/* VSC */
	0x0C00, 0x0C04, 0x0C06, 0x0C06, 0x0C10, 0x0CD9, 0x0E00, 0x0E0E,
	/* UCHE */
	0x0E10, 0x0E13, 0x0E17, 0x0E19, 0x0E1C, 0x0E2B, 0x0E30, 0x0E32,
	0x0E38, 0x0E39,
	/* GRAS */
	0x8600, 0x8601, 0x8610, 0x861B, 0x8620, 0x8620, 0x8628, 0x862B,
	0x8630, 0x8637,
	/* RB */
	0x8E01, 0x8E01, 0x8E04, 0x8E05, 0x8E07, 0x8E08, 0x8E0C, 0x8E0C,
	0x8E10, 0x8E1C, 0x8E20, 0x8E25, 0x8E28, 0x8E28, 0x8E2C, 0x8E2F,
	0x8E3B, 0x8E3E, 0x8E40, 0x8E43, 0x8E50, 0x8E5E, 0x8E70, 0x8E77,
	/* VPC */
	0x9600, 0x9604, 0x9624, 0x9637,
	/* PC */
	0x9E00, 0x9E01, 0x9E03, 0x9E0E, 0x9E11, 0x9E16, 0x9E19, 0x9E19,
	0x9E1C, 0x9E1C, 0x9E20, 0x9E23, 0x9E30, 0x9E31, 0x9E34, 0x9E34,
	0x9E70, 0x9E72, 0x9E78, 0x9E79, 0x9E80, 0x9FFF,
	/* VFD */
	0xA600, 0xA601, 0xA603, 0xA603, 0xA60A, 0xA60A, 0xA610, 0xA617,
	0xA630, 0xA630,
};

/*
 * Set of registers to dump for A6XX before actually triggering crash dumper.
 * Registers in pairs - first value is the start offset, second
 * is the stop offset (inclusive)
 */
static const unsigned int a6xx_pre_crashdumper_registers[] = {
	/* RBBM: RBBM_STATUS - RBBM_STATUS3 */
	0x210, 0x213,
	/* CP: CP_STATUS_1 */
	0x825, 0x825,
};
305
/*
 * Hardware block selectors for the A6xx debug bus. Note the values are
 * sparse (e.g. 0xe jumps to 0x10); they are hardware-defined IDs, not a
 * dense enumeration.
 */
enum a6xx_debugbus_id {
	A6XX_DBGBUS_CP           = 0x1,
	A6XX_DBGBUS_RBBM         = 0x2,
	A6XX_DBGBUS_VBIF         = 0x3,
	A6XX_DBGBUS_HLSQ         = 0x4,
	A6XX_DBGBUS_UCHE         = 0x5,
	A6XX_DBGBUS_DPM          = 0x6,
	A6XX_DBGBUS_TESS         = 0x7,
	A6XX_DBGBUS_PC           = 0x8,
	A6XX_DBGBUS_VFDP         = 0x9,
	A6XX_DBGBUS_VPC          = 0xa,
	A6XX_DBGBUS_TSE          = 0xb,
	A6XX_DBGBUS_RAS          = 0xc,
	A6XX_DBGBUS_VSC          = 0xd,
	A6XX_DBGBUS_COM          = 0xe,
	A6XX_DBGBUS_LRZ          = 0x10,
	A6XX_DBGBUS_A2D          = 0x11,
	A6XX_DBGBUS_CCUFCHE      = 0x12,
	A6XX_DBGBUS_GMU_CX       = 0x13,
	A6XX_DBGBUS_RBP          = 0x14,
	A6XX_DBGBUS_DCS          = 0x15,
	A6XX_DBGBUS_RBBM_CFG     = 0x16,
	A6XX_DBGBUS_CX           = 0x17,
	A6XX_DBGBUS_GMU_GX       = 0x18,
	A6XX_DBGBUS_TPFCHE       = 0x19,
	A6XX_DBGBUS_GPC          = 0x1d,
	A6XX_DBGBUS_LARC         = 0x1e,
	A6XX_DBGBUS_HLSQ_SPTP    = 0x1f,
	A6XX_DBGBUS_RB_0         = 0x20,
	A6XX_DBGBUS_RB_1         = 0x21,
	A6XX_DBGBUS_UCHE_WRAPPER = 0x24,
	A6XX_DBGBUS_CCU_0        = 0x28,
	A6XX_DBGBUS_CCU_1        = 0x29,
	A6XX_DBGBUS_VFD_0        = 0x38,
	A6XX_DBGBUS_VFD_1        = 0x39,
	A6XX_DBGBUS_VFD_2        = 0x3a,
	A6XX_DBGBUS_VFD_3        = 0x3b,
	A6XX_DBGBUS_SP_0         = 0x40,
	A6XX_DBGBUS_SP_1         = 0x41,
	A6XX_DBGBUS_TPL1_0       = 0x48,
	A6XX_DBGBUS_TPL1_1       = 0x49,
	A6XX_DBGBUS_TPL1_2       = 0x4a,
	A6XX_DBGBUS_TPL1_3       = 0x4b,
};

/* Blocks read through the GX DBGC; every block dumps 0x100 dwords */
static const struct adreno_debugbus_block a6xx_dbgc_debugbus_blocks[] = {
	{ A6XX_DBGBUS_CP, 0x100, },
	{ A6XX_DBGBUS_RBBM, 0x100, },
	{ A6XX_DBGBUS_HLSQ, 0x100, },
	{ A6XX_DBGBUS_UCHE, 0x100, },
	{ A6XX_DBGBUS_DPM, 0x100, },
	{ A6XX_DBGBUS_TESS, 0x100, },
	{ A6XX_DBGBUS_PC, 0x100, },
	{ A6XX_DBGBUS_VFDP, 0x100, },
	{ A6XX_DBGBUS_VPC, 0x100, },
	{ A6XX_DBGBUS_TSE, 0x100, },
	{ A6XX_DBGBUS_RAS, 0x100, },
	{ A6XX_DBGBUS_VSC, 0x100, },
	{ A6XX_DBGBUS_COM, 0x100, },
	{ A6XX_DBGBUS_LRZ, 0x100, },
	{ A6XX_DBGBUS_A2D, 0x100, },
	{ A6XX_DBGBUS_CCUFCHE, 0x100, },
	{ A6XX_DBGBUS_RBP, 0x100, },
	{ A6XX_DBGBUS_DCS, 0x100, },
	{ A6XX_DBGBUS_RBBM_CFG, 0x100, },
	{ A6XX_DBGBUS_GMU_GX, 0x100, },
	{ A6XX_DBGBUS_TPFCHE, 0x100, },
	{ A6XX_DBGBUS_GPC, 0x100, },
	{ A6XX_DBGBUS_LARC, 0x100, },
	{ A6XX_DBGBUS_HLSQ_SPTP, 0x100, },
	{ A6XX_DBGBUS_RB_0, 0x100, },
	{ A6XX_DBGBUS_RB_1, 0x100, },
	{ A6XX_DBGBUS_UCHE_WRAPPER, 0x100, },
	{ A6XX_DBGBUS_CCU_0, 0x100, },
	{ A6XX_DBGBUS_CCU_1, 0x100, },
	{ A6XX_DBGBUS_VFD_0, 0x100, },
	{ A6XX_DBGBUS_VFD_1, 0x100, },
	{ A6XX_DBGBUS_VFD_2, 0x100, },
	{ A6XX_DBGBUS_VFD_3, 0x100, },
	{ A6XX_DBGBUS_SP_0, 0x100, },
	{ A6XX_DBGBUS_SP_1, 0x100, },
	{ A6XX_DBGBUS_TPL1_0, 0x100, },
	{ A6XX_DBGBUS_TPL1_1, 0x100, },
	{ A6XX_DBGBUS_TPL1_2, 0x100, },
	{ A6XX_DBGBUS_TPL1_3, 0x100, },
};

/* Mapped CX DBGC register space; NULL until mapped by the snapshot code */
static void __iomem *a6xx_cx_dbgc;

/* Blocks read through the CX DBGC instead of the GX DBGC */
static const struct adreno_debugbus_block a6xx_cx_dbgc_debugbus_blocks[] = {
	{ A6XX_DBGBUS_VBIF, 0x100, },
	{ A6XX_DBGBUS_GMU_CX, 0x100, },
	{ A6XX_DBGBUS_CX, 0x100, },
};
399
/* Number of banks each shader block is replicated across */
#define A6XX_NUM_SHADER_BANKS 3
/* Shift to place the statetype in the shader read-select register */
#define A6XX_SHADER_STATETYPE_SHIFT 8

/* Hardware statetype IDs for the dumpable shader memories */
enum a6xx_shader_obj {
	A6XX_TP0_TMO_DATA               = 0x9,
	A6XX_TP0_SMO_DATA               = 0xa,
	A6XX_TP0_MIPMAP_BASE_DATA       = 0xb,
	A6XX_TP1_TMO_DATA               = 0x19,
	A6XX_TP1_SMO_DATA               = 0x1a,
	A6XX_TP1_MIPMAP_BASE_DATA       = 0x1b,
	A6XX_SP_INST_DATA               = 0x29,
	A6XX_SP_LB_0_DATA               = 0x2a,
	A6XX_SP_LB_1_DATA               = 0x2b,
	A6XX_SP_LB_2_DATA               = 0x2c,
	A6XX_SP_LB_3_DATA               = 0x2d,
	A6XX_SP_LB_4_DATA               = 0x2e,
	A6XX_SP_LB_5_DATA               = 0x2f,
	A6XX_SP_CB_BINDLESS_DATA        = 0x30,
	A6XX_SP_CB_LEGACY_DATA          = 0x31,
	A6XX_SP_UAV_DATA                = 0x32,
	A6XX_SP_INST_TAG                = 0x33,
	A6XX_SP_CB_BINDLESS_TAG         = 0x34,
	A6XX_SP_TMO_UMO_TAG             = 0x35,
	A6XX_SP_SMO_TAG                 = 0x36,
	A6XX_SP_STATE_DATA              = 0x37,
	A6XX_HLSQ_CHUNK_CVS_RAM         = 0x49,
	A6XX_HLSQ_CHUNK_CPS_RAM         = 0x4a,
	A6XX_HLSQ_CHUNK_CVS_RAM_TAG     = 0x4b,
	A6XX_HLSQ_CHUNK_CPS_RAM_TAG     = 0x4c,
	A6XX_HLSQ_ICB_CVS_CB_BASE_TAG   = 0x4d,
	A6XX_HLSQ_ICB_CPS_CB_BASE_TAG   = 0x4e,
	A6XX_HLSQ_CVS_MISC_RAM          = 0x50,
	A6XX_HLSQ_CPS_MISC_RAM          = 0x51,
	A6XX_HLSQ_INST_RAM              = 0x52,
	A6XX_HLSQ_GFX_CVS_CONST_RAM     = 0x53,
	A6XX_HLSQ_GFX_CPS_CONST_RAM     = 0x54,
	A6XX_HLSQ_CVS_MISC_RAM_TAG      = 0x55,
	A6XX_HLSQ_CPS_MISC_RAM_TAG      = 0x56,
	A6XX_HLSQ_INST_RAM_TAG          = 0x57,
	A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG = 0x58,
	A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG = 0x59,
	A6XX_HLSQ_PWR_REST_RAM          = 0x5a,
	A6XX_HLSQ_PWR_REST_TAG          = 0x5b,
	A6XX_HLSQ_DATAPATH_META         = 0x60,
	A6XX_HLSQ_FRONTEND_META         = 0x61,
	A6XX_HLSQ_INDIRECT_META         = 0x62,
	A6XX_HLSQ_BACKEND_META          = 0x63
};

/*
 * One dumpable shader memory.
 * @statetype: a6xx_shader_obj ID
 * @sz: size of one bank in dwords
 * @offset: offset of this block's data in the crash dumper output buffer
 *	(bank data is laid out back to back — see a6xx_snapshot_shader())
 */
struct a6xx_shader_block {
	unsigned int statetype;
	unsigned int sz;
	uint64_t offset;
};

/* Identifies one (block, bank) pair for the snapshot section callback */
struct a6xx_shader_block_info {
	struct a6xx_shader_block *block;
	unsigned int bank;
	uint64_t offset;
};

/*
 * All shader memories dumped on snapshot. NOTE(review): the enum's
 * A6XX_HLSQ_BACKEND_META has no entry here — confirm whether that is
 * intentional.
 */
static struct a6xx_shader_block a6xx_shader_blocks[] = {
	{A6XX_TP0_TMO_DATA,               0x200},
	{A6XX_TP0_SMO_DATA,               0x80,},
	{A6XX_TP0_MIPMAP_BASE_DATA,       0x3C0},
	{A6XX_TP1_TMO_DATA,               0x200},
	{A6XX_TP1_SMO_DATA,               0x80,},
	{A6XX_TP1_MIPMAP_BASE_DATA,       0x3C0},
	{A6XX_SP_INST_DATA,               0x800},
	{A6XX_SP_LB_0_DATA,               0x800},
	{A6XX_SP_LB_1_DATA,               0x800},
	{A6XX_SP_LB_2_DATA,               0x800},
	{A6XX_SP_LB_3_DATA,               0x800},
	{A6XX_SP_LB_4_DATA,               0x800},
	{A6XX_SP_LB_5_DATA,               0x200},
	{A6XX_SP_CB_BINDLESS_DATA,        0x2000},
	{A6XX_SP_CB_LEGACY_DATA,          0x280,},
	{A6XX_SP_UAV_DATA,                0x80,},
	{A6XX_SP_INST_TAG,                0x80,},
	{A6XX_SP_CB_BINDLESS_TAG,         0x80,},
	{A6XX_SP_TMO_UMO_TAG,             0x80,},
	{A6XX_SP_SMO_TAG,                 0x80},
	{A6XX_SP_STATE_DATA,              0x3F},
	{A6XX_HLSQ_CHUNK_CVS_RAM,         0x1C0},
	{A6XX_HLSQ_CHUNK_CPS_RAM,         0x280},
	{A6XX_HLSQ_CHUNK_CVS_RAM_TAG,     0x40,},
	{A6XX_HLSQ_CHUNK_CPS_RAM_TAG,     0x40,},
	{A6XX_HLSQ_ICB_CVS_CB_BASE_TAG,   0x4,},
	{A6XX_HLSQ_ICB_CPS_CB_BASE_TAG,   0x4,},
	{A6XX_HLSQ_CVS_MISC_RAM,          0x1C0},
	{A6XX_HLSQ_CPS_MISC_RAM,          0x580},
	{A6XX_HLSQ_INST_RAM,              0x800},
	{A6XX_HLSQ_GFX_CVS_CONST_RAM,     0x800},
	{A6XX_HLSQ_GFX_CPS_CONST_RAM,     0x800},
	{A6XX_HLSQ_CVS_MISC_RAM_TAG,      0x8,},
	{A6XX_HLSQ_CPS_MISC_RAM_TAG,      0x4,},
	{A6XX_HLSQ_INST_RAM_TAG,          0x80,},
	{A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0xC,},
	{A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x10},
	{A6XX_HLSQ_PWR_REST_RAM,          0x28},
	{A6XX_HLSQ_PWR_REST_TAG,          0x14},
	{A6XX_HLSQ_DATAPATH_META,         0x40,},
	{A6XX_HLSQ_FRONTEND_META,         0x40},
	{A6XX_HLSQ_INDIRECT_META,         0x40,}
};
505
/* GPU-visible buffer holding the crash dumper capture script */
static struct kgsl_memdesc a6xx_capturescript;
/* GPU-visible buffer the crash dumper writes register/shader data into */
static struct kgsl_memdesc a6xx_crashdump_registers;
/* True when the crash dumper ran successfully; otherwise the snapshot
 * callbacks fall back to their legacy AHB-read paths.
 */
static bool crash_dump_valid;
509
510static size_t a6xx_legacy_snapshot_registers(struct kgsl_device *device,
511 u8 *buf, size_t remain)
512{
513 struct kgsl_snapshot_registers regs = {
514 .regs = a6xx_registers,
515 .count = ARRAY_SIZE(a6xx_registers) / 2,
516 };
517
518 return kgsl_snapshot_dump_registers(device, buf, remain, &regs);
519}
520
/* Register lists the crash dumper is scripted to capture */
static struct cdregs {
	const unsigned int *regs;
	unsigned int size;	/* total entries (pairs * 2) */
} _a6xx_cd_registers[] = {
	{ a6xx_registers, ARRAY_SIZE(a6xx_registers) },
};

/* Number of registers in pair _i of list _a (ranges are inclusive) */
#define REG_PAIR_COUNT(_a, _i) \
	(((_a)[(2 * (_i)) + 1] - (_a)[2 * (_i)]) + 1)
530
531static size_t a6xx_snapshot_registers(struct kgsl_device *device, u8 *buf,
532 size_t remain, void *priv)
533{
534 struct kgsl_snapshot_regs *header = (struct kgsl_snapshot_regs *)buf;
535 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
536 unsigned int *src = (unsigned int *)a6xx_crashdump_registers.hostptr;
537 unsigned int i, j, k;
538 unsigned int count = 0;
539
540 if (crash_dump_valid == false)
541 return a6xx_legacy_snapshot_registers(device, buf, remain);
542
543 if (remain < sizeof(*header)) {
544 SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
545 return 0;
546 }
547
548 remain -= sizeof(*header);
549
550 for (i = 0; i < ARRAY_SIZE(_a6xx_cd_registers); i++) {
551 struct cdregs *regs = &_a6xx_cd_registers[i];
552
553 for (j = 0; j < regs->size / 2; j++) {
554 unsigned int start = regs->regs[2 * j];
555 unsigned int end = regs->regs[(2 * j) + 1];
556
557 if (remain < ((end - start) + 1) * 8) {
558 SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
559 goto out;
560 }
561
562 remain -= ((end - start) + 1) * 8;
563
564 for (k = start; k <= end; k++, count++) {
565 *data++ = k;
566 *data++ = *src++;
567 }
568 }
569 }
570
571out:
572 header->count = count;
573
574 /* Return the size of the section */
575 return (count * 8) + sizeof(*header);
576}
577
Lynus Vaz030473e2017-06-22 17:33:06 +0530578static size_t a6xx_snapshot_pre_crashdump_regs(struct kgsl_device *device,
579 u8 *buf, size_t remain, void *priv)
580{
581 struct kgsl_snapshot_registers pre_cdregs = {
582 .regs = a6xx_pre_crashdumper_registers,
583 .count = ARRAY_SIZE(a6xx_pre_crashdumper_registers)/2,
584 };
585
586 return kgsl_snapshot_dump_registers(device, buf, remain, &pre_cdregs);
587}
588
Lynus Vaz9ad67a32017-03-10 14:55:02 +0530589static size_t a6xx_snapshot_shader_memory(struct kgsl_device *device,
590 u8 *buf, size_t remain, void *priv)
591{
592 struct kgsl_snapshot_shader *header =
593 (struct kgsl_snapshot_shader *) buf;
594 struct a6xx_shader_block_info *info =
595 (struct a6xx_shader_block_info *) priv;
596 struct a6xx_shader_block *block = info->block;
597 unsigned int *data = (unsigned int *) (buf + sizeof(*header));
598
599 if (remain < SHADER_SECTION_SZ(block->sz)) {
600 SNAPSHOT_ERR_NOMEM(device, "SHADER MEMORY");
601 return 0;
602 }
603
604 header->type = block->statetype;
605 header->index = info->bank;
606 header->size = block->sz;
607
608 memcpy(data, a6xx_crashdump_registers.hostptr + info->offset,
609 block->sz);
610
611 return SHADER_SECTION_SZ(block->sz);
612}
613
614static void a6xx_snapshot_shader(struct kgsl_device *device,
615 struct kgsl_snapshot *snapshot)
616{
617 unsigned int i, j;
618 struct a6xx_shader_block_info info;
619
620 /* Shader blocks can only be read by the crash dumper */
621 if (crash_dump_valid == false)
622 return;
623
624 for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
625 for (j = 0; j < A6XX_NUM_SHADER_BANKS; j++) {
626 info.block = &a6xx_shader_blocks[i];
627 info.bank = j;
628 info.offset = a6xx_shader_blocks[i].offset +
629 (j * a6xx_shader_blocks[i].sz);
630
631 /* Shader working/shadow memory */
632 kgsl_snapshot_add_section(device,
633 KGSL_SNAPSHOT_SECTION_SHADER,
634 snapshot, a6xx_snapshot_shader_memory, &info);
635 }
636 }
637}
638
Lynus Vaza5922742017-03-14 18:50:54 +0530639static void a6xx_snapshot_mempool(struct kgsl_device *device,
640 struct kgsl_snapshot *snapshot)
641{
642 unsigned int pool_size;
Lynus Vazb8e43d52017-04-20 14:47:37 +0530643 u8 *buf = snapshot->ptr;
Lynus Vaza5922742017-03-14 18:50:54 +0530644
Lynus Vazb8e43d52017-04-20 14:47:37 +0530645 /* Set the mempool size to 0 to stabilize it while dumping */
Lynus Vaza5922742017-03-14 18:50:54 +0530646 kgsl_regread(device, A6XX_CP_MEM_POOL_SIZE, &pool_size);
647 kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, 0);
648
649 kgsl_snapshot_indexed_registers(device, snapshot,
650 A6XX_CP_MEM_POOL_DBG_ADDR, A6XX_CP_MEM_POOL_DBG_DATA,
651 0, 0x2060);
652
Lynus Vazb8e43d52017-04-20 14:47:37 +0530653 /*
654 * Data at offset 0x2000 in the mempool section is the mempool size.
655 * Since we set it to 0, patch in the original size so that the data
656 * is consistent.
657 */
658 if (buf < snapshot->ptr) {
659 unsigned int *data;
660
661 /* Skip over the headers */
662 buf += sizeof(struct kgsl_snapshot_section_header) +
663 sizeof(struct kgsl_snapshot_indexed_regs);
664
665 data = (unsigned int *)buf + 0x2000;
666 *data = pool_size;
667 }
668
Lynus Vaza5922742017-03-14 18:50:54 +0530669 /* Restore the saved mempool size */
670 kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, pool_size);
671}
672
/*
 * a6xx_read_dbgahb() - read one register through the HLSQ debug AHB
 * read aperture.
 * @device: KGSL device handle
 * @regbase: BYTE address of the aperture base (e.g. 0x0002E000)
 * @reg: dword offset of the register to read
 *
 * Note: 'reg - regbase / 4' is intentional, not a precedence bug. The
 * regbase values in the dbgahb tables are byte addresses while @reg is a
 * dword offset, so the base must be divided by 4 first (e.g.
 * 0x0002E000 / 4 == 0xB800, the start of the SP_VS HLSQ cluster).
 */
static inline unsigned int a6xx_read_dbgahb(struct kgsl_device *device,
				unsigned int regbase, unsigned int reg)
{
	unsigned int read_reg = A6XX_HLSQ_DBG_AHB_READ_APERTURE +
				reg - regbase / 4;
	unsigned int val;

	kgsl_regread(device, read_reg, &val);
	return val;
}
683
/*
 * a6xx_legacy_snapshot_cluster_dbgahb() - dump one context bank of a
 * debug-AHB cluster by reading each register over the AHB aperture.
 * Fallback path used when the crash dumper output is invalid; only runs
 * when the device is configured for legacy snapshots.
 *
 * Section format: MVC header, then for each range a marker dword
 * (start | (1 << 31)), the end offset, and the register values.
 * Returns the number of bytes written (header only on early failure).
 */
static size_t a6xx_legacy_snapshot_cluster_dbgahb(struct kgsl_device *device,
				u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_mvc_regs *header =
		(struct kgsl_snapshot_mvc_regs *)buf;
	struct a6xx_cluster_dbgahb_regs_info *info =
		(struct a6xx_cluster_dbgahb_regs_info *)priv;
	struct a6xx_cluster_dbgahb_registers *cur_cluster = info->cluster;
	unsigned int read_sel;
	unsigned int data_size = 0;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	int i, j;

	if (!device->snapshot_legacy)
		return 0;

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	remain -= sizeof(*header);

	header->ctxt_id = info->ctxt_id;
	header->cluster_id = cur_cluster->id;

	/* Select the statetype/context bank before reading the aperture */
	read_sel = ((cur_cluster->statetype + info->ctxt_id * 2) & 0xff) << 8;
	kgsl_regwrite(device, A6XX_HLSQ_DBG_READ_SEL, read_sel);

	for (i = 0; i < cur_cluster->num_sets; i++) {
		unsigned int start = cur_cluster->regs[2 * i];
		unsigned int end = cur_cluster->regs[2 * i + 1];

		/* (end - start + 3): values plus the two range dwords */
		if (remain < (end - start + 3) * 4) {
			SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
			goto out;
		}

		remain -= (end - start + 3) * 4;
		data_size += (end - start + 3) * 4;

		*data++ = start | (1 << 31);
		*data++ = end;

		for (j = start; j <= end; j++) {
			unsigned int val;

			val = a6xx_read_dbgahb(device, cur_cluster->regbase, j);
			*data++ = val;

		}
	}

out:
	return data_size + sizeof(*header);
}
740
Lynus Vaz1e258612017-04-27 21:35:22 +0530741static size_t a6xx_snapshot_cluster_dbgahb(struct kgsl_device *device, u8 *buf,
742 size_t remain, void *priv)
743{
744 struct kgsl_snapshot_mvc_regs *header =
745 (struct kgsl_snapshot_mvc_regs *)buf;
746 struct a6xx_cluster_dbgahb_regs_info *info =
747 (struct a6xx_cluster_dbgahb_regs_info *)priv;
748 struct a6xx_cluster_dbgahb_registers *cluster = info->cluster;
749 unsigned int data_size = 0;
750 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
751 int i, j;
752 unsigned int *src;
753
754
755 if (crash_dump_valid == false)
756 return a6xx_legacy_snapshot_cluster_dbgahb(device, buf, remain,
757 info);
758
759 if (remain < sizeof(*header)) {
760 SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
761 return 0;
762 }
763
764 remain -= sizeof(*header);
765
766 header->ctxt_id = info->ctxt_id;
767 header->cluster_id = cluster->id;
768
769 src = (unsigned int *)(a6xx_crashdump_registers.hostptr +
770 (header->ctxt_id ? cluster->offset1 : cluster->offset0));
771
772 for (i = 0; i < cluster->num_sets; i++) {
773 unsigned int start;
774 unsigned int end;
775
776 start = cluster->regs[2 * i];
777 end = cluster->regs[2 * i + 1];
778
779 if (remain < (end - start + 3) * 4) {
780 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
781 goto out;
782 }
783
784 remain -= (end - start + 3) * 4;
785 data_size += (end - start + 3) * 4;
786
787 *data++ = start | (1 << 31);
788 *data++ = end;
789 for (j = start; j <= end; j++)
790 *data++ = *src++;
791 }
792out:
793 return data_size + sizeof(*header);
794}
795
Harshdeep Dhatt52ccc942017-05-10 12:35:30 -0600796static size_t a6xx_legacy_snapshot_non_ctx_dbgahb(struct kgsl_device *device,
797 u8 *buf, size_t remain, void *priv)
Lynus Vaz461e2382017-01-16 19:35:41 +0530798{
799 struct kgsl_snapshot_regs *header =
800 (struct kgsl_snapshot_regs *)buf;
801 struct a6xx_non_ctx_dbgahb_registers *regs =
802 (struct a6xx_non_ctx_dbgahb_registers *)priv;
803 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
804 int count = 0;
805 unsigned int read_sel;
806 int i, j;
807
Harshdeep Dhatt134f7af2017-05-17 13:54:41 -0600808 if (!device->snapshot_legacy)
809 return 0;
810
Lynus Vaz461e2382017-01-16 19:35:41 +0530811 /* Figure out how many registers we are going to dump */
812 for (i = 0; i < regs->num_sets; i++) {
813 int start = regs->regs[i * 2];
814 int end = regs->regs[i * 2 + 1];
815
816 count += (end - start + 1);
817 }
818
819 if (remain < (count * 8) + sizeof(*header)) {
820 SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
821 return 0;
822 }
823
824 header->count = count;
825
826 read_sel = (regs->statetype & 0xff) << 8;
827 kgsl_regwrite(device, A6XX_HLSQ_DBG_READ_SEL, read_sel);
828
829 for (i = 0; i < regs->num_sets; i++) {
830 unsigned int start = regs->regs[2 * i];
831 unsigned int end = regs->regs[2 * i + 1];
832
833 for (j = start; j <= end; j++) {
834 unsigned int val;
835
836 val = a6xx_read_dbgahb(device, regs->regbase, j);
837 *data++ = j;
838 *data++ = val;
839
840 }
841 }
842 return (count * 8) + sizeof(*header);
843}
844
/*
 * a6xx_snapshot_non_ctx_dbgahb() - write the REGS section for one
 * non-context debug-AHB group, sourcing values from the crash dumper
 * output buffer at regs->offset. Falls back to the legacy AHB read path
 * when the crash dump is invalid.
 *
 * Output format: header followed by (address, value) dword pairs; on an
 * out-of-space condition the section is truncated and header->count
 * reflects only the registers actually written.
 */
static size_t a6xx_snapshot_non_ctx_dbgahb(struct kgsl_device *device, u8 *buf,
				size_t remain, void *priv)
{
	struct kgsl_snapshot_regs *header =
		(struct kgsl_snapshot_regs *)buf;
	struct a6xx_non_ctx_dbgahb_registers *regs =
		(struct a6xx_non_ctx_dbgahb_registers *)priv;
	unsigned int count = 0;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int i, k;
	unsigned int *src;

	if (crash_dump_valid == false)
		return a6xx_legacy_snapshot_non_ctx_dbgahb(device, buf, remain,
				regs);

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	remain -= sizeof(*header);

	src = (unsigned int *)(a6xx_crashdump_registers.hostptr + regs->offset);

	for (i = 0; i < regs->num_sets; i++) {
		unsigned int start;
		unsigned int end;

		start = regs->regs[2 * i];
		end = regs->regs[(2 * i) + 1];

		/* 8 bytes per register: address dword + value dword */
		if (remain < (end - start + 1) * 8) {
			SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
			goto out;
		}

		remain -= ((end - start) + 1) * 8;

		for (k = start; k <= end; k++, count++) {
			*data++ = k;
			*data++ = *src++;
		}
	}
out:
	header->count = count;

	/* Return the size of the section */
	return (count * 8) + sizeof(*header);
}
895
Lynus Vaz461e2382017-01-16 19:35:41 +0530896static void a6xx_snapshot_dbgahb_regs(struct kgsl_device *device,
897 struct kgsl_snapshot *snapshot)
898{
899 int i, j;
900
901 for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
902 struct a6xx_cluster_dbgahb_registers *cluster =
903 &a6xx_dbgahb_ctx_clusters[i];
904 struct a6xx_cluster_dbgahb_regs_info info;
905
906 info.cluster = cluster;
907 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
908 info.ctxt_id = j;
909
910 kgsl_snapshot_add_section(device,
911 KGSL_SNAPSHOT_SECTION_MVC, snapshot,
912 a6xx_snapshot_cluster_dbgahb, &info);
913 }
914 }
915
916 for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
917 kgsl_snapshot_add_section(device,
918 KGSL_SNAPSHOT_SECTION_REGS, snapshot,
919 a6xx_snapshot_non_ctx_dbgahb, &a6xx_non_ctx_dbgahb[i]);
920 }
921}
922
Shrenuj Bansal41665402016-12-16 15:25:54 -0800923static size_t a6xx_legacy_snapshot_mvc(struct kgsl_device *device, u8 *buf,
924 size_t remain, void *priv)
925{
926 struct kgsl_snapshot_mvc_regs *header =
927 (struct kgsl_snapshot_mvc_regs *)buf;
928 struct a6xx_cluster_regs_info *info =
929 (struct a6xx_cluster_regs_info *)priv;
930 struct a6xx_cluster_registers *cur_cluster = info->cluster;
931 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
932 unsigned int ctxt = info->ctxt_id;
933 unsigned int start, end, i, j, aperture_cntl = 0;
934 unsigned int data_size = 0;
935
936 if (remain < sizeof(*header)) {
937 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
938 return 0;
939 }
940
941 remain -= sizeof(*header);
942
943 header->ctxt_id = info->ctxt_id;
944 header->cluster_id = cur_cluster->id;
945
946 /*
947 * Set the AHB control for the Host to read from the
948 * cluster/context for this iteration.
949 */
950 aperture_cntl = ((cur_cluster->id & 0x7) << 8) | (ctxt << 4) | ctxt;
951 kgsl_regwrite(device, A6XX_CP_APERTURE_CNTL_HOST, aperture_cntl);
952
953 for (i = 0; i < cur_cluster->num_sets; i++) {
954 start = cur_cluster->regs[2 * i];
955 end = cur_cluster->regs[2 * i + 1];
956
957 if (remain < (end - start + 3) * 4) {
958 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
959 goto out;
960 }
961
962 remain -= (end - start + 3) * 4;
963 data_size += (end - start + 3) * 4;
964
965 *data++ = start | (1 << 31);
966 *data++ = end;
967 for (j = start; j <= end; j++) {
968 unsigned int val;
969
970 kgsl_regread(device, j, &val);
971 *data++ = val;
972 }
973 }
974out:
975 return data_size + sizeof(*header);
976}
977
978static size_t a6xx_snapshot_mvc(struct kgsl_device *device, u8 *buf,
979 size_t remain, void *priv)
980{
981 struct kgsl_snapshot_mvc_regs *header =
982 (struct kgsl_snapshot_mvc_regs *)buf;
983 struct a6xx_cluster_regs_info *info =
984 (struct a6xx_cluster_regs_info *)priv;
985 struct a6xx_cluster_registers *cluster = info->cluster;
986 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
987 unsigned int *src;
988 int i, j;
989 unsigned int start, end;
990 size_t data_size = 0;
991
992 if (crash_dump_valid == false)
993 return a6xx_legacy_snapshot_mvc(device, buf, remain, info);
994
995 if (remain < sizeof(*header)) {
996 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
997 return 0;
998 }
999
1000 remain -= sizeof(*header);
1001
1002 header->ctxt_id = info->ctxt_id;
1003 header->cluster_id = cluster->id;
1004
1005 src = (unsigned int *)(a6xx_crashdump_registers.hostptr +
1006 (header->ctxt_id ? cluster->offset1 : cluster->offset0));
1007
1008 for (i = 0; i < cluster->num_sets; i++) {
1009 start = cluster->regs[2 * i];
1010 end = cluster->regs[2 * i + 1];
1011
1012 if (remain < (end - start + 3) * 4) {
1013 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
1014 goto out;
1015 }
1016
1017 remain -= (end - start + 3) * 4;
1018 data_size += (end - start + 3) * 4;
1019
1020 *data++ = start | (1 << 31);
1021 *data++ = end;
1022 for (j = start; j <= end; j++)
1023 *data++ = *src++;
1024 }
1025
1026out:
1027 return data_size + sizeof(*header);
1028
1029}
1030
1031static void a6xx_snapshot_mvc_regs(struct kgsl_device *device,
1032 struct kgsl_snapshot *snapshot)
1033{
1034 int i, j;
1035 struct a6xx_cluster_regs_info info;
1036
1037 for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
1038 struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];
1039
1040 info.cluster = cluster;
1041 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
1042 info.ctxt_id = j;
1043
1044 kgsl_snapshot_add_section(device,
1045 KGSL_SNAPSHOT_SECTION_MVC, snapshot,
1046 a6xx_snapshot_mvc, &info);
1047 }
1048 }
1049}
1050
/*
 * a6xx_dbgc_debug_bus_read() - Read data from trace bus
 * @device: Device being snapshotted
 * @block_id: Debug bus block to select
 * @index: Index within the block to read
 * @val: Destination for the two dwords read from the trace buffer
 */
static void a6xx_dbgc_debug_bus_read(struct kgsl_device *device,
	unsigned int block_id, unsigned int index, unsigned int *val)
{
	unsigned int reg;

	/* Program the same block/index selection into all four selectors */
	reg = (block_id << A6XX_DBGC_CFG_DBGBUS_SEL_PING_BLK_SEL_SHIFT) |
		(index << A6XX_DBGC_CFG_DBGBUS_SEL_PING_INDEX_SHIFT);

	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_A, reg);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_B, reg);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_C, reg);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_D, reg);

	/*
	 * There needs to be a delay of 1 us to ensure enough time for correct
	 * data is funneled into the trace buffer
	 */
	udelay(1);

	/* Each selection yields two dwords: val[0] and val[1] */
	kgsl_regread(device, A6XX_DBGC_CFG_DBGBUS_TRACE_BUF2, val);
	val++;
	kgsl_regread(device, A6XX_DBGC_CFG_DBGBUS_TRACE_BUF1, val);
}
1075
/*
 * a6xx_snapshot_dbgc_debugbus_block() - Capture debug data for a gpu block
 * @device: Device being snapshotted
 * @buf: Snapshot memory to write the debug bus section into
 * @remain: Number of bytes left in the snapshot buffer
 * @priv: Pointer to the struct adreno_debugbus_block to capture
 *
 * Returns the size of the section written, or 0 on failure.
 */
static size_t a6xx_snapshot_dbgc_debugbus_block(struct kgsl_device *device,
	u8 *buf, size_t remain, void *priv)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct kgsl_snapshot_debugbus *header =
		(struct kgsl_snapshot_debugbus *)buf;
	struct adreno_debugbus_block *block = priv;
	int i;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int dwords;
	unsigned int block_id;
	size_t size;

	dwords = block->dwords;

	/* For a6xx each debug bus data unit is 2 DWORDS */
	size = (dwords * sizeof(unsigned int) * 2) + sizeof(*header);

	if (remain < size) {
		SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
		return 0;
	}

	header->id = block->block_id;
	header->count = dwords * 2;

	block_id = block->block_id;
	/* GMU_GX data is read using the GMU_CX block id on A630 */
	if (adreno_is_a630(adreno_dev) &&
		(block_id == A6XX_DBGBUS_GMU_GX))
		block_id = A6XX_DBGBUS_GMU_CX;

	for (i = 0; i < dwords; i++)
		a6xx_dbgc_debug_bus_read(device, block_id, i, &data[i*2]);

	return size;
}
1114
/*
 * _cx_dbgc_regread() - Read a CX DBGC register through the local ioremap
 * mapping (a6xx_cx_dbgc) instead of the normal kgsl register path
 * @offsetwords: Dword offset of the register; must lie within the
 *               CX_DBGC block that was mapped
 * @value: Destination for the value read
 */
static void _cx_dbgc_regread(unsigned int offsetwords, unsigned int *value)
{
	void __iomem *reg;

	/* Refuse reads outside the mapped CX_DBGC register window */
	if (WARN((offsetwords < A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) ||
		(offsetwords > A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2),
		"Read beyond CX_DBGC block: 0x%x\n", offsetwords))
		return;

	/* The mapping starts at SEL_A; convert dword offset to bytes */
	reg = a6xx_cx_dbgc +
		((offsetwords - A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) << 2);
	*value = __raw_readl(reg);

	/*
	 * ensure this read finishes before the next one.
	 * i.e. act like normal readl()
	 */
	rmb();
}
1134
/*
 * _cx_dbgc_regwrite() - Write a CX DBGC register through the local
 * ioremap mapping (a6xx_cx_dbgc) instead of the normal kgsl path
 * @offsetwords: Dword offset of the register; must lie within the
 *               CX_DBGC block that was mapped
 * @value: Value to write
 */
static void _cx_dbgc_regwrite(unsigned int offsetwords, unsigned int value)
{
	void __iomem *reg;

	/* Refuse writes outside the mapped CX_DBGC register window */
	if (WARN((offsetwords < A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) ||
		(offsetwords > A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2),
		"Write beyond CX_DBGC block: 0x%x\n", offsetwords))
		return;

	/* The mapping starts at SEL_A; convert dword offset to bytes */
	reg = a6xx_cx_dbgc +
		((offsetwords - A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) << 2);

	/*
	 * ensure previous writes post before this one,
	 * i.e. act like normal writel()
	 */
	wmb();
	__raw_writel(value, reg);
}
1154
/*
 * a6xx_cx_debug_bus_read() - Read data from trace bus via the CX DBGC
 * @device: Device being snapshotted (unused by the raw MMIO helpers)
 * @block_id: Debug bus block to select
 * @index: Index within the block to read
 * @val: Destination for the two dwords read from the trace buffer
 */
static void a6xx_cx_debug_bus_read(struct kgsl_device *device,
	unsigned int block_id, unsigned int index, unsigned int *val)
{
	unsigned int reg;

	/* Program the same block/index selection into all four selectors */
	reg = (block_id << A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_BLK_SEL_SHIFT) |
		(index << A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_INDEX_SHIFT);

	_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_A, reg);
	_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_B, reg);
	_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_C, reg);
	_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_D, reg);

	/*
	 * There needs to be a delay of 1 us to ensure enough time for correct
	 * data is funneled into the trace buffer
	 */
	udelay(1);

	/* Each selection yields two dwords: val[0] and val[1] */
	_cx_dbgc_regread(A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2, val);
	val++;
	_cx_dbgc_regread(A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1, val);
}
1179
/*
 * a6xx_snapshot_cx_dbgc_debugbus_block() - Capture debug data for a gpu
 * block from the CX DBGC block
 * @device: Device being snapshotted
 * @buf: Snapshot memory to write the debug bus section into
 * @remain: Number of bytes left in the snapshot buffer
 * @priv: Pointer to the struct adreno_debugbus_block to capture
 *
 * Returns the size of the section written, or 0 on failure.
 */
static size_t a6xx_snapshot_cx_dbgc_debugbus_block(struct kgsl_device *device,
	u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_debugbus *header =
		(struct kgsl_snapshot_debugbus *)buf;
	struct adreno_debugbus_block *block = priv;
	int i;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int dwords;
	size_t size;

	dwords = block->dwords;

	/* For a6xx each debug bus data unit is 2 DWORDS */
	size = (dwords * sizeof(unsigned int) * 2) + sizeof(*header);

	if (remain < size) {
		SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
		return 0;
	}

	header->id = block->block_id;
	header->count = dwords * 2;

	for (i = 0; i < dwords; i++)
		a6xx_cx_debug_bus_read(device, block->block_id, i,
			&data[i*2]);

	return size;
}
1214
/*
 * a6xx_snapshot_debugbus() - Capture debug bus data
 * @device: Device being snapshotted
 * @snapshot: Snapshot instance the debug bus sections are added to
 *
 * Programs the GX-side DBGC and (when it can be mapped) the CX-side
 * DBGC trace controls, then adds one section per debug bus block.
 */
static void a6xx_snapshot_debugbus(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot)
{
	int i;

	/* Configure the trace controls for the GX DBGC */
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_CNTLT,
		(0xf << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT_SHIFT) |
		(0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU_SHIFT) |
		(0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN_SHIFT));

	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_CNTLM,
		0xf << A6XX_DBGC_CFG_DBGBUS_CTLTM_ENABLE_SHIFT);

	/* Clear the interval tables */
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_0, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_1, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_2, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_3, 0);

	/* Identity-map the sixteen byte lanes */
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_BYTEL_0,
		(0 << A6XX_DBGC_CFG_DBGBUS_BYTEL0_SHIFT) |
		(1 << A6XX_DBGC_CFG_DBGBUS_BYTEL1_SHIFT) |
		(2 << A6XX_DBGC_CFG_DBGBUS_BYTEL2_SHIFT) |
		(3 << A6XX_DBGC_CFG_DBGBUS_BYTEL3_SHIFT) |
		(4 << A6XX_DBGC_CFG_DBGBUS_BYTEL4_SHIFT) |
		(5 << A6XX_DBGC_CFG_DBGBUS_BYTEL5_SHIFT) |
		(6 << A6XX_DBGC_CFG_DBGBUS_BYTEL6_SHIFT) |
		(7 << A6XX_DBGC_CFG_DBGBUS_BYTEL7_SHIFT));
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_BYTEL_1,
		(8 << A6XX_DBGC_CFG_DBGBUS_BYTEL8_SHIFT) |
		(9 << A6XX_DBGC_CFG_DBGBUS_BYTEL9_SHIFT) |
		(10 << A6XX_DBGC_CFG_DBGBUS_BYTEL10_SHIFT) |
		(11 << A6XX_DBGC_CFG_DBGBUS_BYTEL11_SHIFT) |
		(12 << A6XX_DBGC_CFG_DBGBUS_BYTEL12_SHIFT) |
		(13 << A6XX_DBGC_CFG_DBGBUS_BYTEL13_SHIFT) |
		(14 << A6XX_DBGC_CFG_DBGBUS_BYTEL14_SHIFT) |
		(15 << A6XX_DBGC_CFG_DBGBUS_BYTEL15_SHIFT));

	/* Clear the masks */
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_0, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_1, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_2, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_3, 0);

	/*
	 * The CX DBGC is not reachable through the normal kgsl register
	 * space, so map its register window directly.
	 */
	a6xx_cx_dbgc = ioremap(device->reg_phys +
			(A6XX_CX_DBGC_CFG_DBGBUS_SEL_A << 2),
			(A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2 -
				A6XX_CX_DBGC_CFG_DBGBUS_SEL_A + 1) << 2);

	if (a6xx_cx_dbgc) {
		/* Mirror the GX DBGC configuration on the CX side */
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_CNTLT,
			(0xf << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT_SHIFT) |
			(0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU_SHIFT) |
			(0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN_SHIFT));

		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_CNTLM,
			0xf << A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE_SHIFT);

		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3, 0);

		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0,
			(0 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL0_SHIFT) |
			(1 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL1_SHIFT) |
			(2 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL2_SHIFT) |
			(3 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL3_SHIFT) |
			(4 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL4_SHIFT) |
			(5 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL5_SHIFT) |
			(6 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL6_SHIFT) |
			(7 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL7_SHIFT));
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1,
			(8 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL8_SHIFT) |
			(9 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL9_SHIFT) |
			(10 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL10_SHIFT) |
			(11 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL11_SHIFT) |
			(12 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL12_SHIFT) |
			(13 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL13_SHIFT) |
			(14 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL14_SHIFT) |
			(15 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL15_SHIFT));

		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3, 0);
	} else
		KGSL_DRV_ERR(device, "Unable to ioremap CX_DBGC_CFG block\n");

	for (i = 0; i < ARRAY_SIZE(a6xx_dbgc_debugbus_blocks); i++) {
		kgsl_snapshot_add_section(device,
			KGSL_SNAPSHOT_SECTION_DEBUGBUS,
			snapshot, a6xx_snapshot_dbgc_debugbus_block,
			(void *) &a6xx_dbgc_debugbus_blocks[i]);
	}

	/* Only dump CX blocks if the mapping above succeeded */
	if (a6xx_cx_dbgc) {
		for (i = 0; i < ARRAY_SIZE(a6xx_cx_dbgc_debugbus_blocks); i++) {
			kgsl_snapshot_add_section(device,
				KGSL_SNAPSHOT_SECTION_DEBUGBUS,
				snapshot, a6xx_snapshot_cx_dbgc_debugbus_block,
				(void *) &a6xx_cx_dbgc_debugbus_blocks[i]);
		}
		iounmap(a6xx_cx_dbgc);
	}
}
1320
Kyle Piefer60733aa2017-03-21 11:24:01 -07001321static void a6xx_snapshot_gmu(struct kgsl_device *device,
1322 struct kgsl_snapshot *snapshot)
1323{
Kyle Piefer60733aa2017-03-21 11:24:01 -07001324 if (!kgsl_gmu_isenabled(device))
1325 return;
1326
Lynus Vazd37f1d82017-05-24 16:39:15 +05301327 adreno_snapshot_registers(device, snapshot, a6xx_gmu_registers,
1328 ARRAY_SIZE(a6xx_gmu_registers) / 2);
Kyle Piefer60733aa2017-03-21 11:24:01 -07001329}
1330
Lynus Vaz85150052017-02-21 17:57:48 +05301331/* a6xx_snapshot_sqe() - Dump SQE data in snapshot */
1332static size_t a6xx_snapshot_sqe(struct kgsl_device *device, u8 *buf,
1333 size_t remain, void *priv)
1334{
1335 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
1336 struct kgsl_snapshot_debug *header = (struct kgsl_snapshot_debug *)buf;
1337 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
1338 struct adreno_firmware *fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);
1339
1340 if (remain < DEBUG_SECTION_SZ(1)) {
1341 SNAPSHOT_ERR_NOMEM(device, "SQE VERSION DEBUG");
1342 return 0;
1343 }
1344
1345 /* Dump the SQE firmware version */
1346 header->type = SNAPSHOT_DEBUG_SQE_VERSION;
1347 header->size = 1;
1348 *data = fw->version;
1349
1350 return DEBUG_SECTION_SZ(1);
1351}
1352
/*
 * _a6xx_do_crashdump() - Run the CP crash dumper and wait for completion
 * @device: Device being snapshotted
 *
 * Points the hardware crash dumper at the pre-built capture script and
 * triggers it, polling for completion. On success crash_dump_valid is
 * set so the snapshot functions read from the dump buffer instead of
 * touching the hardware directly.
 */
static void _a6xx_do_crashdump(struct kgsl_device *device)
{
	unsigned long wait_time;
	unsigned int reg = 0;
	unsigned int val;

	crash_dump_valid = false;

	/* Bail if the script or data buffer was never allocated */
	if (a6xx_capturescript.gpuaddr == 0 ||
		a6xx_crashdump_registers.gpuaddr == 0)
		return;

	/* IF the SMMU is stalled we cannot do a crash dump */
	kgsl_regread(device, A6XX_RBBM_STATUS3, &val);
	if (val & BIT(24))
		return;

	/* Turn on APRIV so we can access the buffers */
	kgsl_regwrite(device, A6XX_CP_MISC_CNTL, 1);

	kgsl_regwrite(device, A6XX_CP_CRASH_SCRIPT_BASE_LO,
			lower_32_bits(a6xx_capturescript.gpuaddr));
	kgsl_regwrite(device, A6XX_CP_CRASH_SCRIPT_BASE_HI,
			upper_32_bits(a6xx_capturescript.gpuaddr));
	kgsl_regwrite(device, A6XX_CP_CRASH_DUMP_CNTL, 1);

	/* Poll the status register for the done bit until timeout */
	wait_time = jiffies + msecs_to_jiffies(CP_CRASH_DUMPER_TIMEOUT);
	while (!time_after(jiffies, wait_time)) {
		kgsl_regread(device, A6XX_CP_CRASH_DUMP_STATUS, &reg);
		if (reg & 0x2)
			break;
		cpu_relax();
	}

	/* Turn APRIV back off before returning */
	kgsl_regwrite(device, A6XX_CP_MISC_CNTL, 0);

	if (!(reg & 0x2)) {
		KGSL_CORE_ERR("Crash dump timed out: 0x%X\n", reg);
		return;
	}

	crash_dump_valid = true;
}
1396
/*
 * a6xx_snapshot() - A6XX GPU snapshot function
 * @adreno_dev: Device being snapshotted
 * @snapshot: Pointer to the snapshot instance
 *
 * This is where all of the A6XX specific bits and pieces are grabbed
 * into the snapshot memory
 */
void a6xx_snapshot(struct adreno_device *adreno_dev,
		struct kgsl_snapshot *snapshot)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	struct adreno_snapshot_data *snap_data = gpudev->snapshot_data;
	bool sptprac_on;

	/* GMU TCM data dumped through AHB */
	a6xx_snapshot_gmu(device, snapshot);

	/* NOTE(review): assumes sptprac_is_on is always populated for a6xx */
	sptprac_on = gpudev->sptprac_is_on(adreno_dev);

	/* Return if the GX is off */
	if (!gpudev->gx_is_on(adreno_dev)) {
		pr_err("GX is off. Only dumping GMU data in snapshot\n");
		return;
	}

	/* Dump the registers which get affected by crash dumper trigger */
	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
		snapshot, a6xx_snapshot_pre_crashdump_regs, NULL);

	/* Dump vbif registers as well which get affected by crash dumper */
	adreno_snapshot_vbif_registers(device, snapshot,
		a6xx_vbif_snapshot_registers,
		ARRAY_SIZE(a6xx_vbif_snapshot_registers));

	/* Try to run the crash dumper */
	if (sptprac_on)
		_a6xx_do_crashdump(device);

	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
		snapshot, a6xx_snapshot_registers, NULL);

	/* CP_SQE indexed registers */
	kgsl_snapshot_indexed_registers(device, snapshot,
		A6XX_CP_SQE_STAT_ADDR, A6XX_CP_SQE_STAT_DATA,
		0, snap_data->sect_sizes->cp_pfp);

	/* CP_DRAW_STATE */
	kgsl_snapshot_indexed_registers(device, snapshot,
		A6XX_CP_DRAW_STATE_ADDR, A6XX_CP_DRAW_STATE_DATA,
		0, 0x100);

	/* SQE_UCODE Cache */
	kgsl_snapshot_indexed_registers(device, snapshot,
		A6XX_CP_SQE_UCODE_DBG_ADDR, A6XX_CP_SQE_UCODE_DBG_DATA,
		0, 0x6000);

	/* CP ROQ */
	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG,
		snapshot, adreno_snapshot_cp_roq,
		&snap_data->sect_sizes->roq);

	/* SQE Firmware */
	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG,
		snapshot, a6xx_snapshot_sqe, NULL);

	/* Mempool debug data */
	a6xx_snapshot_mempool(device, snapshot);

	/* These blocks are only readable while the SP/TP power rail is up */
	if (sptprac_on) {
		/* Shader memory */
		a6xx_snapshot_shader(device, snapshot);

		/* MVC register section */
		a6xx_snapshot_mvc_regs(device, snapshot);

		/* registers dumped through DBG AHB */
		a6xx_snapshot_dbgahb_regs(device, snapshot);
	}

	a6xx_snapshot_debugbus(device, snapshot);

}
1481
/*
 * _a6xx_crashdump_init_mvc() - Emit capture-script commands for the MVC
 * register clusters
 * @ptr: Next free qword in the capture script
 * @offset: Running byte offset into the crash dump data buffer; updated
 *          as space is reserved, and recorded in each cluster's
 *          offset0/offset1 so the snapshot code can find the data later
 *
 * Returns the number of qwords written to the script.
 */
static int _a6xx_crashdump_init_mvc(uint64_t *ptr, uint64_t *offset)
{
	int qwords = 0;
	unsigned int i, j, k;
	unsigned int count;

	for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
		struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];

		cluster->offset0 = *offset;
		for (j = 0; j < A6XX_NUM_CTXTS; j++) {

			if (j == 1)
				cluster->offset1 = *offset;

			/* Script command: select the cluster/context aperture */
			ptr[qwords++] = (cluster->id << 8) | (j << 4) | j;
			ptr[qwords++] =
				((uint64_t)A6XX_CP_APERTURE_CNTL_HOST << 44) |
					(1 << 21) | 1;

			/* One read command per register range in the set */
			for (k = 0; k < cluster->num_sets; k++) {
				count = REG_PAIR_COUNT(cluster->regs, k);
				ptr[qwords++] =
				a6xx_crashdump_registers.gpuaddr + *offset;
				ptr[qwords++] =
				(((uint64_t)cluster->regs[2 * k]) << 44) |
						count;

				*offset += count * sizeof(unsigned int);
			}
		}
	}

	return qwords;
}
1517
/*
 * _a6xx_crashdump_init_shader() - Emit capture-script commands for one
 * shader block
 * @block: Shader block to capture; block->offset is set to where the
 *         first bank's data will land in the dump buffer
 * @ptr: Next free qword in the capture script
 * @offset: Running byte offset into the crash dump data buffer (updated)
 *
 * Returns the number of qwords written to the script.
 */
static int _a6xx_crashdump_init_shader(struct a6xx_shader_block *block,
		uint64_t *ptr, uint64_t *offset)
{
	int qwords = 0;
	unsigned int j;

	/* Capture each bank in the block */
	for (j = 0; j < A6XX_NUM_SHADER_BANKS; j++) {
		/* Program the aperture */
		ptr[qwords++] =
			(block->statetype << A6XX_SHADER_STATETYPE_SHIFT) | j;
		ptr[qwords++] = (((uint64_t) A6XX_HLSQ_DBG_READ_SEL << 44)) |
			(1 << 21) | 1;

		/* Read all the data in one chunk */
		ptr[qwords++] = a6xx_crashdump_registers.gpuaddr + *offset;
		ptr[qwords++] =
			(((uint64_t) A6XX_HLSQ_DBG_AHB_READ_APERTURE << 44)) |
			block->sz;

		/* Remember the offset of the first bank for easy access */
		if (j == 0)
			block->offset = *offset;

		*offset += block->sz * sizeof(unsigned int);
	}

	return qwords;
}
1547
/*
 * _a6xx_crashdump_init_ctx_dbgahb() - Emit capture-script commands for
 * the per-context debug AHB register clusters
 * @ptr: Next free qword in the capture script
 * @offset: Running byte offset into the crash dump data buffer; updated
 *          as space is reserved, and recorded in each cluster's
 *          offset0/offset1 for the snapshot code
 *
 * Returns the number of qwords written to the script.
 */
static int _a6xx_crashdump_init_ctx_dbgahb(uint64_t *ptr, uint64_t *offset)
{
	int qwords = 0;
	unsigned int i, j, k;
	unsigned int count;

	for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
		struct a6xx_cluster_dbgahb_registers *cluster =
				&a6xx_dbgahb_ctx_clusters[i];

		cluster->offset0 = *offset;

		for (j = 0; j < A6XX_NUM_CTXTS; j++) {
			if (j == 1)
				cluster->offset1 = *offset;

			/* Program the aperture */
			ptr[qwords++] =
				((cluster->statetype + j * 2) & 0xff) << 8;
			ptr[qwords++] =
				(((uint64_t)A6XX_HLSQ_DBG_READ_SEL << 44)) |
					(1 << 21) | 1;

			/* One read command per register range in the set */
			for (k = 0; k < cluster->num_sets; k++) {
				unsigned int start = cluster->regs[2 * k];

				count = REG_PAIR_COUNT(cluster->regs, k);
				ptr[qwords++] =
				a6xx_crashdump_registers.gpuaddr + *offset;
				ptr[qwords++] =
				(((uint64_t)(A6XX_HLSQ_DBG_AHB_READ_APERTURE +
					start - cluster->regbase / 4) << 44)) |
							count;

				*offset += count * sizeof(unsigned int);
			}
		}
	}
	return qwords;
}
1588
/*
 * _a6xx_crashdump_init_non_ctx_dbgahb() - Emit capture-script commands
 * for the non-context debug AHB register blocks
 * @ptr: Next free qword in the capture script
 * @offset: Running byte offset into the crash dump data buffer; updated
 *          as space is reserved, and recorded in regs->offset for the
 *          snapshot code
 *
 * Returns the number of qwords written to the script.
 */
static int _a6xx_crashdump_init_non_ctx_dbgahb(uint64_t *ptr, uint64_t *offset)
{
	int qwords = 0;
	unsigned int i, k;
	unsigned int count;

	for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
		struct a6xx_non_ctx_dbgahb_registers *regs =
				&a6xx_non_ctx_dbgahb[i];

		regs->offset = *offset;

		/* Program the aperture */
		ptr[qwords++] = (regs->statetype & 0xff) << 8;
		ptr[qwords++] = (((uint64_t)A6XX_HLSQ_DBG_READ_SEL << 44)) |
					(1 << 21) | 1;

		/* One read command per register range in the set */
		for (k = 0; k < regs->num_sets; k++) {
			unsigned int start = regs->regs[2 * k];

			count = REG_PAIR_COUNT(regs->regs, k);
			ptr[qwords++] =
				a6xx_crashdump_registers.gpuaddr + *offset;
			ptr[qwords++] =
				(((uint64_t)(A6XX_HLSQ_DBG_AHB_READ_APERTURE +
					start - regs->regbase / 4) << 44)) |
							count;

			*offset += count * sizeof(unsigned int);
		}
	}
	return qwords;
}
1622
/*
 * a6xx_crashdump_init() - Allocate and build the CP crash dumper script
 * @adreno_dev: Adreno device to build the script for
 *
 * Sizes, allocates and fills in the capture script and the data buffer
 * the crash dumper writes into. The size computations here must mirror
 * exactly what the _a6xx_crashdump_init_* helpers emit. Safe to call
 * more than once; returns early if the buffers already exist.
 */
void a6xx_crashdump_init(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned int script_size = 0;
	unsigned int data_size = 0;
	unsigned int i, j, k;
	uint64_t *ptr;
	uint64_t offset = 0;

	/* Already initialized - nothing to do */
	if (a6xx_capturescript.gpuaddr != 0 &&
		a6xx_crashdump_registers.gpuaddr != 0)
		return;

	/*
	 * We need to allocate two buffers:
	 * 1 - the buffer to hold the draw script
	 * 2 - the buffer to hold the data
	 */

	/*
	 * To save the registers, we need 16 bytes per register pair for the
	 * script and a dword for each register in the data
	 */
	for (i = 0; i < ARRAY_SIZE(_a6xx_cd_registers); i++) {
		struct cdregs *regs = &_a6xx_cd_registers[i];

		/* Each pair needs 16 bytes (2 qwords) */
		script_size += (regs->size / 2) * 16;

		/* Each register needs a dword in the data */
		for (j = 0; j < regs->size / 2; j++)
			data_size += REG_PAIR_COUNT(regs->regs, j) *
				sizeof(unsigned int);

	}

	/*
	 * To save the shader blocks for each block in each type we need 32
	 * bytes for the script (16 bytes to program the aperture and 16 to
	 * read the data) and then a block specific number of bytes to hold
	 * the data
	 */
	for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
		script_size += 32 * A6XX_NUM_SHADER_BANKS;
		data_size += a6xx_shader_blocks[i].sz * sizeof(unsigned int) *
			A6XX_NUM_SHADER_BANKS;
	}

	/* Calculate the script and data size for MVC registers */
	for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
		struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];

		for (j = 0; j < A6XX_NUM_CTXTS; j++) {

			/* 16 bytes for programming the aperture */
			script_size += 16;

			/* Reading each pair of registers takes 16 bytes */
			script_size += 16 * cluster->num_sets;

			/* A dword per register read from the cluster list */
			for (k = 0; k < cluster->num_sets; k++)
				data_size += REG_PAIR_COUNT(cluster->regs, k) *
						sizeof(unsigned int);
		}
	}

	/* Calculate the script and data size for debug AHB registers */
	for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
		struct a6xx_cluster_dbgahb_registers *cluster =
				&a6xx_dbgahb_ctx_clusters[i];

		for (j = 0; j < A6XX_NUM_CTXTS; j++) {

			/* 16 bytes for programming the aperture */
			script_size += 16;

			/* Reading each pair of registers takes 16 bytes */
			script_size += 16 * cluster->num_sets;

			/* A dword per register read from the cluster list */
			for (k = 0; k < cluster->num_sets; k++)
				data_size += REG_PAIR_COUNT(cluster->regs, k) *
						sizeof(unsigned int);
		}
	}

	/*
	 * Calculate the script and data size for non context debug
	 * AHB registers
	 */
	for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
		struct a6xx_non_ctx_dbgahb_registers *regs =
				&a6xx_non_ctx_dbgahb[i];

		/* 16 bytes for programming the aperture */
		script_size += 16;

		/* Reading each pair of registers takes 16 bytes */
		script_size += 16 * regs->num_sets;

		/* A dword per register read from the cluster list */
		for (k = 0; k < regs->num_sets; k++)
			data_size += REG_PAIR_COUNT(regs->regs, k) *
				sizeof(unsigned int);
	}

	/* Now allocate the script and data buffers */

	/* The script buffers needs 2 extra qwords on the end */
	if (kgsl_allocate_global(device, &a6xx_capturescript,
		script_size + 16, KGSL_MEMFLAGS_GPUREADONLY,
		KGSL_MEMDESC_PRIVILEGED, "capturescript"))
		return;

	if (kgsl_allocate_global(device, &a6xx_crashdump_registers, data_size,
		0, KGSL_MEMDESC_PRIVILEGED, "capturescript_regs")) {
		kgsl_free_global(KGSL_DEVICE(adreno_dev), &a6xx_capturescript);
		return;
	}

	/* Build the crash script */

	ptr = (uint64_t *)a6xx_capturescript.hostptr;

	/* For the registers, program a read command for each pair */
	for (i = 0; i < ARRAY_SIZE(_a6xx_cd_registers); i++) {
		struct cdregs *regs = &_a6xx_cd_registers[i];

		for (j = 0; j < regs->size / 2; j++) {
			unsigned int r = REG_PAIR_COUNT(regs->regs, j);
			*ptr++ = a6xx_crashdump_registers.gpuaddr + offset;
			*ptr++ = (((uint64_t) regs->regs[2 * j]) << 44) | r;
			offset += r * sizeof(unsigned int);
		}
	}

	/* Program each shader block */
	for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
		ptr += _a6xx_crashdump_init_shader(&a6xx_shader_blocks[i], ptr,
							&offset);
	}

	/* Program the capturescript for the MVC regsiters */
	ptr += _a6xx_crashdump_init_mvc(ptr, &offset);

	ptr += _a6xx_crashdump_init_ctx_dbgahb(ptr, &offset);

	ptr += _a6xx_crashdump_init_non_ctx_dbgahb(ptr, &offset);

	/* Terminate the script with a pair of zero qwords */
	*ptr++ = 0;
	*ptr++ = 0;
}