 1/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#include <linux/io.h>
15#include "kgsl.h"
16#include "adreno.h"
17#include "kgsl_snapshot.h"
18#include "adreno_snapshot.h"
19#include "a6xx_reg.h"
20#include "adreno_a6xx.h"
 21#include "kgsl_gmu.h"
 22
23#define A6XX_NUM_CTXTS 2
24
25static const unsigned int a6xx_gras_cluster[] = {
26 0x8000, 0x8006, 0x8010, 0x8092, 0x8094, 0x809D, 0x80A0, 0x80A6,
27 0x80AF, 0x80F1, 0x8100, 0x8107, 0x8109, 0x8109, 0x8110, 0x8110,
28 0x8400, 0x840B,
29};
30
31static const unsigned int a6xx_ps_cluster[] = {
32 0x8800, 0x8806, 0x8809, 0x8811, 0x8818, 0x881E, 0x8820, 0x8865,
33 0x8870, 0x8879, 0x8880, 0x8889, 0x8890, 0x8891, 0x8898, 0x8898,
34 0x88C0, 0x88c1, 0x88D0, 0x88E3, 0x88F0, 0x88F3, 0x8900, 0x891A,
35 0x8927, 0x8928, 0x8C00, 0x8C01, 0x8C17, 0x8C33, 0x9200, 0x9216,
36 0x9218, 0x9236, 0x9300, 0x9306,
37};
38
39static const unsigned int a6xx_fe_cluster[] = {
40 0x9300, 0x9306, 0x9800, 0x9806, 0x9B00, 0x9B07, 0xA000, 0xA009,
41 0xA00E, 0xA0EF, 0xA0F8, 0xA0F8,
42};
43
44static const unsigned int a6xx_pc_vs_cluster[] = {
45 0x9100, 0x9108, 0x9300, 0x9306, 0x9980, 0x9981, 0x9B00, 0x9B07,
46};
47
48static struct a6xx_cluster_registers {
49 unsigned int id;
50 const unsigned int *regs;
51 unsigned int num_sets;
52 unsigned int offset0;
53 unsigned int offset1;
54} a6xx_clusters[] = {
55 { CP_CLUSTER_GRAS, a6xx_gras_cluster, ARRAY_SIZE(a6xx_gras_cluster)/2 },
56 { CP_CLUSTER_PS, a6xx_ps_cluster, ARRAY_SIZE(a6xx_ps_cluster)/2 },
57 { CP_CLUSTER_FE, a6xx_fe_cluster, ARRAY_SIZE(a6xx_fe_cluster)/2 },
58 { CP_CLUSTER_PC_VS, a6xx_pc_vs_cluster,
59 ARRAY_SIZE(a6xx_pc_vs_cluster)/2 },
60};
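/*
 * Illustrative sketch (not part of the driver): each *_cluster array above
 * is a flat list of inclusive {start, end} register pairs, which is why
 * num_sets is ARRAY_SIZE(list) / 2. A hypothetical helper such as
 * example_cluster_reg_count() would compute how many individual registers
 * a cluster entry describes.
 */
static unsigned int example_cluster_reg_count(
		const struct a6xx_cluster_registers *cluster)
{
	unsigned int i, total = 0;

	for (i = 0; i < cluster->num_sets; i++)
		/* Each pair is inclusive, so it covers end - start + 1 regs */
		total += cluster->regs[2 * i + 1] - cluster->regs[2 * i] + 1;

	return total;
}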
61
62struct a6xx_cluster_regs_info {
63 struct a6xx_cluster_registers *cluster;
64 unsigned int ctxt_id;
65};
66
 67static const unsigned int a6xx_sp_vs_hlsq_cluster[] = {
68 0xB800, 0xB803, 0xB820, 0xB822,
69};
70
71static const unsigned int a6xx_sp_vs_sp_cluster[] = {
72 0xA800, 0xA824, 0xA830, 0xA83C, 0xA840, 0xA864, 0xA870, 0xA895,
73 0xA8A0, 0xA8AF, 0xA8C0, 0xA8C3,
74};
75
76static const unsigned int a6xx_hlsq_duplicate_cluster[] = {
77 0xBB10, 0xBB11, 0xBB20, 0xBB29,
78};
79
80static const unsigned int a6xx_hlsq_2d_duplicate_cluster[] = {
81 0xBD80, 0xBD80,
82};
83
84static const unsigned int a6xx_sp_duplicate_cluster[] = {
85 0xAB00, 0xAB00, 0xAB04, 0xAB05, 0xAB10, 0xAB1B, 0xAB20, 0xAB20,
86};
87
88static const unsigned int a6xx_tp_duplicate_cluster[] = {
89 0xB300, 0xB307, 0xB309, 0xB309, 0xB380, 0xB382,
90};
91
92static const unsigned int a6xx_sp_ps_hlsq_cluster[] = {
93 0xB980, 0xB980, 0xB982, 0xB987, 0xB990, 0xB99B, 0xB9A0, 0xB9A2,
94 0xB9C0, 0xB9C9,
95};
96
97static const unsigned int a6xx_sp_ps_hlsq_2d_cluster[] = {
98 0xBD80, 0xBD80,
99};
100
101static const unsigned int a6xx_sp_ps_sp_cluster[] = {
102 0xA980, 0xA9A8, 0xA9B0, 0xA9BC, 0xA9D0, 0xA9D3, 0xA9E0, 0xA9F3,
103 0xAA00, 0xAA00, 0xAA30, 0xAA31,
104};
105
106static const unsigned int a6xx_sp_ps_sp_2d_cluster[] = {
107 0xACC0, 0xACC0,
108};
109
110static const unsigned int a6xx_sp_ps_tp_cluster[] = {
111 0xB180, 0xB183, 0xB190, 0xB191,
112};
113
114static const unsigned int a6xx_sp_ps_tp_2d_cluster[] = {
115 0xB4C0, 0xB4D1,
116};
117
118static struct a6xx_cluster_dbgahb_registers {
119 unsigned int id;
120 unsigned int regbase;
121 unsigned int statetype;
122 const unsigned int *regs;
123 unsigned int num_sets;
 124 unsigned int offset0;
125 unsigned int offset1;
 126} a6xx_dbgahb_ctx_clusters[] = {
127 { CP_CLUSTER_SP_VS, 0x0002E000, 0x41, a6xx_sp_vs_hlsq_cluster,
128 ARRAY_SIZE(a6xx_sp_vs_hlsq_cluster) / 2 },
129 { CP_CLUSTER_SP_VS, 0x0002A000, 0x21, a6xx_sp_vs_sp_cluster,
130 ARRAY_SIZE(a6xx_sp_vs_sp_cluster) / 2 },
 131 { CP_CLUSTER_SP_VS, 0x0002E000, 0x41, a6xx_hlsq_duplicate_cluster,
 132 ARRAY_SIZE(a6xx_hlsq_duplicate_cluster) / 2 },
 133 { CP_CLUSTER_SP_VS, 0x0002F000, 0x45, a6xx_hlsq_2d_duplicate_cluster,
 134 ARRAY_SIZE(a6xx_hlsq_2d_duplicate_cluster) / 2 },
 135 { CP_CLUSTER_SP_VS, 0x0002A000, 0x21, a6xx_sp_duplicate_cluster,
 136 ARRAY_SIZE(a6xx_sp_duplicate_cluster) / 2 },
 137 { CP_CLUSTER_SP_VS, 0x0002C000, 0x1, a6xx_tp_duplicate_cluster,
 138 ARRAY_SIZE(a6xx_tp_duplicate_cluster) / 2 },
 139 { CP_CLUSTER_SP_PS, 0x0002E000, 0x42, a6xx_sp_ps_hlsq_cluster,
 140 ARRAY_SIZE(a6xx_sp_ps_hlsq_cluster) / 2 },
 141 { CP_CLUSTER_SP_PS, 0x0002F000, 0x46, a6xx_sp_ps_hlsq_2d_cluster,
 142 ARRAY_SIZE(a6xx_sp_ps_hlsq_2d_cluster) / 2 },
 143 { CP_CLUSTER_SP_PS, 0x0002A000, 0x22, a6xx_sp_ps_sp_cluster,
 144 ARRAY_SIZE(a6xx_sp_ps_sp_cluster) / 2 },
 145 { CP_CLUSTER_SP_PS, 0x0002B000, 0x26, a6xx_sp_ps_sp_2d_cluster,
 146 ARRAY_SIZE(a6xx_sp_ps_sp_2d_cluster) / 2 },
 147 { CP_CLUSTER_SP_PS, 0x0002C000, 0x2, a6xx_sp_ps_tp_cluster,
 148 ARRAY_SIZE(a6xx_sp_ps_tp_cluster) / 2 },
 149 { CP_CLUSTER_SP_PS, 0x0002D000, 0x6, a6xx_sp_ps_tp_2d_cluster,
 150 ARRAY_SIZE(a6xx_sp_ps_tp_2d_cluster) / 2 },
 151 { CP_CLUSTER_SP_PS, 0x0002E000, 0x42, a6xx_hlsq_duplicate_cluster,
 152 ARRAY_SIZE(a6xx_hlsq_duplicate_cluster) / 2 },
 153 { CP_CLUSTER_SP_VS, 0x0002A000, 0x22, a6xx_sp_duplicate_cluster,
 154 ARRAY_SIZE(a6xx_sp_duplicate_cluster) / 2 },
 155 { CP_CLUSTER_SP_VS, 0x0002C000, 0x2, a6xx_tp_duplicate_cluster,
 156 ARRAY_SIZE(a6xx_tp_duplicate_cluster) / 2 },
157};
158
159struct a6xx_cluster_dbgahb_regs_info {
160 struct a6xx_cluster_dbgahb_registers *cluster;
161 unsigned int ctxt_id;
162};
163
164static const unsigned int a6xx_hlsq_non_ctx_registers[] = {
165 0xBE00, 0xBE01, 0xBE04, 0xBE05, 0xBE08, 0xBE09, 0xBE10, 0xBE15,
166 0xBE20, 0xBE23,
167};
168
169static const unsigned int a6xx_sp_non_ctx_registers[] = {
170 0xAE00, 0xAE04, 0xAE0C, 0xAE0C, 0xAE0F, 0xAE2B, 0xAE30, 0xAE32,
171 0xAE35, 0xAE35, 0xAE3A, 0xAE3F, 0xAE50, 0xAE52,
172};
173
174static const unsigned int a6xx_tp_non_ctx_registers[] = {
175 0xB600, 0xB601, 0xB604, 0xB605, 0xB610, 0xB61B, 0xB620, 0xB623,
176};
177
178static struct a6xx_non_ctx_dbgahb_registers {
179 unsigned int regbase;
180 unsigned int statetype;
181 const unsigned int *regs;
182 unsigned int num_sets;
 183 unsigned int offset;
 184} a6xx_non_ctx_dbgahb[] = {
185 { 0x0002F800, 0x40, a6xx_hlsq_non_ctx_registers,
186 ARRAY_SIZE(a6xx_hlsq_non_ctx_registers) / 2 },
187 { 0x0002B800, 0x20, a6xx_sp_non_ctx_registers,
188 ARRAY_SIZE(a6xx_sp_non_ctx_registers) / 2 },
189 { 0x0002D800, 0x0, a6xx_tp_non_ctx_registers,
190 ARRAY_SIZE(a6xx_tp_non_ctx_registers) / 2 },
191};
192
 193static const unsigned int a6xx_vbif_ver_20xxxxxx_registers[] = {
194 /* VBIF */
195 0x3000, 0x3007, 0x300C, 0x3014, 0x3018, 0x302D, 0x3030, 0x3031,
196 0x3034, 0x3036, 0x303C, 0x303D, 0x3040, 0x3040, 0x3042, 0x3042,
197 0x3049, 0x3049, 0x3058, 0x3058, 0x305A, 0x3061, 0x3064, 0x3068,
198 0x306C, 0x306D, 0x3080, 0x3088, 0x308B, 0x308C, 0x3090, 0x3094,
199 0x3098, 0x3098, 0x309C, 0x309C, 0x30C0, 0x30C0, 0x30C8, 0x30C8,
200 0x30D0, 0x30D0, 0x30D8, 0x30D8, 0x30E0, 0x30E0, 0x3100, 0x3100,
201 0x3108, 0x3108, 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120,
202 0x3124, 0x3125, 0x3129, 0x3129, 0x3131, 0x3131, 0x3154, 0x3154,
203 0x3156, 0x3156, 0x3158, 0x3158, 0x315A, 0x315A, 0x315C, 0x315C,
204 0x315E, 0x315E, 0x3160, 0x3160, 0x3162, 0x3162, 0x340C, 0x340C,
205 0x3410, 0x3410, 0x3800, 0x3801,
206};
207
 208static const unsigned int a6xx_gmu_gx_registers[] = {
 209 /* GMU GX */
210 0x1A800, 0x1A800, 0x1A810, 0x1A813, 0x1A816, 0x1A816, 0x1A818, 0x1A81B,
211 0x1A81E, 0x1A81E, 0x1A820, 0x1A823, 0x1A826, 0x1A826, 0x1A828, 0x1A82B,
212 0x1A82E, 0x1A82E, 0x1A830, 0x1A833, 0x1A836, 0x1A836, 0x1A838, 0x1A83B,
213 0x1A83E, 0x1A83E, 0x1A840, 0x1A843, 0x1A846, 0x1A846, 0x1A880, 0x1A884,
214 0x1A900, 0x1A92B, 0x1A940, 0x1A940,
 215};
 216
 217static const unsigned int a6xx_gmu_registers[] = {
 218 /* GMU TCM */
 219 0x1B400, 0x1C3FF, 0x1C400, 0x1D3FF,
 220 /* GMU CX */
221 0x1F400, 0x1F407, 0x1F410, 0x1F412, 0x1F500, 0x1F500, 0x1F507, 0x1F50A,
222 0x1F800, 0x1F804, 0x1F807, 0x1F808, 0x1F80B, 0x1F80C, 0x1F80F, 0x1F81C,
223 0x1F824, 0x1F82A, 0x1F82D, 0x1F830, 0x1F840, 0x1F853, 0x1F887, 0x1F889,
224 0x1F8A0, 0x1F8A2, 0x1F8A4, 0x1F8AF, 0x1F8C0, 0x1F8C3, 0x1F8D0, 0x1F8D0,
225 0x1F8E4, 0x1F8E4, 0x1F8E8, 0x1F8EC, 0x1F900, 0x1F903, 0x1F940, 0x1F940,
226 0x1F942, 0x1F944, 0x1F94C, 0x1F94D, 0x1F94F, 0x1F951, 0x1F954, 0x1F954,
227 0x1F957, 0x1F958, 0x1F95D, 0x1F95D, 0x1F962, 0x1F962, 0x1F964, 0x1F965,
228 0x1F980, 0x1F986, 0x1F990, 0x1F99E, 0x1F9C0, 0x1F9C0, 0x1F9C5, 0x1F9CC,
229 0x1F9E0, 0x1F9E2, 0x1F9F0, 0x1F9F0, 0x1FA00, 0x1FA03,
230 /* GPU RSCC */
231 0x23740, 0x23742, 0x23744, 0x23747, 0x2374C, 0x23787, 0x237EC, 0x237EF,
232 0x237F4, 0x2382F, 0x23894, 0x23897, 0x2389C, 0x238D7, 0x2393C, 0x2393F,
233 0x23944, 0x2397F,
234 /* GMU AO */
235 0x23B00, 0x23B16, 0x23C00, 0x23C00,
236 /* GPU CC */
237 0x24000, 0x24012, 0x24040, 0x24052, 0x24400, 0x24404, 0x24407, 0x2440B,
238 0x24415, 0x2441C, 0x2441E, 0x2442D, 0x2443C, 0x2443D, 0x2443F, 0x24440,
239 0x24442, 0x24449, 0x24458, 0x2445A, 0x24540, 0x2455E, 0x24800, 0x24802,
240 0x24C00, 0x24C02, 0x25400, 0x25402, 0x25800, 0x25802, 0x25C00, 0x25C02,
241 0x26000, 0x26002,
242 /* GPU CC ACD */
243 0x26400, 0x26416, 0x26420, 0x26427,
 244};
 245
 246static const struct adreno_vbif_snapshot_registers
247a6xx_vbif_snapshot_registers[] = {
248 { 0x20040000, 0xFF000000, a6xx_vbif_ver_20xxxxxx_registers,
249 ARRAY_SIZE(a6xx_vbif_ver_20xxxxxx_registers)/2},
250};
251
252/*
253 * Set of registers to dump for A6XX on snapshot.
254 * Registers in pairs - first value is the start offset, second
255 * is the stop offset (inclusive)
256 */
257
258static const unsigned int a6xx_registers[] = {
259 /* RBBM */
 260 0x0000, 0x0002, 0x0010, 0x0010, 0x0012, 0x0012, 0x0018, 0x001B,
261 0x001e, 0x0032, 0x0038, 0x003C, 0x0042, 0x0042, 0x0044, 0x0044,
262 0x0047, 0x0047, 0x0056, 0x0056, 0x00AD, 0x00AE, 0x00B0, 0x00FB,
 263 0x0100, 0x011D, 0x0200, 0x020D, 0x0218, 0x023D, 0x0400, 0x04F9,
264 0x0500, 0x0500, 0x0505, 0x050B, 0x050E, 0x0511, 0x0533, 0x0533,
265 0x0540, 0x0555,
 266 /* CP */
 267 0x0800, 0x0808, 0x0810, 0x0813, 0x0820, 0x0821, 0x0823, 0x0824,
268 0x0826, 0x0827, 0x0830, 0x0833, 0x0840, 0x0843, 0x084F, 0x086F,
269 0x0880, 0x088A, 0x08A0, 0x08AB, 0x08C0, 0x08C4, 0x08D0, 0x08DD,
270 0x08F0, 0x08F3, 0x0900, 0x0903, 0x0908, 0x0911, 0x0928, 0x093E,
271 0x0942, 0x094D, 0x0980, 0x0984, 0x098D, 0x0996, 0x0998, 0x099E,
272 0x09A0, 0x09A6, 0x09A8, 0x09AE, 0x09B0, 0x09B1, 0x09C2, 0x09C8,
273 0x0A00, 0x0A03,
 274 /* VSC */
275 0x0C00, 0x0C04, 0x0C06, 0x0C06, 0x0C10, 0x0CD9, 0x0E00, 0x0E0E,
276 /* UCHE */
277 0x0E10, 0x0E13, 0x0E17, 0x0E19, 0x0E1C, 0x0E2B, 0x0E30, 0x0E32,
278 0x0E38, 0x0E39,
279 /* GRAS */
 280 0x8600, 0x8601, 0x8610, 0x861B, 0x8620, 0x8620, 0x8628, 0x862B,
281 0x8630, 0x8637,
 282 /* RB */
283 0x8E01, 0x8E01, 0x8E04, 0x8E05, 0x8E07, 0x8E08, 0x8E0C, 0x8E0C,
284 0x8E10, 0x8E1C, 0x8E20, 0x8E25, 0x8E28, 0x8E28, 0x8E2C, 0x8E2F,
285 0x8E3B, 0x8E3E, 0x8E40, 0x8E43, 0x8E50, 0x8E5E, 0x8E70, 0x8E77,
286 /* VPC */
287 0x9600, 0x9604, 0x9624, 0x9637,
288 /* PC */
289 0x9E00, 0x9E01, 0x9E03, 0x9E0E, 0x9E11, 0x9E16, 0x9E19, 0x9E19,
290 0x9E1C, 0x9E1C, 0x9E20, 0x9E23, 0x9E30, 0x9E31, 0x9E34, 0x9E34,
291 0x9E70, 0x9E72, 0x9E78, 0x9E79, 0x9E80, 0x9FFF,
292 /* VFD */
293 0xA600, 0xA601, 0xA603, 0xA603, 0xA60A, 0xA60A, 0xA610, 0xA617,
 294 0xA630, 0xA630,
 295};
296
 297/*
298 * Set of registers to dump for A6XX before actually triggering crash dumper.
299 * Registers in pairs - first value is the start offset, second
300 * is the stop offset (inclusive)
301 */
302static const unsigned int a6xx_pre_crashdumper_registers[] = {
303 /* RBBM: RBBM_STATUS - RBBM_STATUS3 */
304 0x210, 0x213,
305 /* CP: CP_STATUS_1 */
306 0x825, 0x825,
307};
308
 309enum a6xx_debugbus_id {
310 A6XX_DBGBUS_CP = 0x1,
311 A6XX_DBGBUS_RBBM = 0x2,
312 A6XX_DBGBUS_VBIF = 0x3,
313 A6XX_DBGBUS_HLSQ = 0x4,
314 A6XX_DBGBUS_UCHE = 0x5,
315 A6XX_DBGBUS_DPM = 0x6,
316 A6XX_DBGBUS_TESS = 0x7,
317 A6XX_DBGBUS_PC = 0x8,
318 A6XX_DBGBUS_VFDP = 0x9,
319 A6XX_DBGBUS_VPC = 0xa,
320 A6XX_DBGBUS_TSE = 0xb,
321 A6XX_DBGBUS_RAS = 0xc,
322 A6XX_DBGBUS_VSC = 0xd,
323 A6XX_DBGBUS_COM = 0xe,
324 A6XX_DBGBUS_LRZ = 0x10,
325 A6XX_DBGBUS_A2D = 0x11,
326 A6XX_DBGBUS_CCUFCHE = 0x12,
 327 A6XX_DBGBUS_GMU_CX = 0x13,
 328 A6XX_DBGBUS_RBP = 0x14,
329 A6XX_DBGBUS_DCS = 0x15,
330 A6XX_DBGBUS_RBBM_CFG = 0x16,
331 A6XX_DBGBUS_CX = 0x17,
 332 A6XX_DBGBUS_GMU_GX = 0x18,
 333 A6XX_DBGBUS_TPFCHE = 0x19,
334 A6XX_DBGBUS_GPC = 0x1d,
335 A6XX_DBGBUS_LARC = 0x1e,
336 A6XX_DBGBUS_HLSQ_SPTP = 0x1f,
337 A6XX_DBGBUS_RB_0 = 0x20,
338 A6XX_DBGBUS_RB_1 = 0x21,
339 A6XX_DBGBUS_UCHE_WRAPPER = 0x24,
340 A6XX_DBGBUS_CCU_0 = 0x28,
341 A6XX_DBGBUS_CCU_1 = 0x29,
342 A6XX_DBGBUS_VFD_0 = 0x38,
343 A6XX_DBGBUS_VFD_1 = 0x39,
344 A6XX_DBGBUS_VFD_2 = 0x3a,
345 A6XX_DBGBUS_VFD_3 = 0x3b,
346 A6XX_DBGBUS_SP_0 = 0x40,
347 A6XX_DBGBUS_SP_1 = 0x41,
348 A6XX_DBGBUS_TPL1_0 = 0x48,
349 A6XX_DBGBUS_TPL1_1 = 0x49,
350 A6XX_DBGBUS_TPL1_2 = 0x4a,
351 A6XX_DBGBUS_TPL1_3 = 0x4b,
352};
353
354static const struct adreno_debugbus_block a6xx_dbgc_debugbus_blocks[] = {
355 { A6XX_DBGBUS_CP, 0x100, },
356 { A6XX_DBGBUS_RBBM, 0x100, },
357 { A6XX_DBGBUS_HLSQ, 0x100, },
358 { A6XX_DBGBUS_UCHE, 0x100, },
359 { A6XX_DBGBUS_DPM, 0x100, },
360 { A6XX_DBGBUS_TESS, 0x100, },
361 { A6XX_DBGBUS_PC, 0x100, },
362 { A6XX_DBGBUS_VFDP, 0x100, },
363 { A6XX_DBGBUS_VPC, 0x100, },
364 { A6XX_DBGBUS_TSE, 0x100, },
365 { A6XX_DBGBUS_RAS, 0x100, },
366 { A6XX_DBGBUS_VSC, 0x100, },
367 { A6XX_DBGBUS_COM, 0x100, },
368 { A6XX_DBGBUS_LRZ, 0x100, },
369 { A6XX_DBGBUS_A2D, 0x100, },
370 { A6XX_DBGBUS_CCUFCHE, 0x100, },
371 { A6XX_DBGBUS_RBP, 0x100, },
372 { A6XX_DBGBUS_DCS, 0x100, },
373 { A6XX_DBGBUS_RBBM_CFG, 0x100, },
 374 { A6XX_DBGBUS_GMU_GX, 0x100, },
 375 { A6XX_DBGBUS_TPFCHE, 0x100, },
376 { A6XX_DBGBUS_GPC, 0x100, },
377 { A6XX_DBGBUS_LARC, 0x100, },
378 { A6XX_DBGBUS_HLSQ_SPTP, 0x100, },
379 { A6XX_DBGBUS_RB_0, 0x100, },
380 { A6XX_DBGBUS_RB_1, 0x100, },
381 { A6XX_DBGBUS_UCHE_WRAPPER, 0x100, },
382 { A6XX_DBGBUS_CCU_0, 0x100, },
383 { A6XX_DBGBUS_CCU_1, 0x100, },
384 { A6XX_DBGBUS_VFD_0, 0x100, },
385 { A6XX_DBGBUS_VFD_1, 0x100, },
386 { A6XX_DBGBUS_VFD_2, 0x100, },
387 { A6XX_DBGBUS_VFD_3, 0x100, },
388 { A6XX_DBGBUS_SP_0, 0x100, },
389 { A6XX_DBGBUS_SP_1, 0x100, },
390 { A6XX_DBGBUS_TPL1_0, 0x100, },
391 { A6XX_DBGBUS_TPL1_1, 0x100, },
392 { A6XX_DBGBUS_TPL1_2, 0x100, },
393 { A6XX_DBGBUS_TPL1_3, 0x100, },
394};
 395
 396static void __iomem *a6xx_cx_dbgc;
 397static const struct adreno_debugbus_block a6xx_cx_dbgc_debugbus_blocks[] = {
 398 { A6XX_DBGBUS_VBIF, 0x100, },
 399 { A6XX_DBGBUS_GMU_CX, 0x100, },
 400 { A6XX_DBGBUS_CX, 0x100, },
401};
402
 403#define A6XX_NUM_SHADER_BANKS 3
404#define A6XX_SHADER_STATETYPE_SHIFT 8
405
406enum a6xx_shader_obj {
407 A6XX_TP0_TMO_DATA = 0x9,
408 A6XX_TP0_SMO_DATA = 0xa,
409 A6XX_TP0_MIPMAP_BASE_DATA = 0xb,
410 A6XX_TP1_TMO_DATA = 0x19,
411 A6XX_TP1_SMO_DATA = 0x1a,
412 A6XX_TP1_MIPMAP_BASE_DATA = 0x1b,
413 A6XX_SP_INST_DATA = 0x29,
414 A6XX_SP_LB_0_DATA = 0x2a,
415 A6XX_SP_LB_1_DATA = 0x2b,
416 A6XX_SP_LB_2_DATA = 0x2c,
417 A6XX_SP_LB_3_DATA = 0x2d,
418 A6XX_SP_LB_4_DATA = 0x2e,
419 A6XX_SP_LB_5_DATA = 0x2f,
420 A6XX_SP_CB_BINDLESS_DATA = 0x30,
421 A6XX_SP_CB_LEGACY_DATA = 0x31,
422 A6XX_SP_UAV_DATA = 0x32,
423 A6XX_SP_INST_TAG = 0x33,
424 A6XX_SP_CB_BINDLESS_TAG = 0x34,
425 A6XX_SP_TMO_UMO_TAG = 0x35,
426 A6XX_SP_SMO_TAG = 0x36,
427 A6XX_SP_STATE_DATA = 0x37,
428 A6XX_HLSQ_CHUNK_CVS_RAM = 0x49,
429 A6XX_HLSQ_CHUNK_CPS_RAM = 0x4a,
430 A6XX_HLSQ_CHUNK_CVS_RAM_TAG = 0x4b,
431 A6XX_HLSQ_CHUNK_CPS_RAM_TAG = 0x4c,
432 A6XX_HLSQ_ICB_CVS_CB_BASE_TAG = 0x4d,
433 A6XX_HLSQ_ICB_CPS_CB_BASE_TAG = 0x4e,
434 A6XX_HLSQ_CVS_MISC_RAM = 0x50,
435 A6XX_HLSQ_CPS_MISC_RAM = 0x51,
436 A6XX_HLSQ_INST_RAM = 0x52,
437 A6XX_HLSQ_GFX_CVS_CONST_RAM = 0x53,
438 A6XX_HLSQ_GFX_CPS_CONST_RAM = 0x54,
439 A6XX_HLSQ_CVS_MISC_RAM_TAG = 0x55,
440 A6XX_HLSQ_CPS_MISC_RAM_TAG = 0x56,
441 A6XX_HLSQ_INST_RAM_TAG = 0x57,
442 A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG = 0x58,
443 A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG = 0x59,
444 A6XX_HLSQ_PWR_REST_RAM = 0x5a,
445 A6XX_HLSQ_PWR_REST_TAG = 0x5b,
446 A6XX_HLSQ_DATAPATH_META = 0x60,
447 A6XX_HLSQ_FRONTEND_META = 0x61,
448 A6XX_HLSQ_INDIRECT_META = 0x62,
449 A6XX_HLSQ_BACKEND_META = 0x63
450};
451
452struct a6xx_shader_block {
453 unsigned int statetype;
454 unsigned int sz;
455 uint64_t offset;
456};
457
458struct a6xx_shader_block_info {
459 struct a6xx_shader_block *block;
460 unsigned int bank;
461 uint64_t offset;
462};
463
464static struct a6xx_shader_block a6xx_shader_blocks[] = {
465 {A6XX_TP0_TMO_DATA, 0x200},
466 {A6XX_TP0_SMO_DATA, 0x80,},
467 {A6XX_TP0_MIPMAP_BASE_DATA, 0x3C0},
468 {A6XX_TP1_TMO_DATA, 0x200},
469 {A6XX_TP1_SMO_DATA, 0x80,},
470 {A6XX_TP1_MIPMAP_BASE_DATA, 0x3C0},
471 {A6XX_SP_INST_DATA, 0x800},
472 {A6XX_SP_LB_0_DATA, 0x800},
473 {A6XX_SP_LB_1_DATA, 0x800},
474 {A6XX_SP_LB_2_DATA, 0x800},
475 {A6XX_SP_LB_3_DATA, 0x800},
476 {A6XX_SP_LB_4_DATA, 0x800},
477 {A6XX_SP_LB_5_DATA, 0x200},
478 {A6XX_SP_CB_BINDLESS_DATA, 0x2000},
479 {A6XX_SP_CB_LEGACY_DATA, 0x280,},
480 {A6XX_SP_UAV_DATA, 0x80,},
481 {A6XX_SP_INST_TAG, 0x80,},
482 {A6XX_SP_CB_BINDLESS_TAG, 0x80,},
483 {A6XX_SP_TMO_UMO_TAG, 0x80,},
484 {A6XX_SP_SMO_TAG, 0x80},
485 {A6XX_SP_STATE_DATA, 0x3F},
486 {A6XX_HLSQ_CHUNK_CVS_RAM, 0x1C0},
487 {A6XX_HLSQ_CHUNK_CPS_RAM, 0x280},
488 {A6XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40,},
489 {A6XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40,},
490 {A6XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x4,},
491 {A6XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x4,},
492 {A6XX_HLSQ_CVS_MISC_RAM, 0x1C0},
493 {A6XX_HLSQ_CPS_MISC_RAM, 0x580},
494 {A6XX_HLSQ_INST_RAM, 0x800},
495 {A6XX_HLSQ_GFX_CVS_CONST_RAM, 0x800},
496 {A6XX_HLSQ_GFX_CPS_CONST_RAM, 0x800},
497 {A6XX_HLSQ_CVS_MISC_RAM_TAG, 0x8,},
498 {A6XX_HLSQ_CPS_MISC_RAM_TAG, 0x4,},
499 {A6XX_HLSQ_INST_RAM_TAG, 0x80,},
500 {A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0xC,},
501 {A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x10},
502 {A6XX_HLSQ_PWR_REST_RAM, 0x28},
503 {A6XX_HLSQ_PWR_REST_TAG, 0x14},
504 {A6XX_HLSQ_DATAPATH_META, 0x40,},
505 {A6XX_HLSQ_FRONTEND_META, 0x40},
506 {A6XX_HLSQ_INDIRECT_META, 0x40,}
507};
508
 509static struct kgsl_memdesc a6xx_capturescript;
510static struct kgsl_memdesc a6xx_crashdump_registers;
511static bool crash_dump_valid;
512
513static size_t a6xx_legacy_snapshot_registers(struct kgsl_device *device,
514 u8 *buf, size_t remain)
515{
516 struct kgsl_snapshot_registers regs = {
517 .regs = a6xx_registers,
518 .count = ARRAY_SIZE(a6xx_registers) / 2,
519 };
520
521 return kgsl_snapshot_dump_registers(device, buf, remain, &regs);
522}
523
524static struct cdregs {
525 const unsigned int *regs;
526 unsigned int size;
527} _a6xx_cd_registers[] = {
528 { a6xx_registers, ARRAY_SIZE(a6xx_registers) },
529};
530
531#define REG_PAIR_COUNT(_a, _i) \
532 (((_a)[(2 * (_i)) + 1] - (_a)[2 * (_i)]) + 1)
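/*
 * Illustrative example (not part of the driver): REG_PAIR_COUNT() expands
 * to the inclusive register count of pair _i in a {start, end} table. For
 * the hypothetical table below, pair 0 covers three registers and pair 1
 * covers a single register.
 */
static const unsigned int example_reg_pairs[] = {
	0x8000, 0x8002,	/* REG_PAIR_COUNT(example_reg_pairs, 0) == 3 */
	0x8010, 0x8010,	/* REG_PAIR_COUNT(example_reg_pairs, 1) == 1 */
};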
533
534static size_t a6xx_snapshot_registers(struct kgsl_device *device, u8 *buf,
535 size_t remain, void *priv)
536{
537 struct kgsl_snapshot_regs *header = (struct kgsl_snapshot_regs *)buf;
538 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
539 unsigned int *src = (unsigned int *)a6xx_crashdump_registers.hostptr;
540 unsigned int i, j, k;
541 unsigned int count = 0;
542
543 if (crash_dump_valid == false)
544 return a6xx_legacy_snapshot_registers(device, buf, remain);
545
546 if (remain < sizeof(*header)) {
547 SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
548 return 0;
549 }
550
551 remain -= sizeof(*header);
552
553 for (i = 0; i < ARRAY_SIZE(_a6xx_cd_registers); i++) {
554 struct cdregs *regs = &_a6xx_cd_registers[i];
555
556 for (j = 0; j < regs->size / 2; j++) {
557 unsigned int start = regs->regs[2 * j];
558 unsigned int end = regs->regs[(2 * j) + 1];
559
560 if (remain < ((end - start) + 1) * 8) {
561 SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
562 goto out;
563 }
564
565 remain -= ((end - start) + 1) * 8;
566
567 for (k = start; k <= end; k++, count++) {
568 *data++ = k;
569 *data++ = *src++;
570 }
571 }
572 }
573
574out:
575 header->count = count;
576
577 /* Return the size of the section */
578 return (count * 8) + sizeof(*header);
579}
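/*
 * Illustrative sketch (not part of the driver): the REGS section emitted
 * above is a kgsl_snapshot_regs header followed by one (address, value)
 * dword pair per register, so a range of N registers needs N * 8 bytes of
 * payload. A hypothetical sizing helper would therefore look like this.
 */
static size_t example_regs_section_size(const unsigned int *pairs,
		unsigned int num_pairs)
{
	size_t bytes = sizeof(struct kgsl_snapshot_regs);
	unsigned int i;

	for (i = 0; i < num_pairs; i++)
		bytes += (pairs[2 * i + 1] - pairs[2 * i] + 1) * 8;

	return bytes;
}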
580
 581static size_t a6xx_snapshot_pre_crashdump_regs(struct kgsl_device *device,
582 u8 *buf, size_t remain, void *priv)
583{
584 struct kgsl_snapshot_registers pre_cdregs = {
585 .regs = a6xx_pre_crashdumper_registers,
586 .count = ARRAY_SIZE(a6xx_pre_crashdumper_registers)/2,
587 };
588
589 return kgsl_snapshot_dump_registers(device, buf, remain, &pre_cdregs);
590}
591
 592static size_t a6xx_snapshot_shader_memory(struct kgsl_device *device,
593 u8 *buf, size_t remain, void *priv)
594{
595 struct kgsl_snapshot_shader *header =
596 (struct kgsl_snapshot_shader *) buf;
597 struct a6xx_shader_block_info *info =
598 (struct a6xx_shader_block_info *) priv;
599 struct a6xx_shader_block *block = info->block;
600 unsigned int *data = (unsigned int *) (buf + sizeof(*header));
601
602 if (remain < SHADER_SECTION_SZ(block->sz)) {
603 SNAPSHOT_ERR_NOMEM(device, "SHADER MEMORY");
604 return 0;
605 }
606
607 header->type = block->statetype;
608 header->index = info->bank;
609 header->size = block->sz;
610
611 memcpy(data, a6xx_crashdump_registers.hostptr + info->offset,
612 block->sz);
613
614 return SHADER_SECTION_SZ(block->sz);
615}
616
617static void a6xx_snapshot_shader(struct kgsl_device *device,
618 struct kgsl_snapshot *snapshot)
619{
620 unsigned int i, j;
621 struct a6xx_shader_block_info info;
622
623 /* Shader blocks can only be read by the crash dumper */
624 if (crash_dump_valid == false)
625 return;
626
627 for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
628 for (j = 0; j < A6XX_NUM_SHADER_BANKS; j++) {
629 info.block = &a6xx_shader_blocks[i];
630 info.bank = j;
631 info.offset = a6xx_shader_blocks[i].offset +
632 (j * a6xx_shader_blocks[i].sz);
633
634 /* Shader working/shadow memory */
635 kgsl_snapshot_add_section(device,
636 KGSL_SNAPSHOT_SECTION_SHADER,
637 snapshot, a6xx_snapshot_shader_memory, &info);
638 }
639 }
640}
641
 642static void a6xx_snapshot_mempool(struct kgsl_device *device,
643 struct kgsl_snapshot *snapshot)
644{
645 unsigned int pool_size;
 646 u8 *buf = snapshot->ptr;
 647
 648 /* Set the mempool size to 0 to stabilize it while dumping */
 649 kgsl_regread(device, A6XX_CP_MEM_POOL_SIZE, &pool_size);
650 kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, 0);
651
652 kgsl_snapshot_indexed_registers(device, snapshot,
653 A6XX_CP_MEM_POOL_DBG_ADDR, A6XX_CP_MEM_POOL_DBG_DATA,
654 0, 0x2060);
655
 656 /*
657 * Data at offset 0x2000 in the mempool section is the mempool size.
658 * Since we set it to 0, patch in the original size so that the data
659 * is consistent.
660 */
661 if (buf < snapshot->ptr) {
662 unsigned int *data;
663
664 /* Skip over the headers */
665 buf += sizeof(struct kgsl_snapshot_section_header) +
666 sizeof(struct kgsl_snapshot_indexed_regs);
667
668 data = (unsigned int *)buf + 0x2000;
669 *data = pool_size;
670 }
671
 672 /* Restore the saved mempool size */
673 kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, pool_size);
674}
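/*
 * Illustrative sketch (not part of the driver): the indexed-register dump
 * above stores one dword per debug address, so entry 0x2000 of the payload
 * is the CP_MEM_POOL_SIZE slot that gets patched back to the saved value.
 * A hypothetical helper locating that slot would look like this.
 */
static unsigned int *example_mempool_size_slot(u8 *section_start)
{
	u8 *payload = section_start +
		sizeof(struct kgsl_snapshot_section_header) +
		sizeof(struct kgsl_snapshot_indexed_regs);

	/* 0x2000 is a dword index into the payload, not a byte offset */
	return (unsigned int *)payload + 0x2000;
}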
675
 676static inline unsigned int a6xx_read_dbgahb(struct kgsl_device *device,
677 unsigned int regbase, unsigned int reg)
678{
679 unsigned int read_reg = A6XX_HLSQ_DBG_AHB_READ_APERTURE +
680 reg - regbase / 4;
681 unsigned int val;
682
683 kgsl_regread(device, read_reg, &val);
684 return val;
685}
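/*
 * Illustrative example (not part of the driver): regbase is a byte offset
 * while reg is a dword address, so the aperture read address works out to
 * A6XX_HLSQ_DBG_AHB_READ_APERTURE + (reg - regbase / 4). For the SP
 * non-context block (regbase 0x0002B800), register 0xAE05 is read at
 * 0xAE05 - (0x2B800 / 4) = 0xAE05 - 0xAE00 = 5 dwords into the aperture.
 */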
686
 687static size_t a6xx_legacy_snapshot_cluster_dbgahb(struct kgsl_device *device,
 688 u8 *buf, size_t remain, void *priv)
 689{
690 struct kgsl_snapshot_mvc_regs *header =
691 (struct kgsl_snapshot_mvc_regs *)buf;
692 struct a6xx_cluster_dbgahb_regs_info *info =
693 (struct a6xx_cluster_dbgahb_regs_info *)priv;
694 struct a6xx_cluster_dbgahb_registers *cur_cluster = info->cluster;
695 unsigned int read_sel;
696 unsigned int data_size = 0;
697 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
698 int i, j;
699
 700 if (!device->snapshot_legacy)
 701 return 0;
 702
 703 if (remain < sizeof(*header)) {
704 SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
705 return 0;
706 }
707
708 remain -= sizeof(*header);
709
710 header->ctxt_id = info->ctxt_id;
711 header->cluster_id = cur_cluster->id;
712
713 read_sel = ((cur_cluster->statetype + info->ctxt_id * 2) & 0xff) << 8;
714 kgsl_regwrite(device, A6XX_HLSQ_DBG_READ_SEL, read_sel);
715
716 for (i = 0; i < cur_cluster->num_sets; i++) {
717 unsigned int start = cur_cluster->regs[2 * i];
718 unsigned int end = cur_cluster->regs[2 * i + 1];
719
720 if (remain < (end - start + 3) * 4) {
721 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
722 goto out;
723 }
724
725 remain -= (end - start + 3) * 4;
726 data_size += (end - start + 3) * 4;
727
728 *data++ = start | (1 << 31);
729 *data++ = end;
730
731 for (j = start; j <= end; j++) {
732 unsigned int val;
733
734 val = a6xx_read_dbgahb(device, cur_cluster->regbase, j);
735 *data++ = val;
736
737 }
738 }
739
740out:
741 return data_size + sizeof(*header);
742}
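/*
 * Illustrative sketch (not part of the driver): the HLSQ read select packs
 * the per-context statetype into bits [15:8], and context 1 of a cluster
 * uses statetype + 2, exactly as computed above. For example, statetype
 * 0x22 selects 0x2200 for context 0 and 0x2400 for context 1.
 */
static unsigned int example_dbgahb_read_sel(unsigned int statetype,
		unsigned int ctxt_id)
{
	return ((statetype + ctxt_id * 2) & 0xff) << 8;
}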
743
 744static size_t a6xx_snapshot_cluster_dbgahb(struct kgsl_device *device, u8 *buf,
745 size_t remain, void *priv)
746{
747 struct kgsl_snapshot_mvc_regs *header =
748 (struct kgsl_snapshot_mvc_regs *)buf;
749 struct a6xx_cluster_dbgahb_regs_info *info =
750 (struct a6xx_cluster_dbgahb_regs_info *)priv;
751 struct a6xx_cluster_dbgahb_registers *cluster = info->cluster;
752 unsigned int data_size = 0;
753 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
754 int i, j;
755 unsigned int *src;
756
757
758 if (crash_dump_valid == false)
759 return a6xx_legacy_snapshot_cluster_dbgahb(device, buf, remain,
760 info);
761
762 if (remain < sizeof(*header)) {
763 SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
764 return 0;
765 }
766
767 remain -= sizeof(*header);
768
769 header->ctxt_id = info->ctxt_id;
770 header->cluster_id = cluster->id;
771
772 src = (unsigned int *)(a6xx_crashdump_registers.hostptr +
773 (header->ctxt_id ? cluster->offset1 : cluster->offset0));
774
775 for (i = 0; i < cluster->num_sets; i++) {
776 unsigned int start;
777 unsigned int end;
778
779 start = cluster->regs[2 * i];
780 end = cluster->regs[2 * i + 1];
781
782 if (remain < (end - start + 3) * 4) {
783 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
784 goto out;
785 }
786
787 remain -= (end - start + 3) * 4;
788 data_size += (end - start + 3) * 4;
789
790 *data++ = start | (1 << 31);
791 *data++ = end;
792 for (j = start; j <= end; j++)
793 *data++ = *src++;
794 }
795out:
796 return data_size + sizeof(*header);
797}
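/*
 * Illustrative sketch (not part of the driver): each range in an MVC
 * section is encoded as "start | (1 << 31), end, value0, value1, ...", so
 * a range of N registers occupies N + 2 dwords, which is where the
 * (end - start + 3) * 4 byte accounting above comes from. A hypothetical
 * reader could walk one range of the payload like this.
 */
static const unsigned int *example_mvc_walk_range(const unsigned int *data,
		unsigned int *start, unsigned int *end)
{
	*start = data[0] & 0x7fffffff;	/* strip the range marker bit */
	*end = data[1];

	/* Register values follow; return a pointer just past this range */
	return data + 2 + (*end - *start + 1);
}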
798
 799static size_t a6xx_legacy_snapshot_non_ctx_dbgahb(struct kgsl_device *device,
 800 u8 *buf, size_t remain, void *priv)
 801{
802 struct kgsl_snapshot_regs *header =
803 (struct kgsl_snapshot_regs *)buf;
804 struct a6xx_non_ctx_dbgahb_registers *regs =
805 (struct a6xx_non_ctx_dbgahb_registers *)priv;
806 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
807 int count = 0;
808 unsigned int read_sel;
809 int i, j;
810
 811 if (!device->snapshot_legacy)
 812 return 0;
 813
 814 /* Figure out how many registers we are going to dump */
815 for (i = 0; i < regs->num_sets; i++) {
816 int start = regs->regs[i * 2];
817 int end = regs->regs[i * 2 + 1];
818
819 count += (end - start + 1);
820 }
821
822 if (remain < (count * 8) + sizeof(*header)) {
823 SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
824 return 0;
825 }
826
827 header->count = count;
828
829 read_sel = (regs->statetype & 0xff) << 8;
830 kgsl_regwrite(device, A6XX_HLSQ_DBG_READ_SEL, read_sel);
831
832 for (i = 0; i < regs->num_sets; i++) {
833 unsigned int start = regs->regs[2 * i];
834 unsigned int end = regs->regs[2 * i + 1];
835
836 for (j = start; j <= end; j++) {
837 unsigned int val;
838
839 val = a6xx_read_dbgahb(device, regs->regbase, j);
840 *data++ = j;
841 *data++ = val;
842
843 }
844 }
845 return (count * 8) + sizeof(*header);
846}
847
 848static size_t a6xx_snapshot_non_ctx_dbgahb(struct kgsl_device *device, u8 *buf,
849 size_t remain, void *priv)
850{
851 struct kgsl_snapshot_regs *header =
852 (struct kgsl_snapshot_regs *)buf;
853 struct a6xx_non_ctx_dbgahb_registers *regs =
854 (struct a6xx_non_ctx_dbgahb_registers *)priv;
855 unsigned int count = 0;
856 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
857 unsigned int i, k;
858 unsigned int *src;
859
860 if (crash_dump_valid == false)
861 return a6xx_legacy_snapshot_non_ctx_dbgahb(device, buf, remain,
862 regs);
863
864 if (remain < sizeof(*header)) {
865 SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
866 return 0;
867 }
868
869 remain -= sizeof(*header);
870
871 src = (unsigned int *)(a6xx_crashdump_registers.hostptr + regs->offset);
872
873 for (i = 0; i < regs->num_sets; i++) {
874 unsigned int start;
875 unsigned int end;
876
877 start = regs->regs[2 * i];
878 end = regs->regs[(2 * i) + 1];
879
880 if (remain < (end - start + 1) * 8) {
881 SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
882 goto out;
883 }
884
885 remain -= ((end - start) + 1) * 8;
886
887 for (k = start; k <= end; k++, count++) {
888 *data++ = k;
889 *data++ = *src++;
890 }
891 }
892out:
893 header->count = count;
894
895 /* Return the size of the section */
896 return (count * 8) + sizeof(*header);
897}
898
 899static void a6xx_snapshot_dbgahb_regs(struct kgsl_device *device,
900 struct kgsl_snapshot *snapshot)
901{
902 int i, j;
903
904 for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
905 struct a6xx_cluster_dbgahb_registers *cluster =
906 &a6xx_dbgahb_ctx_clusters[i];
907 struct a6xx_cluster_dbgahb_regs_info info;
908
909 info.cluster = cluster;
910 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
911 info.ctxt_id = j;
912
913 kgsl_snapshot_add_section(device,
914 KGSL_SNAPSHOT_SECTION_MVC, snapshot,
915 a6xx_snapshot_cluster_dbgahb, &info);
916 }
917 }
918
919 for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
920 kgsl_snapshot_add_section(device,
921 KGSL_SNAPSHOT_SECTION_REGS, snapshot,
922 a6xx_snapshot_non_ctx_dbgahb, &a6xx_non_ctx_dbgahb[i]);
923 }
924}
925
 926static size_t a6xx_legacy_snapshot_mvc(struct kgsl_device *device, u8 *buf,
927 size_t remain, void *priv)
928{
929 struct kgsl_snapshot_mvc_regs *header =
930 (struct kgsl_snapshot_mvc_regs *)buf;
931 struct a6xx_cluster_regs_info *info =
932 (struct a6xx_cluster_regs_info *)priv;
933 struct a6xx_cluster_registers *cur_cluster = info->cluster;
934 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
935 unsigned int ctxt = info->ctxt_id;
936 unsigned int start, end, i, j, aperture_cntl = 0;
937 unsigned int data_size = 0;
938
939 if (remain < sizeof(*header)) {
940 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
941 return 0;
942 }
943
944 remain -= sizeof(*header);
945
946 header->ctxt_id = info->ctxt_id;
947 header->cluster_id = cur_cluster->id;
948
949 /*
950 * Set the AHB control for the Host to read from the
951 * cluster/context for this iteration.
952 */
953 aperture_cntl = ((cur_cluster->id & 0x7) << 8) | (ctxt << 4) | ctxt;
954 kgsl_regwrite(device, A6XX_CP_APERTURE_CNTL_HOST, aperture_cntl);
955
956 for (i = 0; i < cur_cluster->num_sets; i++) {
957 start = cur_cluster->regs[2 * i];
958 end = cur_cluster->regs[2 * i + 1];
959
960 if (remain < (end - start + 3) * 4) {
961 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
962 goto out;
963 }
964
965 remain -= (end - start + 3) * 4;
966 data_size += (end - start + 3) * 4;
967
968 *data++ = start | (1 << 31);
969 *data++ = end;
970 for (j = start; j <= end; j++) {
971 unsigned int val;
972
973 kgsl_regread(device, j, &val);
974 *data++ = val;
975 }
976 }
977out:
978 return data_size + sizeof(*header);
979}
980
981static size_t a6xx_snapshot_mvc(struct kgsl_device *device, u8 *buf,
982 size_t remain, void *priv)
983{
984 struct kgsl_snapshot_mvc_regs *header =
985 (struct kgsl_snapshot_mvc_regs *)buf;
986 struct a6xx_cluster_regs_info *info =
987 (struct a6xx_cluster_regs_info *)priv;
988 struct a6xx_cluster_registers *cluster = info->cluster;
989 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
990 unsigned int *src;
991 int i, j;
992 unsigned int start, end;
993 size_t data_size = 0;
994
995 if (crash_dump_valid == false)
996 return a6xx_legacy_snapshot_mvc(device, buf, remain, info);
997
998 if (remain < sizeof(*header)) {
999 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
1000 return 0;
1001 }
1002
1003 remain -= sizeof(*header);
1004
1005 header->ctxt_id = info->ctxt_id;
1006 header->cluster_id = cluster->id;
1007
1008 src = (unsigned int *)(a6xx_crashdump_registers.hostptr +
1009 (header->ctxt_id ? cluster->offset1 : cluster->offset0));
1010
1011 for (i = 0; i < cluster->num_sets; i++) {
1012 start = cluster->regs[2 * i];
1013 end = cluster->regs[2 * i + 1];
1014
1015 if (remain < (end - start + 3) * 4) {
1016 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
1017 goto out;
1018 }
1019
1020 remain -= (end - start + 3) * 4;
1021 data_size += (end - start + 3) * 4;
1022
1023 *data++ = start | (1 << 31);
1024 *data++ = end;
1025 for (j = start; j <= end; j++)
1026 *data++ = *src++;
1027 }
1028
1029out:
1030 return data_size + sizeof(*header);
1031
1032}
1033
1034static void a6xx_snapshot_mvc_regs(struct kgsl_device *device,
1035 struct kgsl_snapshot *snapshot)
1036{
1037 int i, j;
1038 struct a6xx_cluster_regs_info info;
1039
1040 for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
1041 struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];
1042
1043 info.cluster = cluster;
1044 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
1045 info.ctxt_id = j;
1046
1047 kgsl_snapshot_add_section(device,
1048 KGSL_SNAPSHOT_SECTION_MVC, snapshot,
1049 a6xx_snapshot_mvc, &info);
1050 }
1051 }
1052}
1053
 1054/* a6xx_dbgc_debug_bus_read() - Read data from trace bus */
1055static void a6xx_dbgc_debug_bus_read(struct kgsl_device *device,
1056 unsigned int block_id, unsigned int index, unsigned int *val)
1057{
1058 unsigned int reg;
1059
1060 reg = (block_id << A6XX_DBGC_CFG_DBGBUS_SEL_PING_BLK_SEL_SHIFT) |
1061 (index << A6XX_DBGC_CFG_DBGBUS_SEL_PING_INDEX_SHIFT);
1062
1063 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_A, reg);
1064 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_B, reg);
1065 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_C, reg);
1066 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_D, reg);
1067
 1068 /*
 1069 * There needs to be a delay of 1 us to allow enough time for the
 1070 * correct data to be funneled into the trace buffer
 1071 */
 1072 udelay(1);
 1073
 1074 kgsl_regread(device, A6XX_DBGC_CFG_DBGBUS_TRACE_BUF2, val);
1075 val++;
1076 kgsl_regread(device, A6XX_DBGC_CFG_DBGBUS_TRACE_BUF1, val);
1077}
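/*
 * Illustrative sketch (not part of the driver): the debugbus select value
 * packs the block id and the index into one register using the shifts
 * above; each programmed select then yields two dwords (TRACE_BUF2 and
 * TRACE_BUF1), which is why callers treat every debugbus unit as 2 DWORDS.
 */
static unsigned int example_dbgbus_select(unsigned int block_id,
		unsigned int index)
{
	return (block_id << A6XX_DBGC_CFG_DBGBUS_SEL_PING_BLK_SEL_SHIFT) |
		(index << A6XX_DBGC_CFG_DBGBUS_SEL_PING_INDEX_SHIFT);
}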
1078
 1079/* a6xx_snapshot_dbgc_debugbus_block() - Capture debug data for a gpu block */
1080static size_t a6xx_snapshot_dbgc_debugbus_block(struct kgsl_device *device,
1081 u8 *buf, size_t remain, void *priv)
1082{
 1083 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 1084 struct kgsl_snapshot_debugbus *header =
1085 (struct kgsl_snapshot_debugbus *)buf;
1086 struct adreno_debugbus_block *block = priv;
1087 int i;
1088 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
1089 unsigned int dwords;
 1090 unsigned int block_id;
 1091 size_t size;
1092
1093 dwords = block->dwords;
1094
1095 /* For a6xx each debug bus data unit is 2 DWORDS */
1096 size = (dwords * sizeof(unsigned int) * 2) + sizeof(*header);
1097
1098 if (remain < size) {
1099 SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
1100 return 0;
1101 }
1102
1103 header->id = block->block_id;
1104 header->count = dwords * 2;
1105
 1106 block_id = block->block_id;
1107 /* GMU_GX data is read using the GMU_CX block id on A630 */
1108 if (adreno_is_a630(adreno_dev) &&
1109 (block_id == A6XX_DBGBUS_GMU_GX))
1110 block_id = A6XX_DBGBUS_GMU_CX;
1111
 1112 for (i = 0; i < dwords; i++)
 1113 a6xx_dbgc_debug_bus_read(device, block_id, i, &data[i*2]);
 1114
1115 return size;
1116}
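/*
 * Illustrative sketch (not part of the driver): a debugbus section is a
 * kgsl_snapshot_debugbus header followed by two dwords per index, so the
 * space check above boils down to the size computed by this hypothetical
 * helper.
 */
static size_t example_debugbus_section_size(unsigned int dwords)
{
	return (dwords * sizeof(unsigned int) * 2) +
		sizeof(struct kgsl_snapshot_debugbus);
}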
1117
 1118static void _cx_dbgc_regread(unsigned int offsetwords, unsigned int *value)
1119{
1120 void __iomem *reg;
1121
1122 if (WARN((offsetwords < A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) ||
1123 (offsetwords > A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2),
1124 "Read beyond CX_DBGC block: 0x%x\n", offsetwords))
1125 return;
1126
1127 reg = a6xx_cx_dbgc +
1128 ((offsetwords - A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) << 2);
1129 *value = __raw_readl(reg);
1130
1131 /*
1132 * ensure this read finishes before the next one.
1133 * i.e. act like normal readl()
1134 */
1135 rmb();
1136}
1137
1138static void _cx_dbgc_regwrite(unsigned int offsetwords, unsigned int value)
1139{
1140 void __iomem *reg;
1141
1142 if (WARN((offsetwords < A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) ||
1143 (offsetwords > A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2),
1144 "Write beyond CX_DBGC block: 0x%x\n", offsetwords))
1145 return;
1146
1147 reg = a6xx_cx_dbgc +
1148 ((offsetwords - A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) << 2);
1149
1150 /*
1151 * ensure previous writes post before this one,
1152 * i.e. act like normal writel()
1153 */
1154 wmb();
1155 __raw_writel(value, reg);
1156}
1157
 1158/* a6xx_cx_debug_bus_read() - Read data from trace bus */
1159static void a6xx_cx_debug_bus_read(struct kgsl_device *device,
1160 unsigned int block_id, unsigned int index, unsigned int *val)
1161{
1162 unsigned int reg;
1163
1164 reg = (block_id << A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_BLK_SEL_SHIFT) |
1165 (index << A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_INDEX_SHIFT);
1166
1167 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_A, reg);
1168 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_B, reg);
1169 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_C, reg);
1170 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_D, reg);
1171
 1172 /*
 1173 * There needs to be a delay of 1 us to allow enough time for the
 1174 * correct data to be funneled into the trace buffer
 1175 */
 1176 udelay(1);
 1177
 1178 _cx_dbgc_regread(A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2, val);
1179 val++;
1180 _cx_dbgc_regread(A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1, val);
1181}
1182
1183/*
1184 * a6xx_snapshot_cx_dbgc_debugbus_block() - Capture debug data for a gpu
1185 * block from the CX DBGC block
1186 */
1187static size_t a6xx_snapshot_cx_dbgc_debugbus_block(struct kgsl_device *device,
1188 u8 *buf, size_t remain, void *priv)
1189{
1190 struct kgsl_snapshot_debugbus *header =
1191 (struct kgsl_snapshot_debugbus *)buf;
1192 struct adreno_debugbus_block *block = priv;
1193 int i;
1194 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
1195 unsigned int dwords;
1196 size_t size;
1197
1198 dwords = block->dwords;
1199
 1200 /* For a6xx each debug bus data unit is 2 DWORDS */
1201 size = (dwords * sizeof(unsigned int) * 2) + sizeof(*header);
1202
1203 if (remain < size) {
1204 SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
1205 return 0;
1206 }
1207
1208 header->id = block->block_id;
1209 header->count = dwords * 2;
1210
1211 for (i = 0; i < dwords; i++)
1212 a6xx_cx_debug_bus_read(device, block->block_id, i,
1213 &data[i*2]);
1214
1215 return size;
1216}
1217
 1218/* a6xx_snapshot_debugbus() - Capture debug bus data */
1219static void a6xx_snapshot_debugbus(struct kgsl_device *device,
1220 struct kgsl_snapshot *snapshot)
1221{
1222 int i;
1223
1224 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_CNTLT,
1225 (0xf << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT_SHIFT) |
 1226 (0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU_SHIFT) |
 1227 (0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN_SHIFT));
 1228
1229 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_CNTLM,
1230 0xf << A6XX_DBGC_CFG_DBGBUS_CTLTM_ENABLE_SHIFT);
1231
1232 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_0, 0);
1233 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_1, 0);
1234 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_2, 0);
1235 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_3, 0);
1236
1237 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_BYTEL_0,
1238 (0 << A6XX_DBGC_CFG_DBGBUS_BYTEL0_SHIFT) |
1239 (1 << A6XX_DBGC_CFG_DBGBUS_BYTEL1_SHIFT) |
1240 (2 << A6XX_DBGC_CFG_DBGBUS_BYTEL2_SHIFT) |
1241 (3 << A6XX_DBGC_CFG_DBGBUS_BYTEL3_SHIFT) |
1242 (4 << A6XX_DBGC_CFG_DBGBUS_BYTEL4_SHIFT) |
1243 (5 << A6XX_DBGC_CFG_DBGBUS_BYTEL5_SHIFT) |
1244 (6 << A6XX_DBGC_CFG_DBGBUS_BYTEL6_SHIFT) |
1245 (7 << A6XX_DBGC_CFG_DBGBUS_BYTEL7_SHIFT));
1246 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_BYTEL_1,
1247 (8 << A6XX_DBGC_CFG_DBGBUS_BYTEL8_SHIFT) |
1248 (9 << A6XX_DBGC_CFG_DBGBUS_BYTEL9_SHIFT) |
1249 (10 << A6XX_DBGC_CFG_DBGBUS_BYTEL10_SHIFT) |
1250 (11 << A6XX_DBGC_CFG_DBGBUS_BYTEL11_SHIFT) |
1251 (12 << A6XX_DBGC_CFG_DBGBUS_BYTEL12_SHIFT) |
1252 (13 << A6XX_DBGC_CFG_DBGBUS_BYTEL13_SHIFT) |
1253 (14 << A6XX_DBGC_CFG_DBGBUS_BYTEL14_SHIFT) |
1254 (15 << A6XX_DBGC_CFG_DBGBUS_BYTEL15_SHIFT));
1255
1256 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_0, 0);
1257 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_1, 0);
1258 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_2, 0);
1259 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_3, 0);
1260
 1261 a6xx_cx_dbgc = ioremap(device->reg_phys +
1262 (A6XX_CX_DBGC_CFG_DBGBUS_SEL_A << 2),
1263 (A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2 -
1264 A6XX_CX_DBGC_CFG_DBGBUS_SEL_A + 1) << 2);
1265
1266 if (a6xx_cx_dbgc) {
1267 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_CNTLT,
1268 (0xf << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT_SHIFT) |
 1269 (0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU_SHIFT) |
 1270 (0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN_SHIFT));
 1271
1272 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_CNTLM,
1273 0xf << A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE_SHIFT);
1274
1275 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0, 0);
1276 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1, 0);
1277 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2, 0);
1278 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3, 0);
1279
1280 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0,
1281 (0 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL0_SHIFT) |
1282 (1 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL1_SHIFT) |
1283 (2 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL2_SHIFT) |
1284 (3 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL3_SHIFT) |
1285 (4 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL4_SHIFT) |
1286 (5 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL5_SHIFT) |
1287 (6 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL6_SHIFT) |
1288 (7 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL7_SHIFT));
1289 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1,
1290 (8 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL8_SHIFT) |
1291 (9 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL9_SHIFT) |
1292 (10 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL10_SHIFT) |
1293 (11 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL11_SHIFT) |
1294 (12 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL12_SHIFT) |
1295 (13 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL13_SHIFT) |
1296 (14 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL14_SHIFT) |
1297 (15 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL15_SHIFT));
1298
1299 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0, 0);
1300 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1, 0);
1301 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2, 0);
1302 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3, 0);
1303 } else
1304 KGSL_DRV_ERR(device, "Unable to ioremap CX_DBGC_CFG block\n");
1305
 1306 for (i = 0; i < ARRAY_SIZE(a6xx_dbgc_debugbus_blocks); i++) {
1307 kgsl_snapshot_add_section(device,
1308 KGSL_SNAPSHOT_SECTION_DEBUGBUS,
1309 snapshot, a6xx_snapshot_dbgc_debugbus_block,
1310 (void *) &a6xx_dbgc_debugbus_blocks[i]);
1311 }
 1312
1313 if (a6xx_cx_dbgc) {
1314 for (i = 0; i < ARRAY_SIZE(a6xx_cx_dbgc_debugbus_blocks); i++) {
1315 kgsl_snapshot_add_section(device,
1316 KGSL_SNAPSHOT_SECTION_DEBUGBUS,
1317 snapshot, a6xx_snapshot_cx_dbgc_debugbus_block,
1318 (void *) &a6xx_cx_dbgc_debugbus_blocks[i]);
1319 }
1320 iounmap(a6xx_cx_dbgc);
1321 }
 1322}
1323
 1324static void a6xx_snapshot_gmu(struct kgsl_device *device,
1325 struct kgsl_snapshot *snapshot)
1326{
 1327 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 1328 struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
 1329
 1330 if (!kgsl_gmu_isenabled(device))
1331 return;
1332
 1333 adreno_snapshot_registers(device, snapshot, a6xx_gmu_registers,
 1334 ARRAY_SIZE(a6xx_gmu_registers) / 2);
 1335
1336 if (gpudev->gx_is_on(adreno_dev))
1337 adreno_snapshot_registers(device, snapshot,
1338 a6xx_gmu_gx_registers,
1339 ARRAY_SIZE(a6xx_gmu_gx_registers) / 2);
 1340}
1341
 1342/* a6xx_snapshot_sqe() - Dump SQE data in snapshot */
1343static size_t a6xx_snapshot_sqe(struct kgsl_device *device, u8 *buf,
1344 size_t remain, void *priv)
1345{
1346 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
1347 struct kgsl_snapshot_debug *header = (struct kgsl_snapshot_debug *)buf;
1348 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
1349 struct adreno_firmware *fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);
1350
1351 if (remain < DEBUG_SECTION_SZ(1)) {
1352 SNAPSHOT_ERR_NOMEM(device, "SQE VERSION DEBUG");
1353 return 0;
1354 }
1355
1356 /* Dump the SQE firmware version */
1357 header->type = SNAPSHOT_DEBUG_SQE_VERSION;
1358 header->size = 1;
1359 *data = fw->version;
1360
1361 return DEBUG_SECTION_SZ(1);
1362}
1363
 1364static void _a6xx_do_crashdump(struct kgsl_device *device)
1365{
1366 unsigned long wait_time;
1367 unsigned int reg = 0;
1368 unsigned int val;
1369
1370 crash_dump_valid = false;
1371
1372 if (a6xx_capturescript.gpuaddr == 0 ||
1373 a6xx_crashdump_registers.gpuaddr == 0)
1374 return;
1375
 1376 /* If the SMMU is stalled we cannot do a crash dump */
1377 kgsl_regread(device, A6XX_RBBM_STATUS3, &val);
1378 if (val & BIT(24))
1379 return;
1380
1381 /* Turn on APRIV so we can access the buffers */
1382 kgsl_regwrite(device, A6XX_CP_MISC_CNTL, 1);
1383
1384 kgsl_regwrite(device, A6XX_CP_CRASH_SCRIPT_BASE_LO,
1385 lower_32_bits(a6xx_capturescript.gpuaddr));
1386 kgsl_regwrite(device, A6XX_CP_CRASH_SCRIPT_BASE_HI,
1387 upper_32_bits(a6xx_capturescript.gpuaddr));
1388 kgsl_regwrite(device, A6XX_CP_CRASH_DUMP_CNTL, 1);
1389
1390 wait_time = jiffies + msecs_to_jiffies(CP_CRASH_DUMPER_TIMEOUT);
1391 while (!time_after(jiffies, wait_time)) {
1392 kgsl_regread(device, A6XX_CP_CRASH_DUMP_STATUS, &reg);
1393 if (reg & 0x2)
1394 break;
1395 cpu_relax();
1396 }
1397
1398 kgsl_regwrite(device, A6XX_CP_MISC_CNTL, 0);
1399
1400 if (!(reg & 0x2)) {
1401 KGSL_CORE_ERR("Crash dump timed out: 0x%X\n", reg);
1402 return;
1403 }
1404
1405 crash_dump_valid = true;
1406}
1407
1408/*
1409 * a6xx_snapshot() - A6XX GPU snapshot function
1410 * @adreno_dev: Device being snapshotted
1411 * @snapshot: Pointer to the snapshot instance
1412 *
1413 * This is where all of the A6XX specific bits and pieces are grabbed
1414 * into the snapshot memory
1415 */
1416void a6xx_snapshot(struct adreno_device *adreno_dev,
1417 struct kgsl_snapshot *snapshot)
1418{
1419 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
1420 struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
1421 struct adreno_snapshot_data *snap_data = gpudev->snapshot_data;
 1422 bool sptprac_on;
1423
1424 /* GMU TCM data dumped through AHB */
1425 a6xx_snapshot_gmu(device, snapshot);
1426
1427 sptprac_on = gpudev->sptprac_is_on(adreno_dev);
1428
1429 /* Return if the GX is off */
1430 if (!gpudev->gx_is_on(adreno_dev)) {
1431 pr_err("GX is off. Only dumping GMU data in snapshot\n");
1432 return;
1433 }
 1434
 1435 /* Dump the registers which get affected by crash dumper trigger */
1436 kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
1437 snapshot, a6xx_snapshot_pre_crashdump_regs, NULL);
1438
1439 /* Dump vbif registers as well which get affected by crash dumper */
1440 adreno_snapshot_vbif_registers(device, snapshot,
1441 a6xx_vbif_snapshot_registers,
1442 ARRAY_SIZE(a6xx_vbif_snapshot_registers));
1443
 1444 /* Try to run the crash dumper */
 1445 if (sptprac_on)
 1446 _a6xx_do_crashdump(device);
 1447
1448 kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
1449 snapshot, a6xx_snapshot_registers, NULL);
1450
 1451 /* CP_SQE indexed registers */
1452 kgsl_snapshot_indexed_registers(device, snapshot,
1453 A6XX_CP_SQE_STAT_ADDR, A6XX_CP_SQE_STAT_DATA,
1454 0, snap_data->sect_sizes->cp_pfp);
1455
1456 /* CP_DRAW_STATE */
1457 kgsl_snapshot_indexed_registers(device, snapshot,
1458 A6XX_CP_DRAW_STATE_ADDR, A6XX_CP_DRAW_STATE_DATA,
1459 0, 0x100);
1460
1461 /* SQE_UCODE Cache */
1462 kgsl_snapshot_indexed_registers(device, snapshot,
1463 A6XX_CP_SQE_UCODE_DBG_ADDR, A6XX_CP_SQE_UCODE_DBG_DATA,
1464 0, 0x6000);
1465
1466 /* CP ROQ */
1467 kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG,
1468 snapshot, adreno_snapshot_cp_roq,
1469 &snap_data->sect_sizes->roq);
1470
 1471 /* SQE Firmware */
1472 kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG,
1473 snapshot, a6xx_snapshot_sqe, NULL);
1474
 1475 /* Mempool debug data */
1476 a6xx_snapshot_mempool(device, snapshot);
1477
 1478 if (sptprac_on) {
 1479 /* Shader memory */
 1480 a6xx_snapshot_shader(device, snapshot);
 1481
 1482 /* MVC register section */
 1483 a6xx_snapshot_mvc_regs(device, snapshot);
 1484
 1485 /* registers dumped through DBG AHB */
 1486 a6xx_snapshot_dbgahb_regs(device, snapshot);
 1487 }
 1488
 1489 a6xx_snapshot_debugbus(device, snapshot);
 1490
 1491}
1492
1493static int _a6xx_crashdump_init_mvc(uint64_t *ptr, uint64_t *offset)
1494{
1495 int qwords = 0;
1496 unsigned int i, j, k;
1497 unsigned int count;
1498
1499 for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
1500 struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];
1501
1502 cluster->offset0 = *offset;
1503 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
1504
1505 if (j == 1)
1506 cluster->offset1 = *offset;
1507
1508 ptr[qwords++] = (cluster->id << 8) | (j << 4) | j;
1509 ptr[qwords++] =
1510 ((uint64_t)A6XX_CP_APERTURE_CNTL_HOST << 44) |
1511 (1 << 21) | 1;
1512
1513 for (k = 0; k < cluster->num_sets; k++) {
1514 count = REG_PAIR_COUNT(cluster->regs, k);
1515 ptr[qwords++] =
1516 a6xx_crashdump_registers.gpuaddr + *offset;
1517 ptr[qwords++] =
1518 (((uint64_t)cluster->regs[2 * k]) << 44) |
1519 count;
1520
1521 *offset += count * sizeof(unsigned int);
1522 }
1523 }
1524 }
1525
1526 return qwords;
1527}
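/*
 * Illustrative sketch (not part of the driver): the capture script built by
 * the init functions here is a sequence of qword pairs. For a register
 * write, the first qword holds the value to write and the second packs the
 * register offset into bits [63:44] together with the 1 << 21 flag and a
 * count of 1; for a register read, the first qword is the destination GPU
 * address and the second packs the starting register and the dword count.
 */
static uint64_t example_cd_write_op(unsigned int reg)
{
	return ((uint64_t)reg << 44) | (1 << 21) | 1;
}

static uint64_t example_cd_read_op(unsigned int start_reg, unsigned int count)
{
	return ((uint64_t)start_reg << 44) | count;
}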
1528
 1529static int _a6xx_crashdump_init_shader(struct a6xx_shader_block *block,
1530 uint64_t *ptr, uint64_t *offset)
1531{
1532 int qwords = 0;
1533 unsigned int j;
1534
1535 /* Capture each bank in the block */
1536 for (j = 0; j < A6XX_NUM_SHADER_BANKS; j++) {
1537 /* Program the aperture */
1538 ptr[qwords++] =
1539 (block->statetype << A6XX_SHADER_STATETYPE_SHIFT) | j;
1540 ptr[qwords++] = (((uint64_t) A6XX_HLSQ_DBG_READ_SEL << 44)) |
1541 (1 << 21) | 1;
1542
1543 /* Read all the data in one chunk */
1544 ptr[qwords++] = a6xx_crashdump_registers.gpuaddr + *offset;
1545 ptr[qwords++] =
1546 (((uint64_t) A6XX_HLSQ_DBG_AHB_READ_APERTURE << 44)) |
1547 block->sz;
1548
1549 /* Remember the offset of the first bank for easy access */
1550 if (j == 0)
1551 block->offset = *offset;
1552
1553 *offset += block->sz * sizeof(unsigned int);
1554 }
1555
1556 return qwords;
1557}
1558
 1559static int _a6xx_crashdump_init_ctx_dbgahb(uint64_t *ptr, uint64_t *offset)
1560{
1561 int qwords = 0;
1562 unsigned int i, j, k;
1563 unsigned int count;
1564
1565 for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
1566 struct a6xx_cluster_dbgahb_registers *cluster =
1567 &a6xx_dbgahb_ctx_clusters[i];
1568
1569 cluster->offset0 = *offset;
1570
1571 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
1572 if (j == 1)
1573 cluster->offset1 = *offset;
1574
1575 /* Program the aperture */
1576 ptr[qwords++] =
1577 ((cluster->statetype + j * 2) & 0xff) << 8;
1578 ptr[qwords++] =
1579 (((uint64_t)A6XX_HLSQ_DBG_READ_SEL << 44)) |
1580 (1 << 21) | 1;
1581
1582 for (k = 0; k < cluster->num_sets; k++) {
1583 unsigned int start = cluster->regs[2 * k];
1584
1585 count = REG_PAIR_COUNT(cluster->regs, k);
1586 ptr[qwords++] =
1587 a6xx_crashdump_registers.gpuaddr + *offset;
1588 ptr[qwords++] =
1589 (((uint64_t)(A6XX_HLSQ_DBG_AHB_READ_APERTURE +
1590 start - cluster->regbase / 4) << 44)) |
1591 count;
1592
1593 *offset += count * sizeof(unsigned int);
1594 }
1595 }
1596 }
1597 return qwords;
1598}
1599
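/*
 * Same as above, but for the non context DBG AHB register blocks, which only
 * need a single aperture program per block.
 */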
static int _a6xx_crashdump_init_non_ctx_dbgahb(uint64_t *ptr, uint64_t *offset)
{
	int qwords = 0;
	unsigned int i, k;
	unsigned int count;

	for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
		struct a6xx_non_ctx_dbgahb_registers *regs =
				&a6xx_non_ctx_dbgahb[i];

		regs->offset = *offset;

		/* Program the aperture */
		ptr[qwords++] = (regs->statetype & 0xff) << 8;
		ptr[qwords++] = (((uint64_t)A6XX_HLSQ_DBG_READ_SEL << 44)) |
			(1 << 21) | 1;

		for (k = 0; k < regs->num_sets; k++) {
			unsigned int start = regs->regs[2 * k];

			count = REG_PAIR_COUNT(regs->regs, k);
			ptr[qwords++] =
				a6xx_crashdump_registers.gpuaddr + *offset;
			ptr[qwords++] =
				(((uint64_t)(A6XX_HLSQ_DBG_AHB_READ_APERTURE +
					start - regs->regbase / 4) << 44)) |
					count;

			*offset += count * sizeof(unsigned int);
		}
	}
	return qwords;
}

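/*
 * a6xx_crashdump_init() - Allocate the capture script and data buffers and
 * pre-build the crash dumper script for A6XX targets
 * @adreno_dev: Pointer to the adreno device
 *
 * The script size is computed first (16 bytes per read or aperture write and
 * a dword of data per register), then the two global buffers are allocated
 * and the script is written into the capture buffer.
 */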
void a6xx_crashdump_init(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned int script_size = 0;
	unsigned int data_size = 0;
	unsigned int i, j, k;
	uint64_t *ptr;
	uint64_t offset = 0;

	if (a6xx_capturescript.gpuaddr != 0 &&
		a6xx_crashdump_registers.gpuaddr != 0)
		return;

	/*
	 * We need to allocate two buffers:
	 * 1 - the buffer to hold the crash script
	 * 2 - the buffer to hold the data
	 */

	/*
	 * To save the registers, we need 16 bytes per register pair for the
	 * script and a dword for each register in the data
	 */
	for (i = 0; i < ARRAY_SIZE(_a6xx_cd_registers); i++) {
		struct cdregs *regs = &_a6xx_cd_registers[i];

		/* Each pair needs 16 bytes (2 qwords) */
		script_size += (regs->size / 2) * 16;

		/* Each register needs a dword in the data */
		for (j = 0; j < regs->size / 2; j++)
			data_size += REG_PAIR_COUNT(regs->regs, j) *
				sizeof(unsigned int);
	}

	/*
	 * To save the shader blocks for each block in each type we need 32
	 * bytes for the script (16 bytes to program the aperture and 16 to
	 * read the data) and then a block specific number of bytes to hold
	 * the data
	 */
	for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
		script_size += 32 * A6XX_NUM_SHADER_BANKS;
		data_size += a6xx_shader_blocks[i].sz * sizeof(unsigned int) *
			A6XX_NUM_SHADER_BANKS;
	}

	/* Calculate the script and data size for MVC registers */
	for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
		struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];

		for (j = 0; j < A6XX_NUM_CTXTS; j++) {

			/* 16 bytes for programming the aperture */
			script_size += 16;

			/* Reading each pair of registers takes 16 bytes */
			script_size += 16 * cluster->num_sets;

			/* A dword per register read from the cluster list */
			for (k = 0; k < cluster->num_sets; k++)
				data_size += REG_PAIR_COUNT(cluster->regs, k) *
						sizeof(unsigned int);
		}
	}

	/* Calculate the script and data size for debug AHB registers */
	for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
		struct a6xx_cluster_dbgahb_registers *cluster =
				&a6xx_dbgahb_ctx_clusters[i];

		for (j = 0; j < A6XX_NUM_CTXTS; j++) {

			/* 16 bytes for programming the aperture */
			script_size += 16;

			/* Reading each pair of registers takes 16 bytes */
			script_size += 16 * cluster->num_sets;

			/* A dword per register read from the cluster list */
			for (k = 0; k < cluster->num_sets; k++)
				data_size += REG_PAIR_COUNT(cluster->regs, k) *
						sizeof(unsigned int);
		}
	}

	/*
	 * Calculate the script and data size for non context debug
	 * AHB registers
	 */
	for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
		struct a6xx_non_ctx_dbgahb_registers *regs =
				&a6xx_non_ctx_dbgahb[i];

		/* 16 bytes for programming the aperture */
		script_size += 16;

		/* Reading each pair of registers takes 16 bytes */
		script_size += 16 * regs->num_sets;

		/* A dword per register read from the cluster list */
		for (k = 0; k < regs->num_sets; k++)
			data_size += REG_PAIR_COUNT(regs->regs, k) *
				sizeof(unsigned int);
	}

	/* Now allocate the script and data buffers */

	/* The script buffer needs 2 extra qwords on the end */
	if (kgsl_allocate_global(device, &a6xx_capturescript,
		script_size + 16, KGSL_MEMFLAGS_GPUREADONLY,
		KGSL_MEMDESC_PRIVILEGED, "capturescript"))
		return;

	if (kgsl_allocate_global(device, &a6xx_crashdump_registers, data_size,
		0, KGSL_MEMDESC_PRIVILEGED, "capturescript_regs")) {
		kgsl_free_global(KGSL_DEVICE(adreno_dev), &a6xx_capturescript);
		return;
	}

	/* Build the crash script */

	ptr = (uint64_t *)a6xx_capturescript.hostptr;

	/* For the registers, program a read command for each pair */
	for (i = 0; i < ARRAY_SIZE(_a6xx_cd_registers); i++) {
		struct cdregs *regs = &_a6xx_cd_registers[i];

		for (j = 0; j < regs->size / 2; j++) {
			unsigned int r = REG_PAIR_COUNT(regs->regs, j);

			*ptr++ = a6xx_crashdump_registers.gpuaddr + offset;
			*ptr++ = (((uint64_t) regs->regs[2 * j]) << 44) | r;
			offset += r * sizeof(unsigned int);
		}
	}

	/* Program each shader block */
	for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
		ptr += _a6xx_crashdump_init_shader(&a6xx_shader_blocks[i], ptr,
							&offset);
	}

	/* Program the capturescript for the MVC registers */
	ptr += _a6xx_crashdump_init_mvc(ptr, &offset);

	ptr += _a6xx_crashdump_init_ctx_dbgahb(ptr, &offset);

	ptr += _a6xx_crashdump_init_non_ctx_dbgahb(ptr, &offset);

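	/*
	 * Terminate the script with the pair of zero qwords that the extra
	 * 16 bytes were allocated for above.
	 */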
	*ptr++ = 0;
	*ptr++ = 0;
}