/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/io.h>
#include "kgsl.h"
#include "adreno.h"
#include "kgsl_snapshot.h"
#include "adreno_snapshot.h"
#include "a6xx_reg.h"
#include "adreno_a6xx.h"
#include "kgsl_gmu.h"

#define A6XX_NUM_CTXTS 2
#define A6XX_NUM_AXI_ARB_BLOCKS 2
#define A6XX_NUM_XIN_AXI_BLOCKS 5
#define A6XX_NUM_XIN_CORE_BLOCKS 4

static const unsigned int a6xx_gras_cluster[] = {
	0x8000, 0x8006, 0x8010, 0x8092, 0x8094, 0x809D, 0x80A0, 0x80A6,
	0x80AF, 0x80F1, 0x8100, 0x8107, 0x8109, 0x8109, 0x8110, 0x8110,
	0x8400, 0x840B,
};

static const unsigned int a6xx_ps_cluster[] = {
	0x8800, 0x8806, 0x8809, 0x8811, 0x8818, 0x881E, 0x8820, 0x8865,
	0x8870, 0x8879, 0x8880, 0x8889, 0x8890, 0x8891, 0x8898, 0x8898,
	0x88C0, 0x88c1, 0x88D0, 0x88E3, 0x88F0, 0x88F3, 0x8900, 0x891A,
	0x8927, 0x8928, 0x8C00, 0x8C01, 0x8C17, 0x8C33, 0x9200, 0x9216,
	0x9218, 0x9236, 0x9300, 0x9306,
};

static const unsigned int a6xx_fe_cluster[] = {
	0x9300, 0x9306, 0x9800, 0x9806, 0x9B00, 0x9B07, 0xA000, 0xA009,
	0xA00E, 0xA0EF, 0xA0F8, 0xA0F8,
};

static const unsigned int a6xx_pc_vs_cluster[] = {
	0x9100, 0x9108, 0x9300, 0x9306, 0x9980, 0x9981, 0x9B00, 0x9B07,
};

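/*
 * Per-context register clusters read through the CP aperture. Each table
 * above holds start/end register pairs (inclusive); offset0 and offset1
 * record where the crash dumper places the context 0 and context 1 data
 * in the crash dump buffer.
 */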
static struct a6xx_cluster_registers {
	unsigned int id;
	const unsigned int *regs;
	unsigned int num_sets;
	unsigned int offset0;
	unsigned int offset1;
} a6xx_clusters[] = {
	{ CP_CLUSTER_GRAS, a6xx_gras_cluster, ARRAY_SIZE(a6xx_gras_cluster)/2 },
	{ CP_CLUSTER_PS, a6xx_ps_cluster, ARRAY_SIZE(a6xx_ps_cluster)/2 },
	{ CP_CLUSTER_FE, a6xx_fe_cluster, ARRAY_SIZE(a6xx_fe_cluster)/2 },
	{ CP_CLUSTER_PC_VS, a6xx_pc_vs_cluster,
		ARRAY_SIZE(a6xx_pc_vs_cluster)/2 },
};

struct a6xx_cluster_regs_info {
	struct a6xx_cluster_registers *cluster;
	unsigned int ctxt_id;
};

static const unsigned int a6xx_sp_vs_hlsq_cluster[] = {
	0xB800, 0xB803, 0xB820, 0xB822,
};

static const unsigned int a6xx_sp_vs_sp_cluster[] = {
	0xA800, 0xA824, 0xA830, 0xA83C, 0xA840, 0xA864, 0xA870, 0xA895,
	0xA8A0, 0xA8AF, 0xA8C0, 0xA8C3,
};

static const unsigned int a6xx_hlsq_duplicate_cluster[] = {
	0xBB10, 0xBB11, 0xBB20, 0xBB29,
};

static const unsigned int a6xx_hlsq_2d_duplicate_cluster[] = {
	0xBD80, 0xBD80,
};

static const unsigned int a6xx_sp_duplicate_cluster[] = {
	0xAB00, 0xAB00, 0xAB04, 0xAB05, 0xAB10, 0xAB1B, 0xAB20, 0xAB20,
};

static const unsigned int a6xx_tp_duplicate_cluster[] = {
	0xB300, 0xB307, 0xB309, 0xB309, 0xB380, 0xB382,
};

static const unsigned int a6xx_sp_ps_hlsq_cluster[] = {
	0xB980, 0xB980, 0xB982, 0xB987, 0xB990, 0xB99B, 0xB9A0, 0xB9A2,
	0xB9C0, 0xB9C9,
};

static const unsigned int a6xx_sp_ps_hlsq_2d_cluster[] = {
	0xBD80, 0xBD80,
};

static const unsigned int a6xx_sp_ps_sp_cluster[] = {
	0xA980, 0xA9A8, 0xA9B0, 0xA9BC, 0xA9D0, 0xA9D3, 0xA9E0, 0xA9F3,
	0xAA00, 0xAA00, 0xAA30, 0xAA31,
};

static const unsigned int a6xx_sp_ps_sp_2d_cluster[] = {
	0xACC0, 0xACC0,
};

static const unsigned int a6xx_sp_ps_tp_cluster[] = {
	0xB180, 0xB183, 0xB190, 0xB191,
};

static const unsigned int a6xx_sp_ps_tp_2d_cluster[] = {
	0xB4C0, 0xB4D1,
};

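/*
 * Context register clusters that are read through the HLSQ DBG AHB aperture
 * rather than the CP aperture. statetype selects the block via
 * A6XX_HLSQ_DBG_READ_SEL and regbase is used by a6xx_read_dbgahb() to
 * compute the aperture offset; regs holds start/end register pairs.
 */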
static struct a6xx_cluster_dbgahb_registers {
	unsigned int id;
	unsigned int regbase;
	unsigned int statetype;
	const unsigned int *regs;
	unsigned int num_sets;
	unsigned int offset0;
	unsigned int offset1;
} a6xx_dbgahb_ctx_clusters[] = {
	{ CP_CLUSTER_SP_VS, 0x0002E000, 0x41, a6xx_sp_vs_hlsq_cluster,
		ARRAY_SIZE(a6xx_sp_vs_hlsq_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002A000, 0x21, a6xx_sp_vs_sp_cluster,
		ARRAY_SIZE(a6xx_sp_vs_sp_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002E000, 0x41, a6xx_hlsq_duplicate_cluster,
		ARRAY_SIZE(a6xx_hlsq_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002F000, 0x45, a6xx_hlsq_2d_duplicate_cluster,
		ARRAY_SIZE(a6xx_hlsq_2d_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002A000, 0x21, a6xx_sp_duplicate_cluster,
		ARRAY_SIZE(a6xx_sp_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002C000, 0x1, a6xx_tp_duplicate_cluster,
		ARRAY_SIZE(a6xx_tp_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002E000, 0x42, a6xx_sp_ps_hlsq_cluster,
		ARRAY_SIZE(a6xx_sp_ps_hlsq_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002F000, 0x46, a6xx_sp_ps_hlsq_2d_cluster,
		ARRAY_SIZE(a6xx_sp_ps_hlsq_2d_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002A000, 0x22, a6xx_sp_ps_sp_cluster,
		ARRAY_SIZE(a6xx_sp_ps_sp_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002B000, 0x26, a6xx_sp_ps_sp_2d_cluster,
		ARRAY_SIZE(a6xx_sp_ps_sp_2d_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002C000, 0x2, a6xx_sp_ps_tp_cluster,
		ARRAY_SIZE(a6xx_sp_ps_tp_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002D000, 0x6, a6xx_sp_ps_tp_2d_cluster,
		ARRAY_SIZE(a6xx_sp_ps_tp_2d_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002E000, 0x42, a6xx_hlsq_duplicate_cluster,
		ARRAY_SIZE(a6xx_hlsq_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002A000, 0x22, a6xx_sp_duplicate_cluster,
		ARRAY_SIZE(a6xx_sp_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002C000, 0x2, a6xx_tp_duplicate_cluster,
		ARRAY_SIZE(a6xx_tp_duplicate_cluster) / 2 },
};

struct a6xx_cluster_dbgahb_regs_info {
	struct a6xx_cluster_dbgahb_registers *cluster;
	unsigned int ctxt_id;
};

static const unsigned int a6xx_hlsq_non_ctx_registers[] = {
	0xBE00, 0xBE01, 0xBE04, 0xBE05, 0xBE08, 0xBE09, 0xBE10, 0xBE15,
	0xBE20, 0xBE23,
};

static const unsigned int a6xx_sp_non_ctx_registers[] = {
	0xAE00, 0xAE04, 0xAE0C, 0xAE0C, 0xAE0F, 0xAE2B, 0xAE30, 0xAE32,
	0xAE35, 0xAE35, 0xAE3A, 0xAE3F, 0xAE50, 0xAE52,
};

static const unsigned int a6xx_tp_non_ctx_registers[] = {
	0xB600, 0xB601, 0xB604, 0xB605, 0xB610, 0xB61B, 0xB620, 0xB623,
};

static struct a6xx_non_ctx_dbgahb_registers {
	unsigned int regbase;
	unsigned int statetype;
	const unsigned int *regs;
	unsigned int num_sets;
	unsigned int offset;
} a6xx_non_ctx_dbgahb[] = {
	{ 0x0002F800, 0x40, a6xx_hlsq_non_ctx_registers,
		ARRAY_SIZE(a6xx_hlsq_non_ctx_registers) / 2 },
	{ 0x0002B800, 0x20, a6xx_sp_non_ctx_registers,
		ARRAY_SIZE(a6xx_sp_non_ctx_registers) / 2 },
	{ 0x0002D800, 0x0, a6xx_tp_non_ctx_registers,
		ARRAY_SIZE(a6xx_tp_non_ctx_registers) / 2 },
};

static const unsigned int a6xx_vbif_ver_20xxxxxx_registers[] = {
	/* VBIF */
	0x3000, 0x3007, 0x300C, 0x3014, 0x3018, 0x302D, 0x3030, 0x3031,
	0x3034, 0x3036, 0x303C, 0x303D, 0x3040, 0x3040, 0x3042, 0x3042,
	0x3049, 0x3049, 0x3058, 0x3058, 0x305A, 0x3061, 0x3064, 0x3068,
	0x306C, 0x306D, 0x3080, 0x3088, 0x308B, 0x308C, 0x3090, 0x3094,
	0x3098, 0x3098, 0x309C, 0x309C, 0x30C0, 0x30C0, 0x30C8, 0x30C8,
	0x30D0, 0x30D0, 0x30D8, 0x30D8, 0x30E0, 0x30E0, 0x3100, 0x3100,
	0x3108, 0x3108, 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120,
	0x3124, 0x3125, 0x3129, 0x3129, 0x3131, 0x3131, 0x3154, 0x3154,
	0x3156, 0x3156, 0x3158, 0x3158, 0x315A, 0x315A, 0x315C, 0x315C,
	0x315E, 0x315E, 0x3160, 0x3160, 0x3162, 0x3162, 0x340C, 0x340C,
	0x3410, 0x3410, 0x3800, 0x3801,
};

static const unsigned int a6xx_gmu_gx_registers[] = {
	/* GMU GX */
	0x1A800, 0x1A800, 0x1A810, 0x1A813, 0x1A816, 0x1A816, 0x1A818, 0x1A81B,
	0x1A81E, 0x1A81E, 0x1A820, 0x1A823, 0x1A826, 0x1A826, 0x1A828, 0x1A82B,
	0x1A82E, 0x1A82E, 0x1A830, 0x1A833, 0x1A836, 0x1A836, 0x1A838, 0x1A83B,
	0x1A83E, 0x1A83E, 0x1A840, 0x1A843, 0x1A846, 0x1A846, 0x1A880, 0x1A884,
	0x1A900, 0x1A92B, 0x1A940, 0x1A940,
};

static const unsigned int a6xx_gmu_registers[] = {
	/* GMU TCM */
	0x1B400, 0x1C3FF, 0x1C400, 0x1D3FF,
	/* GMU CX */
	0x1F400, 0x1F407, 0x1F410, 0x1F412, 0x1F500, 0x1F500, 0x1F507, 0x1F50A,
	0x1F800, 0x1F804, 0x1F807, 0x1F808, 0x1F80B, 0x1F80C, 0x1F80F, 0x1F81C,
	0x1F824, 0x1F82A, 0x1F82D, 0x1F830, 0x1F840, 0x1F853, 0x1F887, 0x1F889,
	0x1F8A0, 0x1F8A2, 0x1F8A4, 0x1F8AF, 0x1F8C0, 0x1F8C3, 0x1F8D0, 0x1F8D0,
	0x1F8E4, 0x1F8E4, 0x1F8E8, 0x1F8EC, 0x1F900, 0x1F903, 0x1F940, 0x1F940,
	0x1F942, 0x1F944, 0x1F94C, 0x1F94D, 0x1F94F, 0x1F951, 0x1F954, 0x1F954,
	0x1F957, 0x1F958, 0x1F95D, 0x1F95D, 0x1F962, 0x1F962, 0x1F964, 0x1F965,
	0x1F980, 0x1F986, 0x1F990, 0x1F99E, 0x1F9C0, 0x1F9C0, 0x1F9C5, 0x1F9CC,
	0x1F9E0, 0x1F9E2, 0x1F9F0, 0x1F9F0, 0x1FA00, 0x1FA03,
	/* GPU RSCC */
	0x2348C, 0x2348C, 0x23501, 0x23502, 0x23740, 0x23742, 0x23744, 0x23747,
	0x2374C, 0x23787, 0x237EC, 0x237EF, 0x237F4, 0x2382F, 0x23894, 0x23897,
	0x2389C, 0x238D7, 0x2393C, 0x2393F, 0x23944, 0x2397F,
	/* GMU AO */
	0x23B00, 0x23B16, 0x23C00, 0x23C00,
	/* GPU CC */
	0x24000, 0x24012, 0x24040, 0x24052, 0x24400, 0x24404, 0x24407, 0x2440B,
	0x24415, 0x2441C, 0x2441E, 0x2442D, 0x2443C, 0x2443D, 0x2443F, 0x24440,
	0x24442, 0x24449, 0x24458, 0x2445A, 0x24540, 0x2455E, 0x24800, 0x24802,
	0x24C00, 0x24C02, 0x25400, 0x25402, 0x25800, 0x25802, 0x25C00, 0x25C02,
	0x26000, 0x26002,
	/* GPU CC ACD */
	0x26400, 0x26416, 0x26420, 0x26427,
};

static const struct adreno_vbif_snapshot_registers
a6xx_vbif_snapshot_registers[] = {
	{ 0x20040000, 0xFF000000, a6xx_vbif_ver_20xxxxxx_registers,
		ARRAY_SIZE(a6xx_vbif_ver_20xxxxxx_registers)/2},
};

/*
 * Set of registers to dump for A6XX on snapshot.
 * Registers in pairs - first value is the start offset, second
 * is the stop offset (inclusive)
 */

static const unsigned int a6xx_registers[] = {
	/* RBBM */
	0x0000, 0x0002, 0x0010, 0x0010, 0x0012, 0x0012, 0x0018, 0x001B,
	0x001e, 0x0032, 0x0038, 0x003C, 0x0042, 0x0042, 0x0044, 0x0044,
	0x0047, 0x0047, 0x0056, 0x0056, 0x00AD, 0x00AE, 0x00B0, 0x00FB,
	0x0100, 0x011D, 0x0200, 0x020D, 0x0218, 0x023D, 0x0400, 0x04F9,
	0x0500, 0x0500, 0x0505, 0x050B, 0x050E, 0x0511, 0x0533, 0x0533,
	0x0540, 0x0555,
	/* CP */
	0x0800, 0x0808, 0x0810, 0x0813, 0x0820, 0x0821, 0x0823, 0x0824,
	0x0826, 0x0827, 0x0830, 0x0833, 0x0840, 0x0843, 0x084F, 0x086F,
	0x0880, 0x088A, 0x08A0, 0x08AB, 0x08C0, 0x08C4, 0x08D0, 0x08DD,
	0x08F0, 0x08F3, 0x0900, 0x0903, 0x0908, 0x0911, 0x0928, 0x093E,
	0x0942, 0x094D, 0x0980, 0x0984, 0x098D, 0x0996, 0x0998, 0x099E,
	0x09A0, 0x09A6, 0x09A8, 0x09AE, 0x09B0, 0x09B1, 0x09C2, 0x09C8,
	0x0A00, 0x0A03,
	/* VSC */
	0x0C00, 0x0C04, 0x0C06, 0x0C06, 0x0C10, 0x0CD9, 0x0E00, 0x0E0E,
	/* UCHE */
	0x0E10, 0x0E13, 0x0E17, 0x0E19, 0x0E1C, 0x0E2B, 0x0E30, 0x0E32,
	0x0E38, 0x0E39,
	/* GRAS */
	0x8600, 0x8601, 0x8610, 0x861B, 0x8620, 0x8620, 0x8628, 0x862B,
	0x8630, 0x8637,
	/* RB */
	0x8E01, 0x8E01, 0x8E04, 0x8E05, 0x8E07, 0x8E08, 0x8E0C, 0x8E0C,
	0x8E10, 0x8E1C, 0x8E20, 0x8E25, 0x8E28, 0x8E28, 0x8E2C, 0x8E2F,
	0x8E3B, 0x8E3E, 0x8E40, 0x8E43, 0x8E50, 0x8E5E, 0x8E70, 0x8E77,
	/* VPC */
	0x9600, 0x9604, 0x9624, 0x9637,
	/* PC */
	0x9E00, 0x9E01, 0x9E03, 0x9E0E, 0x9E11, 0x9E16, 0x9E19, 0x9E19,
	0x9E1C, 0x9E1C, 0x9E20, 0x9E23, 0x9E30, 0x9E31, 0x9E34, 0x9E34,
	0x9E70, 0x9E72, 0x9E78, 0x9E79, 0x9E80, 0x9FFF,
	/* VFD */
	0xA600, 0xA601, 0xA603, 0xA603, 0xA60A, 0xA60A, 0xA610, 0xA617,
	0xA630, 0xA630,
};

/*
 * Set of registers to dump for A6XX before actually triggering crash dumper.
 * Registers in pairs - first value is the start offset, second
 * is the stop offset (inclusive)
 */
static const unsigned int a6xx_pre_crashdumper_registers[] = {
	/* RBBM: RBBM_STATUS - RBBM_STATUS3 */
	0x210, 0x213,
	/* CP: CP_STATUS_1 */
	0x825, 0x825,
};

enum a6xx_debugbus_id {
	A6XX_DBGBUS_CP = 0x1,
	A6XX_DBGBUS_RBBM = 0x2,
	A6XX_DBGBUS_VBIF = 0x3,
	A6XX_DBGBUS_HLSQ = 0x4,
	A6XX_DBGBUS_UCHE = 0x5,
	A6XX_DBGBUS_DPM = 0x6,
	A6XX_DBGBUS_TESS = 0x7,
	A6XX_DBGBUS_PC = 0x8,
	A6XX_DBGBUS_VFDP = 0x9,
	A6XX_DBGBUS_VPC = 0xa,
	A6XX_DBGBUS_TSE = 0xb,
	A6XX_DBGBUS_RAS = 0xc,
	A6XX_DBGBUS_VSC = 0xd,
	A6XX_DBGBUS_COM = 0xe,
	A6XX_DBGBUS_LRZ = 0x10,
	A6XX_DBGBUS_A2D = 0x11,
	A6XX_DBGBUS_CCUFCHE = 0x12,
	A6XX_DBGBUS_GMU_CX = 0x13,
	A6XX_DBGBUS_RBP = 0x14,
	A6XX_DBGBUS_DCS = 0x15,
	A6XX_DBGBUS_RBBM_CFG = 0x16,
	A6XX_DBGBUS_CX = 0x17,
	A6XX_DBGBUS_GMU_GX = 0x18,
	A6XX_DBGBUS_TPFCHE = 0x19,
	A6XX_DBGBUS_GPC = 0x1d,
	A6XX_DBGBUS_LARC = 0x1e,
	A6XX_DBGBUS_HLSQ_SPTP = 0x1f,
	A6XX_DBGBUS_RB_0 = 0x20,
	A6XX_DBGBUS_RB_1 = 0x21,
	A6XX_DBGBUS_UCHE_WRAPPER = 0x24,
	A6XX_DBGBUS_CCU_0 = 0x28,
	A6XX_DBGBUS_CCU_1 = 0x29,
	A6XX_DBGBUS_VFD_0 = 0x38,
	A6XX_DBGBUS_VFD_1 = 0x39,
	A6XX_DBGBUS_VFD_2 = 0x3a,
	A6XX_DBGBUS_VFD_3 = 0x3b,
	A6XX_DBGBUS_SP_0 = 0x40,
	A6XX_DBGBUS_SP_1 = 0x41,
	A6XX_DBGBUS_TPL1_0 = 0x48,
	A6XX_DBGBUS_TPL1_1 = 0x49,
	A6XX_DBGBUS_TPL1_2 = 0x4a,
	A6XX_DBGBUS_TPL1_3 = 0x4b,
};

static const struct adreno_debugbus_block a6xx_dbgc_debugbus_blocks[] = {
	{ A6XX_DBGBUS_CP, 0x100, },
	{ A6XX_DBGBUS_RBBM, 0x100, },
	{ A6XX_DBGBUS_HLSQ, 0x100, },
	{ A6XX_DBGBUS_UCHE, 0x100, },
	{ A6XX_DBGBUS_DPM, 0x100, },
	{ A6XX_DBGBUS_TESS, 0x100, },
	{ A6XX_DBGBUS_PC, 0x100, },
	{ A6XX_DBGBUS_VFDP, 0x100, },
	{ A6XX_DBGBUS_VPC, 0x100, },
	{ A6XX_DBGBUS_TSE, 0x100, },
	{ A6XX_DBGBUS_RAS, 0x100, },
	{ A6XX_DBGBUS_VSC, 0x100, },
	{ A6XX_DBGBUS_COM, 0x100, },
	{ A6XX_DBGBUS_LRZ, 0x100, },
	{ A6XX_DBGBUS_A2D, 0x100, },
	{ A6XX_DBGBUS_CCUFCHE, 0x100, },
	{ A6XX_DBGBUS_RBP, 0x100, },
	{ A6XX_DBGBUS_DCS, 0x100, },
	{ A6XX_DBGBUS_RBBM_CFG, 0x100, },
	{ A6XX_DBGBUS_GMU_GX, 0x100, },
	{ A6XX_DBGBUS_TPFCHE, 0x100, },
	{ A6XX_DBGBUS_GPC, 0x100, },
	{ A6XX_DBGBUS_LARC, 0x100, },
	{ A6XX_DBGBUS_HLSQ_SPTP, 0x100, },
	{ A6XX_DBGBUS_RB_0, 0x100, },
	{ A6XX_DBGBUS_RB_1, 0x100, },
	{ A6XX_DBGBUS_UCHE_WRAPPER, 0x100, },
	{ A6XX_DBGBUS_CCU_0, 0x100, },
	{ A6XX_DBGBUS_CCU_1, 0x100, },
	{ A6XX_DBGBUS_VFD_0, 0x100, },
	{ A6XX_DBGBUS_VFD_1, 0x100, },
	{ A6XX_DBGBUS_VFD_2, 0x100, },
	{ A6XX_DBGBUS_VFD_3, 0x100, },
	{ A6XX_DBGBUS_SP_0, 0x100, },
	{ A6XX_DBGBUS_SP_1, 0x100, },
	{ A6XX_DBGBUS_TPL1_0, 0x100, },
	{ A6XX_DBGBUS_TPL1_1, 0x100, },
	{ A6XX_DBGBUS_TPL1_2, 0x100, },
	{ A6XX_DBGBUS_TPL1_3, 0x100, },
};

static const struct adreno_debugbus_block a6xx_vbif_debugbus_blocks = {
	A6XX_DBGBUS_VBIF, 0x100,
};

static void __iomem *a6xx_cx_dbgc;
static const struct adreno_debugbus_block a6xx_cx_dbgc_debugbus_blocks[] = {
	{ A6XX_DBGBUS_GMU_CX, 0x100, },
	{ A6XX_DBGBUS_CX, 0x100, },
};

#define A6XX_NUM_SHADER_BANKS 3
#define A6XX_SHADER_STATETYPE_SHIFT 8

enum a6xx_shader_obj {
	A6XX_TP0_TMO_DATA = 0x9,
	A6XX_TP0_SMO_DATA = 0xa,
	A6XX_TP0_MIPMAP_BASE_DATA = 0xb,
	A6XX_TP1_TMO_DATA = 0x19,
	A6XX_TP1_SMO_DATA = 0x1a,
	A6XX_TP1_MIPMAP_BASE_DATA = 0x1b,
	A6XX_SP_INST_DATA = 0x29,
	A6XX_SP_LB_0_DATA = 0x2a,
	A6XX_SP_LB_1_DATA = 0x2b,
	A6XX_SP_LB_2_DATA = 0x2c,
	A6XX_SP_LB_3_DATA = 0x2d,
	A6XX_SP_LB_4_DATA = 0x2e,
	A6XX_SP_LB_5_DATA = 0x2f,
	A6XX_SP_CB_BINDLESS_DATA = 0x30,
	A6XX_SP_CB_LEGACY_DATA = 0x31,
	A6XX_SP_UAV_DATA = 0x32,
	A6XX_SP_INST_TAG = 0x33,
	A6XX_SP_CB_BINDLESS_TAG = 0x34,
	A6XX_SP_TMO_UMO_TAG = 0x35,
	A6XX_SP_SMO_TAG = 0x36,
	A6XX_SP_STATE_DATA = 0x37,
	A6XX_HLSQ_CHUNK_CVS_RAM = 0x49,
	A6XX_HLSQ_CHUNK_CPS_RAM = 0x4a,
	A6XX_HLSQ_CHUNK_CVS_RAM_TAG = 0x4b,
	A6XX_HLSQ_CHUNK_CPS_RAM_TAG = 0x4c,
	A6XX_HLSQ_ICB_CVS_CB_BASE_TAG = 0x4d,
	A6XX_HLSQ_ICB_CPS_CB_BASE_TAG = 0x4e,
	A6XX_HLSQ_CVS_MISC_RAM = 0x50,
	A6XX_HLSQ_CPS_MISC_RAM = 0x51,
	A6XX_HLSQ_INST_RAM = 0x52,
	A6XX_HLSQ_GFX_CVS_CONST_RAM = 0x53,
	A6XX_HLSQ_GFX_CPS_CONST_RAM = 0x54,
	A6XX_HLSQ_CVS_MISC_RAM_TAG = 0x55,
	A6XX_HLSQ_CPS_MISC_RAM_TAG = 0x56,
	A6XX_HLSQ_INST_RAM_TAG = 0x57,
	A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG = 0x58,
	A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG = 0x59,
	A6XX_HLSQ_PWR_REST_RAM = 0x5a,
	A6XX_HLSQ_PWR_REST_TAG = 0x5b,
	A6XX_HLSQ_DATAPATH_META = 0x60,
	A6XX_HLSQ_FRONTEND_META = 0x61,
	A6XX_HLSQ_INDIRECT_META = 0x62,
	A6XX_HLSQ_BACKEND_META = 0x63
};

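/*
 * Shader memory blocks captured by the crash dumper. sz is the size of one
 * bank of the block; offset records where the first bank lands in the crash
 * dump buffer (filled in by _a6xx_crashdump_init_shader()).
 */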
struct a6xx_shader_block {
	unsigned int statetype;
	unsigned int sz;
	uint64_t offset;
};

struct a6xx_shader_block_info {
	struct a6xx_shader_block *block;
	unsigned int bank;
	uint64_t offset;
};

static struct a6xx_shader_block a6xx_shader_blocks[] = {
	{A6XX_TP0_TMO_DATA, 0x200},
	{A6XX_TP0_SMO_DATA, 0x80,},
	{A6XX_TP0_MIPMAP_BASE_DATA, 0x3C0},
	{A6XX_TP1_TMO_DATA, 0x200},
	{A6XX_TP1_SMO_DATA, 0x80,},
	{A6XX_TP1_MIPMAP_BASE_DATA, 0x3C0},
	{A6XX_SP_INST_DATA, 0x800},
	{A6XX_SP_LB_0_DATA, 0x800},
	{A6XX_SP_LB_1_DATA, 0x800},
	{A6XX_SP_LB_2_DATA, 0x800},
	{A6XX_SP_LB_3_DATA, 0x800},
	{A6XX_SP_LB_4_DATA, 0x800},
	{A6XX_SP_LB_5_DATA, 0x200},
	{A6XX_SP_CB_BINDLESS_DATA, 0x2000},
	{A6XX_SP_CB_LEGACY_DATA, 0x280,},
	{A6XX_SP_UAV_DATA, 0x80,},
	{A6XX_SP_INST_TAG, 0x80,},
	{A6XX_SP_CB_BINDLESS_TAG, 0x80,},
	{A6XX_SP_TMO_UMO_TAG, 0x80,},
	{A6XX_SP_SMO_TAG, 0x80},
	{A6XX_SP_STATE_DATA, 0x3F},
	{A6XX_HLSQ_CHUNK_CVS_RAM, 0x1C0},
	{A6XX_HLSQ_CHUNK_CPS_RAM, 0x280},
	{A6XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40,},
	{A6XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40,},
	{A6XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x4,},
	{A6XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x4,},
	{A6XX_HLSQ_CVS_MISC_RAM, 0x1C0},
	{A6XX_HLSQ_CPS_MISC_RAM, 0x580},
	{A6XX_HLSQ_INST_RAM, 0x800},
	{A6XX_HLSQ_GFX_CVS_CONST_RAM, 0x800},
	{A6XX_HLSQ_GFX_CPS_CONST_RAM, 0x800},
	{A6XX_HLSQ_CVS_MISC_RAM_TAG, 0x8,},
	{A6XX_HLSQ_CPS_MISC_RAM_TAG, 0x4,},
	{A6XX_HLSQ_INST_RAM_TAG, 0x80,},
	{A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0xC,},
	{A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x10},
	{A6XX_HLSQ_PWR_REST_RAM, 0x28},
	{A6XX_HLSQ_PWR_REST_TAG, 0x14},
	{A6XX_HLSQ_DATAPATH_META, 0x40,},
	{A6XX_HLSQ_FRONTEND_META, 0x40},
	{A6XX_HLSQ_INDIRECT_META, 0x40,}
};

static struct kgsl_memdesc a6xx_capturescript;
static struct kgsl_memdesc a6xx_crashdump_registers;
static bool crash_dump_valid;

static size_t a6xx_legacy_snapshot_registers(struct kgsl_device *device,
		u8 *buf, size_t remain)
{
	struct kgsl_snapshot_registers regs = {
		.regs = a6xx_registers,
		.count = ARRAY_SIZE(a6xx_registers) / 2,
	};

	return kgsl_snapshot_dump_registers(device, buf, remain, &regs);
}

static struct cdregs {
	const unsigned int *regs;
	unsigned int size;
} _a6xx_cd_registers[] = {
	{ a6xx_registers, ARRAY_SIZE(a6xx_registers) },
};

#define REG_PAIR_COUNT(_a, _i) \
	(((_a)[(2 * (_i)) + 1] - (_a)[2 * (_i)]) + 1)

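/*
 * a6xx_snapshot_registers() - Dump the global register ranges, using the
 * crash dumper output when it is valid and falling back to the legacy AHB
 * readback path otherwise.
 */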
static size_t a6xx_snapshot_registers(struct kgsl_device *device, u8 *buf,
		size_t remain, void *priv)
{
	struct kgsl_snapshot_regs *header = (struct kgsl_snapshot_regs *)buf;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int *src = (unsigned int *)a6xx_crashdump_registers.hostptr;
	unsigned int i, j, k;
	unsigned int count = 0;

	if (crash_dump_valid == false)
		return a6xx_legacy_snapshot_registers(device, buf, remain);

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	remain -= sizeof(*header);

	for (i = 0; i < ARRAY_SIZE(_a6xx_cd_registers); i++) {
		struct cdregs *regs = &_a6xx_cd_registers[i];

		for (j = 0; j < regs->size / 2; j++) {
			unsigned int start = regs->regs[2 * j];
			unsigned int end = regs->regs[(2 * j) + 1];

			if (remain < ((end - start) + 1) * 8) {
				SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
				goto out;
			}

			remain -= ((end - start) + 1) * 8;

			for (k = start; k <= end; k++, count++) {
				*data++ = k;
				*data++ = *src++;
			}
		}
	}

out:
	header->count = count;

	/* Return the size of the section */
	return (count * 8) + sizeof(*header);
}

static size_t a6xx_snapshot_pre_crashdump_regs(struct kgsl_device *device,
		u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_registers pre_cdregs = {
		.regs = a6xx_pre_crashdumper_registers,
		.count = ARRAY_SIZE(a6xx_pre_crashdumper_registers)/2,
	};

	return kgsl_snapshot_dump_registers(device, buf, remain, &pre_cdregs);
}

static size_t a6xx_snapshot_shader_memory(struct kgsl_device *device,
		u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_shader *header =
		(struct kgsl_snapshot_shader *) buf;
	struct a6xx_shader_block_info *info =
		(struct a6xx_shader_block_info *) priv;
	struct a6xx_shader_block *block = info->block;
	unsigned int *data = (unsigned int *) (buf + sizeof(*header));

	if (remain < SHADER_SECTION_SZ(block->sz)) {
		SNAPSHOT_ERR_NOMEM(device, "SHADER MEMORY");
		return 0;
	}

	header->type = block->statetype;
	header->index = info->bank;
	header->size = block->sz;

	memcpy(data, a6xx_crashdump_registers.hostptr + info->offset,
		block->sz);

	return SHADER_SECTION_SZ(block->sz);
}

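/* a6xx_snapshot_shader() - Dump shader memory captured by the crash dumper */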
static void a6xx_snapshot_shader(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot)
{
	unsigned int i, j;
	struct a6xx_shader_block_info info;

	/* Shader blocks can only be read by the crash dumper */
	if (crash_dump_valid == false)
		return;

	for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
		for (j = 0; j < A6XX_NUM_SHADER_BANKS; j++) {
			info.block = &a6xx_shader_blocks[i];
			info.bank = j;
			info.offset = a6xx_shader_blocks[i].offset +
				(j * a6xx_shader_blocks[i].sz);

			/* Shader working/shadow memory */
			kgsl_snapshot_add_section(device,
				KGSL_SNAPSHOT_SECTION_SHADER,
				snapshot, a6xx_snapshot_shader_memory, &info);
		}
	}
}

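/* a6xx_snapshot_mempool() - Dump CP mempool data via its indexed debug registers */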
static void a6xx_snapshot_mempool(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot)
{
	unsigned int pool_size;
	u8 *buf = snapshot->ptr;

	/* Set the mempool size to 0 to stabilize it while dumping */
	kgsl_regread(device, A6XX_CP_MEM_POOL_SIZE, &pool_size);
	kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, 0);

	kgsl_snapshot_indexed_registers(device, snapshot,
		A6XX_CP_MEM_POOL_DBG_ADDR, A6XX_CP_MEM_POOL_DBG_DATA,
		0, 0x2060);

	/*
	 * Data at offset 0x2000 in the mempool section is the mempool size.
	 * Since we set it to 0, patch in the original size so that the data
	 * is consistent.
	 */
	if (buf < snapshot->ptr) {
		unsigned int *data;

		/* Skip over the headers */
		buf += sizeof(struct kgsl_snapshot_section_header) +
			sizeof(struct kgsl_snapshot_indexed_regs);

		data = (unsigned int *)buf + 0x2000;
		*data = pool_size;
	}

	/* Restore the saved mempool size */
	kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, pool_size);
}

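/* a6xx_read_dbgahb() - Read one register through the HLSQ DBG AHB read aperture */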
static inline unsigned int a6xx_read_dbgahb(struct kgsl_device *device,
				unsigned int regbase, unsigned int reg)
{
	unsigned int read_reg = A6XX_HLSQ_DBG_AHB_READ_APERTURE +
				reg - regbase / 4;
	unsigned int val;

	kgsl_regread(device, read_reg, &val);
	return val;
}

static size_t a6xx_legacy_snapshot_cluster_dbgahb(struct kgsl_device *device,
				u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_mvc_regs *header =
				(struct kgsl_snapshot_mvc_regs *)buf;
	struct a6xx_cluster_dbgahb_regs_info *info =
				(struct a6xx_cluster_dbgahb_regs_info *)priv;
	struct a6xx_cluster_dbgahb_registers *cur_cluster = info->cluster;
	unsigned int read_sel;
	unsigned int data_size = 0;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	int i, j;

	if (!device->snapshot_legacy)
		return 0;

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	remain -= sizeof(*header);

	header->ctxt_id = info->ctxt_id;
	header->cluster_id = cur_cluster->id;

	read_sel = ((cur_cluster->statetype + info->ctxt_id * 2) & 0xff) << 8;
	kgsl_regwrite(device, A6XX_HLSQ_DBG_READ_SEL, read_sel);

	for (i = 0; i < cur_cluster->num_sets; i++) {
		unsigned int start = cur_cluster->regs[2 * i];
		unsigned int end = cur_cluster->regs[2 * i + 1];

		if (remain < (end - start + 3) * 4) {
			SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
			goto out;
		}

		remain -= (end - start + 3) * 4;
		data_size += (end - start + 3) * 4;

		*data++ = start | (1 << 31);
		*data++ = end;

		for (j = start; j <= end; j++) {
			unsigned int val;

			val = a6xx_read_dbgahb(device, cur_cluster->regbase, j);
			*data++ = val;
		}
	}

out:
	return data_size + sizeof(*header);
}

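/*
 * a6xx_snapshot_cluster_dbgahb() - Dump one DBG AHB cluster/context from the
 * crash dumper buffer, falling back to the legacy AHB path when the crash
 * dump is not valid.
 */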
static size_t a6xx_snapshot_cluster_dbgahb(struct kgsl_device *device, u8 *buf,
				size_t remain, void *priv)
{
	struct kgsl_snapshot_mvc_regs *header =
				(struct kgsl_snapshot_mvc_regs *)buf;
	struct a6xx_cluster_dbgahb_regs_info *info =
				(struct a6xx_cluster_dbgahb_regs_info *)priv;
	struct a6xx_cluster_dbgahb_registers *cluster = info->cluster;
	unsigned int data_size = 0;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	int i, j;
	unsigned int *src;

	if (crash_dump_valid == false)
		return a6xx_legacy_snapshot_cluster_dbgahb(device, buf, remain,
				info);

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	remain -= sizeof(*header);

	header->ctxt_id = info->ctxt_id;
	header->cluster_id = cluster->id;

	src = (unsigned int *)(a6xx_crashdump_registers.hostptr +
		(header->ctxt_id ? cluster->offset1 : cluster->offset0));

	for (i = 0; i < cluster->num_sets; i++) {
		unsigned int start;
		unsigned int end;

		start = cluster->regs[2 * i];
		end = cluster->regs[2 * i + 1];

		if (remain < (end - start + 3) * 4) {
			SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
			goto out;
		}

		remain -= (end - start + 3) * 4;
		data_size += (end - start + 3) * 4;

		*data++ = start | (1 << 31);
		*data++ = end;
		for (j = start; j <= end; j++)
			*data++ = *src++;
	}
out:
	return data_size + sizeof(*header);
}

static size_t a6xx_legacy_snapshot_non_ctx_dbgahb(struct kgsl_device *device,
				u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_regs *header =
				(struct kgsl_snapshot_regs *)buf;
	struct a6xx_non_ctx_dbgahb_registers *regs =
				(struct a6xx_non_ctx_dbgahb_registers *)priv;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	int count = 0;
	unsigned int read_sel;
	int i, j;

	if (!device->snapshot_legacy)
		return 0;

	/* Figure out how many registers we are going to dump */
	for (i = 0; i < regs->num_sets; i++) {
		int start = regs->regs[i * 2];
		int end = regs->regs[i * 2 + 1];

		count += (end - start + 1);
	}

	if (remain < (count * 8) + sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	header->count = count;

	read_sel = (regs->statetype & 0xff) << 8;
	kgsl_regwrite(device, A6XX_HLSQ_DBG_READ_SEL, read_sel);

	for (i = 0; i < regs->num_sets; i++) {
		unsigned int start = regs->regs[2 * i];
		unsigned int end = regs->regs[2 * i + 1];

		for (j = start; j <= end; j++) {
			unsigned int val;

			val = a6xx_read_dbgahb(device, regs->regbase, j);
			*data++ = j;
			*data++ = val;
		}
	}
	return (count * 8) + sizeof(*header);
}

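/*
 * a6xx_snapshot_non_ctx_dbgahb() - Dump the non-context DBG AHB registers
 * from the crash dumper buffer, or through the legacy AHB path when the
 * crash dump is not valid.
 */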
static size_t a6xx_snapshot_non_ctx_dbgahb(struct kgsl_device *device, u8 *buf,
				size_t remain, void *priv)
{
	struct kgsl_snapshot_regs *header =
				(struct kgsl_snapshot_regs *)buf;
	struct a6xx_non_ctx_dbgahb_registers *regs =
				(struct a6xx_non_ctx_dbgahb_registers *)priv;
	unsigned int count = 0;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int i, k;
	unsigned int *src;

	if (crash_dump_valid == false)
		return a6xx_legacy_snapshot_non_ctx_dbgahb(device, buf, remain,
				regs);

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	remain -= sizeof(*header);

	src = (unsigned int *)(a6xx_crashdump_registers.hostptr + regs->offset);

	for (i = 0; i < regs->num_sets; i++) {
		unsigned int start;
		unsigned int end;

		start = regs->regs[2 * i];
		end = regs->regs[(2 * i) + 1];

		if (remain < (end - start + 1) * 8) {
			SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
			goto out;
		}

		remain -= ((end - start) + 1) * 8;

		for (k = start; k <= end; k++, count++) {
			*data++ = k;
			*data++ = *src++;
		}
	}
out:
	header->count = count;

	/* Return the size of the section */
	return (count * 8) + sizeof(*header);
}

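/*
 * a6xx_snapshot_dbgahb_regs() - Add MVC sections for every DBG AHB context
 * cluster and register sections for the non-context DBG AHB blocks.
 */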
static void a6xx_snapshot_dbgahb_regs(struct kgsl_device *device,
				struct kgsl_snapshot *snapshot)
{
	int i, j;

	for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
		struct a6xx_cluster_dbgahb_registers *cluster =
				&a6xx_dbgahb_ctx_clusters[i];
		struct a6xx_cluster_dbgahb_regs_info info;

		info.cluster = cluster;
		for (j = 0; j < A6XX_NUM_CTXTS; j++) {
			info.ctxt_id = j;

			kgsl_snapshot_add_section(device,
				KGSL_SNAPSHOT_SECTION_MVC, snapshot,
				a6xx_snapshot_cluster_dbgahb, &info);
		}
	}

	for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
		kgsl_snapshot_add_section(device,
			KGSL_SNAPSHOT_SECTION_REGS, snapshot,
			a6xx_snapshot_non_ctx_dbgahb, &a6xx_non_ctx_dbgahb[i]);
	}
}

static size_t a6xx_legacy_snapshot_mvc(struct kgsl_device *device, u8 *buf,
				size_t remain, void *priv)
{
	struct kgsl_snapshot_mvc_regs *header =
				(struct kgsl_snapshot_mvc_regs *)buf;
	struct a6xx_cluster_regs_info *info =
				(struct a6xx_cluster_regs_info *)priv;
	struct a6xx_cluster_registers *cur_cluster = info->cluster;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int ctxt = info->ctxt_id;
	unsigned int start, end, i, j, aperture_cntl = 0;
	unsigned int data_size = 0;

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
		return 0;
	}

	remain -= sizeof(*header);

	header->ctxt_id = info->ctxt_id;
	header->cluster_id = cur_cluster->id;

	/*
	 * Set the AHB control for the Host to read from the
	 * cluster/context for this iteration.
	 */
	aperture_cntl = ((cur_cluster->id & 0x7) << 8) | (ctxt << 4) | ctxt;
	kgsl_regwrite(device, A6XX_CP_APERTURE_CNTL_HOST, aperture_cntl);

	for (i = 0; i < cur_cluster->num_sets; i++) {
		start = cur_cluster->regs[2 * i];
		end = cur_cluster->regs[2 * i + 1];

		if (remain < (end - start + 3) * 4) {
			SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
			goto out;
		}

		remain -= (end - start + 3) * 4;
		data_size += (end - start + 3) * 4;

		*data++ = start | (1 << 31);
		*data++ = end;
		for (j = start; j <= end; j++) {
			unsigned int val;

			kgsl_regread(device, j, &val);
			*data++ = val;
		}
	}
out:
	return data_size + sizeof(*header);
}

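/*
 * a6xx_snapshot_mvc() - Dump one register cluster/context from the crash
 * dumper buffer, falling back to direct register reads when the crash dump
 * is not valid.
 */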
static size_t a6xx_snapshot_mvc(struct kgsl_device *device, u8 *buf,
				size_t remain, void *priv)
{
	struct kgsl_snapshot_mvc_regs *header =
				(struct kgsl_snapshot_mvc_regs *)buf;
	struct a6xx_cluster_regs_info *info =
				(struct a6xx_cluster_regs_info *)priv;
	struct a6xx_cluster_registers *cluster = info->cluster;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int *src;
	int i, j;
	unsigned int start, end;
	size_t data_size = 0;

	if (crash_dump_valid == false)
		return a6xx_legacy_snapshot_mvc(device, buf, remain, info);

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
		return 0;
	}

	remain -= sizeof(*header);

	header->ctxt_id = info->ctxt_id;
	header->cluster_id = cluster->id;

	src = (unsigned int *)(a6xx_crashdump_registers.hostptr +
		(header->ctxt_id ? cluster->offset1 : cluster->offset0));

	for (i = 0; i < cluster->num_sets; i++) {
		start = cluster->regs[2 * i];
		end = cluster->regs[2 * i + 1];

		if (remain < (end - start + 3) * 4) {
			SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
			goto out;
		}

		remain -= (end - start + 3) * 4;
		data_size += (end - start + 3) * 4;

		*data++ = start | (1 << 31);
		*data++ = end;
		for (j = start; j <= end; j++)
			*data++ = *src++;
	}

out:
	return data_size + sizeof(*header);
}

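/* a6xx_snapshot_mvc_regs() - Add an MVC section for each cluster and context */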
static void a6xx_snapshot_mvc_regs(struct kgsl_device *device,
				struct kgsl_snapshot *snapshot)
{
	int i, j;
	struct a6xx_cluster_regs_info info;

	for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
		struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];

		info.cluster = cluster;
		for (j = 0; j < A6XX_NUM_CTXTS; j++) {
			info.ctxt_id = j;

			kgsl_snapshot_add_section(device,
				KGSL_SNAPSHOT_SECTION_MVC, snapshot,
				a6xx_snapshot_mvc, &info);
		}
	}
}

Lynus Vaz20c81272017-02-10 16:22:12 +05301060/* a6xx_dbgc_debug_bus_read() - Read data from trace bus */
1061static void a6xx_dbgc_debug_bus_read(struct kgsl_device *device,
1062 unsigned int block_id, unsigned int index, unsigned int *val)
1063{
1064 unsigned int reg;
1065
1066 reg = (block_id << A6XX_DBGC_CFG_DBGBUS_SEL_PING_BLK_SEL_SHIFT) |
1067 (index << A6XX_DBGC_CFG_DBGBUS_SEL_PING_INDEX_SHIFT);
1068
1069 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_A, reg);
1070 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_B, reg);
1071 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_C, reg);
1072 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_D, reg);
1073
Shrenuj Bansald4508ba2017-05-11 15:59:37 -07001074 /*
1075 * There needs to be a delay of 1 us to ensure enough time for correct
1076 * data is funneled into the trace buffer
1077 */
1078 udelay(1);
1079
Lynus Vaz20c81272017-02-10 16:22:12 +05301080 kgsl_regread(device, A6XX_DBGC_CFG_DBGBUS_TRACE_BUF2, val);
1081 val++;
1082 kgsl_regread(device, A6XX_DBGC_CFG_DBGBUS_TRACE_BUF1, val);
1083}
1084
/* a6xx_snapshot_dbgc_debugbus_block() - Capture debug data for a gpu block */
static size_t a6xx_snapshot_dbgc_debugbus_block(struct kgsl_device *device,
	u8 *buf, size_t remain, void *priv)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct kgsl_snapshot_debugbus *header =
		(struct kgsl_snapshot_debugbus *)buf;
	struct adreno_debugbus_block *block = priv;
	int i;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int dwords;
	unsigned int block_id;
	size_t size;

	dwords = block->dwords;

	/* For a6xx each debug bus data unit is 2 DWORDS */
	size = (dwords * sizeof(unsigned int) * 2) + sizeof(*header);

	if (remain < size) {
		SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
		return 0;
	}

	header->id = block->block_id;
	header->count = dwords * 2;

	block_id = block->block_id;
	/* GMU_GX data is read using the GMU_CX block id on A630 */
	if (adreno_is_a630(adreno_dev) &&
		(block_id == A6XX_DBGBUS_GMU_GX))
		block_id = A6XX_DBGBUS_GMU_CX;

	for (i = 0; i < dwords; i++)
		a6xx_dbgc_debug_bus_read(device, block_id, i, &data[i*2]);

	return size;
}

/* a6xx_snapshot_vbif_debugbus_block() - Capture debug data for VBIF block */
static size_t a6xx_snapshot_vbif_debugbus_block(struct kgsl_device *device,
			u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_debugbus *header =
		(struct kgsl_snapshot_debugbus *)buf;
	struct adreno_debugbus_block *block = priv;
	int i, j;
	/*
	 * Total number of VBIF data words considering 3 sections:
	 * 2 arbiter blocks of 16 words
	 * 5 AXI XIN blocks of 18 dwords each
	 * 4 core clock side XIN blocks of 12 dwords each
	 */
	unsigned int dwords = (16 * A6XX_NUM_AXI_ARB_BLOCKS) +
			(18 * A6XX_NUM_XIN_AXI_BLOCKS) +
			(12 * A6XX_NUM_XIN_CORE_BLOCKS);
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	size_t size;
	unsigned int reg_clk;

	size = (dwords * sizeof(unsigned int)) + sizeof(*header);

	if (remain < size) {
		SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
		return 0;
	}
	header->id = block->block_id;
	header->count = dwords;

	kgsl_regread(device, A6XX_VBIF_CLKON, &reg_clk);
	kgsl_regwrite(device, A6XX_VBIF_CLKON, reg_clk |
			(A6XX_VBIF_CLKON_FORCE_ON_TESTBUS_MASK <<
			A6XX_VBIF_CLKON_FORCE_ON_TESTBUS_SHIFT));
	kgsl_regwrite(device, A6XX_VBIF_TEST_BUS1_CTRL0, 0);
	kgsl_regwrite(device, A6XX_VBIF_TEST_BUS_OUT_CTRL,
			(A6XX_VBIF_TEST_BUS_OUT_CTRL_EN_MASK <<
			A6XX_VBIF_TEST_BUS_OUT_CTRL_EN_SHIFT));

	for (i = 0; i < A6XX_NUM_AXI_ARB_BLOCKS; i++) {
		kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL0,
			(1 << (i + 16)));
		for (j = 0; j < 16; j++) {
			kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL1,
				((j & A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL_MASK)
				<< A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL_SHIFT));
			kgsl_regread(device, A6XX_VBIF_TEST_BUS_OUT,
				data);
			data++;
		}
	}

	/* XIN blocks AXI side */
	for (i = 0; i < A6XX_NUM_XIN_AXI_BLOCKS; i++) {
		kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL0, 1 << i);
		for (j = 0; j < 18; j++) {
			kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL1,
				((j & A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL_MASK)
				<< A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL_SHIFT));
			kgsl_regread(device, A6XX_VBIF_TEST_BUS_OUT,
				data);
			data++;
		}
	}
	kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL0, 0);

	/* XIN blocks core clock side */
	for (i = 0; i < A6XX_NUM_XIN_CORE_BLOCKS; i++) {
		kgsl_regwrite(device, A6XX_VBIF_TEST_BUS1_CTRL0, 1 << i);
		for (j = 0; j < 12; j++) {
			kgsl_regwrite(device, A6XX_VBIF_TEST_BUS1_CTRL1,
				((j & A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL_MASK)
				<< A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL_SHIFT));
			kgsl_regread(device, A6XX_VBIF_TEST_BUS_OUT,
				data);
			data++;
		}
	}
	/* restore the clock of VBIF */
	kgsl_regwrite(device, A6XX_VBIF_CLKON, reg_clk);
	return size;
}

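/*
 * The CX DBGC registers are accessed through a dedicated mapping
 * (a6xx_cx_dbgc, ioremapped in a6xx_snapshot_debugbus()) instead of
 * kgsl_regread/kgsl_regwrite; the helpers below bounds-check the offset
 * and use raw readl/writel with explicit barriers.
 */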
static void _cx_dbgc_regread(unsigned int offsetwords, unsigned int *value)
{
	void __iomem *reg;

	if (WARN((offsetwords < A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) ||
		(offsetwords > A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2),
		"Read beyond CX_DBGC block: 0x%x\n", offsetwords))
		return;

	reg = a6xx_cx_dbgc +
		((offsetwords - A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) << 2);
	*value = __raw_readl(reg);

	/*
	 * ensure this read finishes before the next one.
	 * i.e. act like normal readl()
	 */
	rmb();
}

static void _cx_dbgc_regwrite(unsigned int offsetwords, unsigned int value)
{
	void __iomem *reg;

	if (WARN((offsetwords < A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) ||
		(offsetwords > A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2),
		"Write beyond CX_DBGC block: 0x%x\n", offsetwords))
		return;

	reg = a6xx_cx_dbgc +
		((offsetwords - A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) << 2);

	/*
	 * ensure previous writes post before this one,
	 * i.e. act like normal writel()
	 */
	wmb();
	__raw_writel(value, reg);
}

/* a6xx_cx_debug_bus_read() - Read data from trace bus */
static void a6xx_cx_debug_bus_read(struct kgsl_device *device,
	unsigned int block_id, unsigned int index, unsigned int *val)
{
	unsigned int reg;

	reg = (block_id << A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_BLK_SEL_SHIFT) |
		(index << A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_INDEX_SHIFT);

	_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_A, reg);
	_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_B, reg);
	_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_C, reg);
	_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_D, reg);

	/*
	 * There needs to be a delay of 1 us to ensure enough time for the
	 * correct data to be funneled into the trace buffer
	 */
	udelay(1);

	_cx_dbgc_regread(A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2, val);
	val++;
	_cx_dbgc_regread(A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1, val);
}

/*
 * a6xx_snapshot_cx_dbgc_debugbus_block() - Capture debug data for a gpu
 * block from the CX DBGC block
 */
static size_t a6xx_snapshot_cx_dbgc_debugbus_block(struct kgsl_device *device,
	u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_debugbus *header =
		(struct kgsl_snapshot_debugbus *)buf;
	struct adreno_debugbus_block *block = priv;
	int i;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int dwords;
	size_t size;

	dwords = block->dwords;

	/* For a6xx each debug bus data unit is 2 DWORDS */
	size = (dwords * sizeof(unsigned int) * 2) + sizeof(*header);

	if (remain < size) {
		SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
		return 0;
	}

	header->id = block->block_id;
	header->count = dwords * 2;

	for (i = 0; i < dwords; i++)
		a6xx_cx_debug_bus_read(device, block->block_id, i,
			&data[i*2]);

	return size;
}

/* a6xx_snapshot_debugbus() - Capture debug bus data */
static void a6xx_snapshot_debugbus(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot)
{
	int i;

	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_CNTLT,
		(0xf << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT_SHIFT) |
		(0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU_SHIFT) |
		(0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN_SHIFT));

	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_CNTLM,
		0xf << A6XX_DBGC_CFG_DBGBUS_CTLTM_ENABLE_SHIFT);

	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_0, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_1, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_2, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_3, 0);

	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_BYTEL_0,
		(0 << A6XX_DBGC_CFG_DBGBUS_BYTEL0_SHIFT) |
		(1 << A6XX_DBGC_CFG_DBGBUS_BYTEL1_SHIFT) |
		(2 << A6XX_DBGC_CFG_DBGBUS_BYTEL2_SHIFT) |
		(3 << A6XX_DBGC_CFG_DBGBUS_BYTEL3_SHIFT) |
		(4 << A6XX_DBGC_CFG_DBGBUS_BYTEL4_SHIFT) |
		(5 << A6XX_DBGC_CFG_DBGBUS_BYTEL5_SHIFT) |
		(6 << A6XX_DBGC_CFG_DBGBUS_BYTEL6_SHIFT) |
		(7 << A6XX_DBGC_CFG_DBGBUS_BYTEL7_SHIFT));
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_BYTEL_1,
		(8 << A6XX_DBGC_CFG_DBGBUS_BYTEL8_SHIFT) |
		(9 << A6XX_DBGC_CFG_DBGBUS_BYTEL9_SHIFT) |
		(10 << A6XX_DBGC_CFG_DBGBUS_BYTEL10_SHIFT) |
		(11 << A6XX_DBGC_CFG_DBGBUS_BYTEL11_SHIFT) |
		(12 << A6XX_DBGC_CFG_DBGBUS_BYTEL12_SHIFT) |
		(13 << A6XX_DBGC_CFG_DBGBUS_BYTEL13_SHIFT) |
		(14 << A6XX_DBGC_CFG_DBGBUS_BYTEL14_SHIFT) |
		(15 << A6XX_DBGC_CFG_DBGBUS_BYTEL15_SHIFT));

	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_0, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_1, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_2, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_3, 0);

	a6xx_cx_dbgc = ioremap(device->reg_phys +
		(A6XX_CX_DBGC_CFG_DBGBUS_SEL_A << 2),
		(A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2 -
		A6XX_CX_DBGC_CFG_DBGBUS_SEL_A + 1) << 2);

	if (a6xx_cx_dbgc) {
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_CNTLT,
			(0xf << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT_SHIFT) |
			(0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU_SHIFT) |
			(0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN_SHIFT));

		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_CNTLM,
			0xf << A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE_SHIFT);

		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3, 0);

		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0,
			(0 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL0_SHIFT) |
			(1 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL1_SHIFT) |
			(2 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL2_SHIFT) |
			(3 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL3_SHIFT) |
			(4 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL4_SHIFT) |
			(5 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL5_SHIFT) |
			(6 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL6_SHIFT) |
			(7 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL7_SHIFT));
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1,
			(8 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL8_SHIFT) |
			(9 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL9_SHIFT) |
			(10 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL10_SHIFT) |
			(11 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL11_SHIFT) |
			(12 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL12_SHIFT) |
			(13 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL13_SHIFT) |
			(14 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL14_SHIFT) |
			(15 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL15_SHIFT));

		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3, 0);
	} else
		KGSL_DRV_ERR(device, "Unable to ioremap CX_DBGC_CFG block\n");

	for (i = 0; i < ARRAY_SIZE(a6xx_dbgc_debugbus_blocks); i++) {
		kgsl_snapshot_add_section(device,
			KGSL_SNAPSHOT_SECTION_DEBUGBUS,
			snapshot, a6xx_snapshot_dbgc_debugbus_block,
			(void *) &a6xx_dbgc_debugbus_blocks[i]);
	}

	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUGBUS,
		snapshot, a6xx_snapshot_vbif_debugbus_block,
		(void *) &a6xx_vbif_debugbus_blocks);

	if (a6xx_cx_dbgc) {
		for (i = 0; i < ARRAY_SIZE(a6xx_cx_dbgc_debugbus_blocks); i++) {
			kgsl_snapshot_add_section(device,
				KGSL_SNAPSHOT_SECTION_DEBUGBUS,
				snapshot, a6xx_snapshot_cx_dbgc_debugbus_block,
				(void *) &a6xx_cx_dbgc_debugbus_blocks[i]);
		}
		iounmap(a6xx_cx_dbgc);
	}
}

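/*
 * a6xx_snapshot_gmu() - Dump the GMU registers, plus the GX side GMU
 * registers when GX is powered on.
 */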
static void a6xx_snapshot_gmu(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);

	if (!kgsl_gmu_isenabled(device))
		return;

	adreno_snapshot_registers(device, snapshot, a6xx_gmu_registers,
					ARRAY_SIZE(a6xx_gmu_registers) / 2);

	if (gpudev->gx_is_on(adreno_dev))
		adreno_snapshot_registers(device, snapshot,
				a6xx_gmu_gx_registers,
				ARRAY_SIZE(a6xx_gmu_gx_registers) / 2);
}

/* a6xx_snapshot_sqe() - Dump SQE data in snapshot */
static size_t a6xx_snapshot_sqe(struct kgsl_device *device, u8 *buf,
		size_t remain, void *priv)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct kgsl_snapshot_debug *header = (struct kgsl_snapshot_debug *)buf;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	struct adreno_firmware *fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);

	if (remain < DEBUG_SECTION_SZ(1)) {
		SNAPSHOT_ERR_NOMEM(device, "SQE VERSION DEBUG");
		return 0;
	}

	/* Dump the SQE firmware version */
	header->type = SNAPSHOT_DEBUG_SQE_VERSION;
	header->size = 1;
	*data = fw->version;

	return DEBUG_SECTION_SZ(1);
}

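/*
 * _a6xx_do_crashdump() - Kick off the CP crash dumper to run the capture
 * script and wait for it to finish; crash_dump_valid is set only when the
 * dumper completes successfully.
 */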
static void _a6xx_do_crashdump(struct kgsl_device *device)
{
	unsigned long wait_time;
	unsigned int reg = 0;
	unsigned int val;

	crash_dump_valid = false;

	if (a6xx_capturescript.gpuaddr == 0 ||
		a6xx_crashdump_registers.gpuaddr == 0)
		return;

	/* If the SMMU is stalled we cannot do a crash dump */
	kgsl_regread(device, A6XX_RBBM_STATUS3, &val);
	if (val & BIT(24))
		return;

	/* Turn on APRIV so we can access the buffers */
	kgsl_regwrite(device, A6XX_CP_MISC_CNTL, 1);

	kgsl_regwrite(device, A6XX_CP_CRASH_SCRIPT_BASE_LO,
		lower_32_bits(a6xx_capturescript.gpuaddr));
	kgsl_regwrite(device, A6XX_CP_CRASH_SCRIPT_BASE_HI,
		upper_32_bits(a6xx_capturescript.gpuaddr));
	kgsl_regwrite(device, A6XX_CP_CRASH_DUMP_CNTL, 1);

	wait_time = jiffies + msecs_to_jiffies(CP_CRASH_DUMPER_TIMEOUT);
	while (!time_after(jiffies, wait_time)) {
		kgsl_regread(device, A6XX_CP_CRASH_DUMP_STATUS, &reg);
		if (reg & 0x2)
			break;
		cpu_relax();
	}

	kgsl_regwrite(device, A6XX_CP_MISC_CNTL, 0);

	if (!(reg & 0x2)) {
		KGSL_CORE_ERR("Crash dump timed out: 0x%X\n", reg);
		return;
	}

	crash_dump_valid = true;
}

1501/*
1502 * a6xx_snapshot() - A6XX GPU snapshot function
1503 * @adreno_dev: Device being snapshotted
1504 * @snapshot: Pointer to the snapshot instance
1505 *
1506 * This is where all of the A6XX specific bits and pieces are grabbed
1507 * into the snapshot memory
1508 */
void a6xx_snapshot(struct adreno_device *adreno_dev,
		struct kgsl_snapshot *snapshot)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	struct adreno_snapshot_data *snap_data = gpudev->snapshot_data;
	bool sptprac_on;

	/* GMU TCM data dumped through AHB */
	a6xx_snapshot_gmu(device, snapshot);

	sptprac_on = gpudev->sptprac_is_on(adreno_dev);

	/* Return if the GX is off */
	if (!gpudev->gx_is_on(adreno_dev)) {
		pr_err("GX is off. Only dumping GMU data in snapshot\n");
		return;
	}

	/* Dump the registers which get affected by crash dumper trigger */
	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
		snapshot, a6xx_snapshot_pre_crashdump_regs, NULL);

	/* Dump vbif registers as well which get affected by crash dumper */
	adreno_snapshot_vbif_registers(device, snapshot,
		a6xx_vbif_snapshot_registers,
		ARRAY_SIZE(a6xx_vbif_snapshot_registers));

	/* Try to run the crash dumper */
	if (sptprac_on)
		_a6xx_do_crashdump(device);

	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
		snapshot, a6xx_snapshot_registers, NULL);

	/* CP_SQE indexed registers */
	kgsl_snapshot_indexed_registers(device, snapshot,
		A6XX_CP_SQE_STAT_ADDR, A6XX_CP_SQE_STAT_DATA,
		0, snap_data->sect_sizes->cp_pfp);

	/* CP_DRAW_STATE */
	kgsl_snapshot_indexed_registers(device, snapshot,
		A6XX_CP_DRAW_STATE_ADDR, A6XX_CP_DRAW_STATE_DATA,
		0, 0x100);

	/* SQE_UCODE Cache */
	kgsl_snapshot_indexed_registers(device, snapshot,
		A6XX_CP_SQE_UCODE_DBG_ADDR, A6XX_CP_SQE_UCODE_DBG_DATA,
		0, 0x6000);

	/* CP ROQ */
	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG,
		snapshot, adreno_snapshot_cp_roq,
		&snap_data->sect_sizes->roq);

	/* SQE Firmware */
	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG,
		snapshot, a6xx_snapshot_sqe, NULL);

	/* Mempool debug data */
	a6xx_snapshot_mempool(device, snapshot);

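	/*
	 * The shader, MVC and debug AHB sections below are only read when
	 * SPTPRAC reports the SP/TP power rail is up, since those blocks
	 * cannot be safely accessed while the rail is collapsed.
	 */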
	if (sptprac_on) {
		/* Shader memory */
		a6xx_snapshot_shader(device, snapshot);

		/* MVC register section */
		a6xx_snapshot_mvc_regs(device, snapshot);

		/* registers dumped through DBG AHB */
		a6xx_snapshot_dbgahb_regs(device, snapshot);
	}

	a6xx_snapshot_debugbus(device, snapshot);

}

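/*
 * _a6xx_crashdump_init_mvc() - Emit capture script commands for the MVC
 * register clusters
 * @ptr: Current write pointer into the capture script
 * @offset: Running offset into the crash dump data buffer
 *
 * For each cluster and each context the script selects the cluster through
 * CP_APERTURE_CNTL_HOST and then reads every register pair into the data
 * buffer, recording the per-context data offsets for the snapshot parser.
 *
 * Returns the number of qwords written to the script.
 */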
static int _a6xx_crashdump_init_mvc(uint64_t *ptr, uint64_t *offset)
{
	int qwords = 0;
	unsigned int i, j, k;
	unsigned int count;

	for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
		struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];

		cluster->offset0 = *offset;
		for (j = 0; j < A6XX_NUM_CTXTS; j++) {

			if (j == 1)
				cluster->offset1 = *offset;

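			/*
			 * Select the cluster and context: the cluster id goes
			 * in bits 15:8 and the context id is repeated in bits
			 * 7:4 and 3:0. The value is written to
			 * CP_APERTURE_CNTL_HOST by the script (bit 21 in the
			 * second qword marks a register write).
			 */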
			ptr[qwords++] = (cluster->id << 8) | (j << 4) | j;
			ptr[qwords++] =
				((uint64_t)A6XX_CP_APERTURE_CNTL_HOST << 44) |
				(1 << 21) | 1;

			for (k = 0; k < cluster->num_sets; k++) {
				count = REG_PAIR_COUNT(cluster->regs, k);
				ptr[qwords++] =
					a6xx_crashdump_registers.gpuaddr + *offset;
				ptr[qwords++] =
					(((uint64_t)cluster->regs[2 * k]) << 44) |
					count;

				*offset += count * sizeof(unsigned int);
			}
		}
	}

	return qwords;
}

static int _a6xx_crashdump_init_shader(struct a6xx_shader_block *block,
		uint64_t *ptr, uint64_t *offset)
{
	int qwords = 0;
	unsigned int j;

	/* Capture each bank in the block */
	for (j = 0; j < A6XX_NUM_SHADER_BANKS; j++) {
		/* Program the aperture */
		ptr[qwords++] =
			(block->statetype << A6XX_SHADER_STATETYPE_SHIFT) | j;
		ptr[qwords++] = (((uint64_t) A6XX_HLSQ_DBG_READ_SEL << 44)) |
			(1 << 21) | 1;

		/* Read all the data in one chunk */
		ptr[qwords++] = a6xx_crashdump_registers.gpuaddr + *offset;
		ptr[qwords++] =
			(((uint64_t) A6XX_HLSQ_DBG_AHB_READ_APERTURE << 44)) |
			block->sz;

		/* Remember the offset of the first bank for easy access */
		if (j == 0)
			block->offset = *offset;

		*offset += block->sz * sizeof(unsigned int);
	}

	return qwords;
}

static int _a6xx_crashdump_init_ctx_dbgahb(uint64_t *ptr, uint64_t *offset)
{
	int qwords = 0;
	unsigned int i, j, k;
	unsigned int count;

	for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
		struct a6xx_cluster_dbgahb_registers *cluster =
			&a6xx_dbgahb_ctx_clusters[i];

		cluster->offset0 = *offset;

		for (j = 0; j < A6XX_NUM_CTXTS; j++) {
			if (j == 1)
				cluster->offset1 = *offset;

			/* Program the aperture */
			ptr[qwords++] =
				((cluster->statetype + j * 2) & 0xff) << 8;
			ptr[qwords++] =
				(((uint64_t)A6XX_HLSQ_DBG_READ_SEL << 44)) |
				(1 << 21) | 1;

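			/*
			 * These registers are not read at their own offsets;
			 * the dumper reads them through the HLSQ debug
			 * aperture, so the source is
			 * A6XX_HLSQ_DBG_AHB_READ_APERTURE plus the register's
			 * dword offset from the cluster regbase.
			 */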
			for (k = 0; k < cluster->num_sets; k++) {
				unsigned int start = cluster->regs[2 * k];

				count = REG_PAIR_COUNT(cluster->regs, k);
				ptr[qwords++] =
					a6xx_crashdump_registers.gpuaddr + *offset;
				ptr[qwords++] =
					(((uint64_t)(A6XX_HLSQ_DBG_AHB_READ_APERTURE +
					start - cluster->regbase / 4) << 44)) |
					count;

				*offset += count * sizeof(unsigned int);
			}
		}
	}
	return qwords;
}

static int _a6xx_crashdump_init_non_ctx_dbgahb(uint64_t *ptr, uint64_t *offset)
{
	int qwords = 0;
	unsigned int i, k;
	unsigned int count;

	for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
		struct a6xx_non_ctx_dbgahb_registers *regs =
			&a6xx_non_ctx_dbgahb[i];

		regs->offset = *offset;

		/* Program the aperture */
		ptr[qwords++] = (regs->statetype & 0xff) << 8;
		ptr[qwords++] = (((uint64_t)A6XX_HLSQ_DBG_READ_SEL << 44)) |
			(1 << 21) | 1;

		for (k = 0; k < regs->num_sets; k++) {
			unsigned int start = regs->regs[2 * k];

			count = REG_PAIR_COUNT(regs->regs, k);
			ptr[qwords++] =
				a6xx_crashdump_registers.gpuaddr + *offset;
			ptr[qwords++] =
				(((uint64_t)(A6XX_HLSQ_DBG_AHB_READ_APERTURE +
				start - regs->regbase / 4) << 44)) |
				count;

			*offset += count * sizeof(unsigned int);
		}
	}
	return qwords;
}

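/*
 * a6xx_crashdump_init() - Build the capture script for the CP crash dumper
 * @adreno_dev: Pointer to the adreno device
 *
 * Size and allocate the global capture script and crash dump data buffers,
 * then fill the script with the read/write commands that
 * _a6xx_do_crashdump() asks the CP to execute.
 */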
void a6xx_crashdump_init(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned int script_size = 0;
	unsigned int data_size = 0;
	unsigned int i, j, k;
	uint64_t *ptr;
	uint64_t offset = 0;

	if (a6xx_capturescript.gpuaddr != 0 &&
			a6xx_crashdump_registers.gpuaddr != 0)
		return;

	/*
	 * We need to allocate two buffers:
	 * 1 - the buffer to hold the capture script
	 * 2 - the buffer to hold the data
	 */

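	/*
	 * Rough script layout, in the order it is emitted below:
	 *   - read commands for the _a6xx_cd_registers list
	 *   - aperture writes and reads for each shader block bank
	 *   - aperture writes and reads for the MVC clusters
	 *   - aperture writes and reads for the context and non-context
	 *     debug AHB registers
	 *   - two zero qwords terminating the script
	 */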
	/*
	 * To save the registers, we need 16 bytes per register pair for the
	 * script and a dword for each register in the data
	 */
	for (i = 0; i < ARRAY_SIZE(_a6xx_cd_registers); i++) {
		struct cdregs *regs = &_a6xx_cd_registers[i];

		/* Each pair needs 16 bytes (2 qwords) */
		script_size += (regs->size / 2) * 16;

		/* Each register needs a dword in the data */
		for (j = 0; j < regs->size / 2; j++)
			data_size += REG_PAIR_COUNT(regs->regs, j) *
				sizeof(unsigned int);

	}

	/*
	 * To save the shader blocks for each block in each type we need 32
	 * bytes for the script (16 bytes to program the aperture and 16 to
	 * read the data) and then a block specific number of bytes to hold
	 * the data
	 */
	for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
		script_size += 32 * A6XX_NUM_SHADER_BANKS;
		data_size += a6xx_shader_blocks[i].sz * sizeof(unsigned int) *
			A6XX_NUM_SHADER_BANKS;
	}

	/* Calculate the script and data size for MVC registers */
	for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
		struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];

		for (j = 0; j < A6XX_NUM_CTXTS; j++) {

			/* 16 bytes for programming the aperture */
			script_size += 16;

			/* Reading each pair of registers takes 16 bytes */
			script_size += 16 * cluster->num_sets;

			/* A dword per register read from the cluster list */
			for (k = 0; k < cluster->num_sets; k++)
				data_size += REG_PAIR_COUNT(cluster->regs, k) *
					sizeof(unsigned int);
		}
	}

	/* Calculate the script and data size for debug AHB registers */
	for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
		struct a6xx_cluster_dbgahb_registers *cluster =
			&a6xx_dbgahb_ctx_clusters[i];

		for (j = 0; j < A6XX_NUM_CTXTS; j++) {

			/* 16 bytes for programming the aperture */
			script_size += 16;

			/* Reading each pair of registers takes 16 bytes */
			script_size += 16 * cluster->num_sets;

			/* A dword per register read from the cluster list */
			for (k = 0; k < cluster->num_sets; k++)
				data_size += REG_PAIR_COUNT(cluster->regs, k) *
					sizeof(unsigned int);
		}
	}

	/*
	 * Calculate the script and data size for non context debug
	 * AHB registers
	 */
	for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
		struct a6xx_non_ctx_dbgahb_registers *regs =
			&a6xx_non_ctx_dbgahb[i];

		/* 16 bytes for programming the aperture */
		script_size += 16;

		/* Reading each pair of registers takes 16 bytes */
		script_size += 16 * regs->num_sets;

		/* A dword per register read from the cluster list */
		for (k = 0; k < regs->num_sets; k++)
			data_size += REG_PAIR_COUNT(regs->regs, k) *
				sizeof(unsigned int);
	}

	/* Now allocate the script and data buffers */

	/* The script buffer needs 2 extra qwords at the end for the zero terminator */
	if (kgsl_allocate_global(device, &a6xx_capturescript,
		script_size + 16, KGSL_MEMFLAGS_GPUREADONLY,
		KGSL_MEMDESC_PRIVILEGED, "capturescript"))
		return;

	if (kgsl_allocate_global(device, &a6xx_crashdump_registers, data_size,
		0, KGSL_MEMDESC_PRIVILEGED, "capturescript_regs")) {
		kgsl_free_global(KGSL_DEVICE(adreno_dev), &a6xx_capturescript);
		return;
	}

	/* Build the crash script */

	ptr = (uint64_t *)a6xx_capturescript.hostptr;

	/* For the registers, program a read command for each pair */
	for (i = 0; i < ARRAY_SIZE(_a6xx_cd_registers); i++) {
		struct cdregs *regs = &_a6xx_cd_registers[i];

		for (j = 0; j < regs->size / 2; j++) {
			unsigned int r = REG_PAIR_COUNT(regs->regs, j);
			*ptr++ = a6xx_crashdump_registers.gpuaddr + offset;
			*ptr++ = (((uint64_t) regs->regs[2 * j]) << 44) | r;
			offset += r * sizeof(unsigned int);
		}
	}

	/* Program each shader block */
	for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
		ptr += _a6xx_crashdump_init_shader(&a6xx_shader_blocks[i], ptr,
			&offset);
	}

	/* Program the capturescript for the MVC registers */
	ptr += _a6xx_crashdump_init_mvc(ptr, &offset);

	ptr += _a6xx_crashdump_init_ctx_dbgahb(ptr, &offset);

	ptr += _a6xx_crashdump_init_non_ctx_dbgahb(ptr, &offset);

	*ptr++ = 0;
	*ptr++ = 0;
}