/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
13
14#include <linux/io.h>
15#include "kgsl.h"
16#include "adreno.h"
17#include "kgsl_snapshot.h"
18#include "adreno_snapshot.h"
19#include "a6xx_reg.h"
20#include "adreno_a6xx.h"
Kyle Piefer60733aa2017-03-21 11:24:01 -070021#include "kgsl_gmu.h"
Shrenuj Bansal41665402016-12-16 15:25:54 -080022
23#define A6XX_NUM_CTXTS 2
Lynus Vazdaac540732017-07-27 14:23:35 +053024#define A6XX_NUM_AXI_ARB_BLOCKS 2
25#define A6XX_NUM_XIN_AXI_BLOCKS 5
26#define A6XX_NUM_XIN_CORE_BLOCKS 4
Shrenuj Bansal41665402016-12-16 15:25:54 -080027
28static const unsigned int a6xx_gras_cluster[] = {
29 0x8000, 0x8006, 0x8010, 0x8092, 0x8094, 0x809D, 0x80A0, 0x80A6,
30 0x80AF, 0x80F1, 0x8100, 0x8107, 0x8109, 0x8109, 0x8110, 0x8110,
31 0x8400, 0x840B,
32};
33
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -060034static const unsigned int a6xx_ps_cluster_rac[] = {
Shrenuj Bansal41665402016-12-16 15:25:54 -080035 0x8800, 0x8806, 0x8809, 0x8811, 0x8818, 0x881E, 0x8820, 0x8865,
36 0x8870, 0x8879, 0x8880, 0x8889, 0x8890, 0x8891, 0x8898, 0x8898,
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -060037 0x88C0, 0x88C1, 0x88D0, 0x88E3, 0x8900, 0x890C, 0x890F, 0x891A,
38 0x8C00, 0x8C01, 0x8C08, 0x8C10, 0x8C17, 0x8C1F, 0x8C26, 0x8C33,
39};
40
41static const unsigned int a6xx_ps_cluster_rbp[] = {
42 0x88F0, 0x88F3, 0x890D, 0x890E, 0x8927, 0x8928, 0x8BF0, 0x8BF1,
43 0x8C02, 0x8C07, 0x8C11, 0x8C16, 0x8C20, 0x8C25,
44};
45
46static const unsigned int a6xx_ps_cluster[] = {
47 0x9200, 0x9216, 0x9218, 0x9236, 0x9300, 0x9306,
Shrenuj Bansal41665402016-12-16 15:25:54 -080048};
49
50static const unsigned int a6xx_fe_cluster[] = {
51 0x9300, 0x9306, 0x9800, 0x9806, 0x9B00, 0x9B07, 0xA000, 0xA009,
52 0xA00E, 0xA0EF, 0xA0F8, 0xA0F8,
53};
54
55static const unsigned int a6xx_pc_vs_cluster[] = {
56 0x9100, 0x9108, 0x9300, 0x9306, 0x9980, 0x9981, 0x9B00, 0x9B07,
57};
58
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -060059static const struct sel_reg {
60 unsigned int host_reg;
61 unsigned int cd_reg;
62 unsigned int val;
63} _a6xx_rb_rac_aperture = {
64 .host_reg = A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST,
65 .cd_reg = A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD,
66 .val = 0x0,
67},
68_a6xx_rb_rbp_aperture = {
69 .host_reg = A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST,
70 .cd_reg = A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD,
71 .val = 0x9,
72};
73
Shrenuj Bansal41665402016-12-16 15:25:54 -080074static struct a6xx_cluster_registers {
75 unsigned int id;
76 const unsigned int *regs;
77 unsigned int num_sets;
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -060078 const struct sel_reg *sel;
Shrenuj Bansal41665402016-12-16 15:25:54 -080079 unsigned int offset0;
80 unsigned int offset1;
81} a6xx_clusters[] = {
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -060082 { CP_CLUSTER_GRAS, a6xx_gras_cluster, ARRAY_SIZE(a6xx_gras_cluster)/2,
83 NULL },
84 { CP_CLUSTER_PS, a6xx_ps_cluster_rac, ARRAY_SIZE(a6xx_ps_cluster_rac)/2,
85 &_a6xx_rb_rac_aperture },
86 { CP_CLUSTER_PS, a6xx_ps_cluster_rbp, ARRAY_SIZE(a6xx_ps_cluster_rbp)/2,
87 &_a6xx_rb_rbp_aperture },
88 { CP_CLUSTER_PS, a6xx_ps_cluster, ARRAY_SIZE(a6xx_ps_cluster)/2,
89 NULL },
90 { CP_CLUSTER_FE, a6xx_fe_cluster, ARRAY_SIZE(a6xx_fe_cluster)/2,
91 NULL },
Shrenuj Bansal41665402016-12-16 15:25:54 -080092 { CP_CLUSTER_PC_VS, a6xx_pc_vs_cluster,
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -060093 ARRAY_SIZE(a6xx_pc_vs_cluster)/2, NULL },
Shrenuj Bansal41665402016-12-16 15:25:54 -080094};
95
96struct a6xx_cluster_regs_info {
97 struct a6xx_cluster_registers *cluster;
98 unsigned int ctxt_id;
99};
100
Lynus Vaz461e2382017-01-16 19:35:41 +0530101static const unsigned int a6xx_sp_vs_hlsq_cluster[] = {
102 0xB800, 0xB803, 0xB820, 0xB822,
103};
104
105static const unsigned int a6xx_sp_vs_sp_cluster[] = {
106 0xA800, 0xA824, 0xA830, 0xA83C, 0xA840, 0xA864, 0xA870, 0xA895,
107 0xA8A0, 0xA8AF, 0xA8C0, 0xA8C3,
108};
109
110static const unsigned int a6xx_hlsq_duplicate_cluster[] = {
111 0xBB10, 0xBB11, 0xBB20, 0xBB29,
112};
113
114static const unsigned int a6xx_hlsq_2d_duplicate_cluster[] = {
115 0xBD80, 0xBD80,
116};
117
118static const unsigned int a6xx_sp_duplicate_cluster[] = {
119 0xAB00, 0xAB00, 0xAB04, 0xAB05, 0xAB10, 0xAB1B, 0xAB20, 0xAB20,
120};
121
122static const unsigned int a6xx_tp_duplicate_cluster[] = {
123 0xB300, 0xB307, 0xB309, 0xB309, 0xB380, 0xB382,
124};
125
126static const unsigned int a6xx_sp_ps_hlsq_cluster[] = {
127 0xB980, 0xB980, 0xB982, 0xB987, 0xB990, 0xB99B, 0xB9A0, 0xB9A2,
128 0xB9C0, 0xB9C9,
129};
130
131static const unsigned int a6xx_sp_ps_hlsq_2d_cluster[] = {
132 0xBD80, 0xBD80,
133};
134
135static const unsigned int a6xx_sp_ps_sp_cluster[] = {
136 0xA980, 0xA9A8, 0xA9B0, 0xA9BC, 0xA9D0, 0xA9D3, 0xA9E0, 0xA9F3,
137 0xAA00, 0xAA00, 0xAA30, 0xAA31,
138};
139
140static const unsigned int a6xx_sp_ps_sp_2d_cluster[] = {
141 0xACC0, 0xACC0,
142};
143
144static const unsigned int a6xx_sp_ps_tp_cluster[] = {
145 0xB180, 0xB183, 0xB190, 0xB191,
146};
147
148static const unsigned int a6xx_sp_ps_tp_2d_cluster[] = {
149 0xB4C0, 0xB4D1,
150};
151
152static struct a6xx_cluster_dbgahb_registers {
153 unsigned int id;
154 unsigned int regbase;
155 unsigned int statetype;
156 const unsigned int *regs;
157 unsigned int num_sets;
Lynus Vaz1e258612017-04-27 21:35:22 +0530158 unsigned int offset0;
159 unsigned int offset1;
Lynus Vaz461e2382017-01-16 19:35:41 +0530160} a6xx_dbgahb_ctx_clusters[] = {
161 { CP_CLUSTER_SP_VS, 0x0002E000, 0x41, a6xx_sp_vs_hlsq_cluster,
162 ARRAY_SIZE(a6xx_sp_vs_hlsq_cluster) / 2 },
163 { CP_CLUSTER_SP_VS, 0x0002A000, 0x21, a6xx_sp_vs_sp_cluster,
164 ARRAY_SIZE(a6xx_sp_vs_sp_cluster) / 2 },
Shrenuj Bansalcbdf19b2017-04-13 11:28:51 -0700165 { CP_CLUSTER_SP_VS, 0x0002E000, 0x41, a6xx_hlsq_duplicate_cluster,
Lynus Vaz461e2382017-01-16 19:35:41 +0530166 ARRAY_SIZE(a6xx_hlsq_duplicate_cluster) / 2 },
167 { CP_CLUSTER_SP_VS, 0x0002F000, 0x45, a6xx_hlsq_2d_duplicate_cluster,
168 ARRAY_SIZE(a6xx_hlsq_2d_duplicate_cluster) / 2 },
Shrenuj Bansalcbdf19b2017-04-13 11:28:51 -0700169 { CP_CLUSTER_SP_VS, 0x0002A000, 0x21, a6xx_sp_duplicate_cluster,
Lynus Vaz461e2382017-01-16 19:35:41 +0530170 ARRAY_SIZE(a6xx_sp_duplicate_cluster) / 2 },
Shrenuj Bansalcbdf19b2017-04-13 11:28:51 -0700171 { CP_CLUSTER_SP_VS, 0x0002C000, 0x1, a6xx_tp_duplicate_cluster,
Lynus Vaz461e2382017-01-16 19:35:41 +0530172 ARRAY_SIZE(a6xx_tp_duplicate_cluster) / 2 },
Shrenuj Bansalcbdf19b2017-04-13 11:28:51 -0700173 { CP_CLUSTER_SP_PS, 0x0002E000, 0x42, a6xx_sp_ps_hlsq_cluster,
Lynus Vaz461e2382017-01-16 19:35:41 +0530174 ARRAY_SIZE(a6xx_sp_ps_hlsq_cluster) / 2 },
Shrenuj Bansalcbdf19b2017-04-13 11:28:51 -0700175 { CP_CLUSTER_SP_PS, 0x0002F000, 0x46, a6xx_sp_ps_hlsq_2d_cluster,
Lynus Vaz461e2382017-01-16 19:35:41 +0530176 ARRAY_SIZE(a6xx_sp_ps_hlsq_2d_cluster) / 2 },
Shrenuj Bansalcbdf19b2017-04-13 11:28:51 -0700177 { CP_CLUSTER_SP_PS, 0x0002A000, 0x22, a6xx_sp_ps_sp_cluster,
Lynus Vaz461e2382017-01-16 19:35:41 +0530178 ARRAY_SIZE(a6xx_sp_ps_sp_cluster) / 2 },
Shrenuj Bansalcbdf19b2017-04-13 11:28:51 -0700179 { CP_CLUSTER_SP_PS, 0x0002B000, 0x26, a6xx_sp_ps_sp_2d_cluster,
Lynus Vaz461e2382017-01-16 19:35:41 +0530180 ARRAY_SIZE(a6xx_sp_ps_sp_2d_cluster) / 2 },
Shrenuj Bansalcbdf19b2017-04-13 11:28:51 -0700181 { CP_CLUSTER_SP_PS, 0x0002C000, 0x2, a6xx_sp_ps_tp_cluster,
Lynus Vaz461e2382017-01-16 19:35:41 +0530182 ARRAY_SIZE(a6xx_sp_ps_tp_cluster) / 2 },
Shrenuj Bansalcbdf19b2017-04-13 11:28:51 -0700183 { CP_CLUSTER_SP_PS, 0x0002D000, 0x6, a6xx_sp_ps_tp_2d_cluster,
Lynus Vaz461e2382017-01-16 19:35:41 +0530184 ARRAY_SIZE(a6xx_sp_ps_tp_2d_cluster) / 2 },
Shrenuj Bansalcbdf19b2017-04-13 11:28:51 -0700185 { CP_CLUSTER_SP_PS, 0x0002E000, 0x42, a6xx_hlsq_duplicate_cluster,
Lynus Vaz461e2382017-01-16 19:35:41 +0530186 ARRAY_SIZE(a6xx_hlsq_duplicate_cluster) / 2 },
Shrenuj Bansalcbdf19b2017-04-13 11:28:51 -0700187 { CP_CLUSTER_SP_VS, 0x0002A000, 0x22, a6xx_sp_duplicate_cluster,
Lynus Vaz461e2382017-01-16 19:35:41 +0530188 ARRAY_SIZE(a6xx_sp_duplicate_cluster) / 2 },
Shrenuj Bansalcbdf19b2017-04-13 11:28:51 -0700189 { CP_CLUSTER_SP_VS, 0x0002C000, 0x2, a6xx_tp_duplicate_cluster,
Lynus Vaz461e2382017-01-16 19:35:41 +0530190 ARRAY_SIZE(a6xx_tp_duplicate_cluster) / 2 },
191};
192
193struct a6xx_cluster_dbgahb_regs_info {
194 struct a6xx_cluster_dbgahb_registers *cluster;
195 unsigned int ctxt_id;
196};
197
Shrenuj Bansal41665402016-12-16 15:25:54 -0800198static const unsigned int a6xx_vbif_ver_20xxxxxx_registers[] = {
199 /* VBIF */
200 0x3000, 0x3007, 0x300C, 0x3014, 0x3018, 0x302D, 0x3030, 0x3031,
201 0x3034, 0x3036, 0x303C, 0x303D, 0x3040, 0x3040, 0x3042, 0x3042,
202 0x3049, 0x3049, 0x3058, 0x3058, 0x305A, 0x3061, 0x3064, 0x3068,
203 0x306C, 0x306D, 0x3080, 0x3088, 0x308B, 0x308C, 0x3090, 0x3094,
204 0x3098, 0x3098, 0x309C, 0x309C, 0x30C0, 0x30C0, 0x30C8, 0x30C8,
205 0x30D0, 0x30D0, 0x30D8, 0x30D8, 0x30E0, 0x30E0, 0x3100, 0x3100,
206 0x3108, 0x3108, 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120,
207 0x3124, 0x3125, 0x3129, 0x3129, 0x3131, 0x3131, 0x3154, 0x3154,
208 0x3156, 0x3156, 0x3158, 0x3158, 0x315A, 0x315A, 0x315C, 0x315C,
209 0x315E, 0x315E, 0x3160, 0x3160, 0x3162, 0x3162, 0x340C, 0x340C,
210 0x3410, 0x3410, 0x3800, 0x3801,
211};
212
Rajesh Kemisettib36bb492017-11-20 10:49:27 +0530213static const unsigned int a6xx_gbif_registers[] = {
214 /* GBIF */
215 0x3C00, 0X3C0B, 0X3C40, 0X3C47, 0X3CC0, 0X3CD1,
216};
217
George Shen1d447b02017-07-12 13:40:28 -0700218static const unsigned int a6xx_gmu_gx_registers[] = {
Kyle Pieferbce21702017-06-08 09:21:28 -0700219 /* GMU GX */
220 0x1A800, 0x1A800, 0x1A810, 0x1A813, 0x1A816, 0x1A816, 0x1A818, 0x1A81B,
221 0x1A81E, 0x1A81E, 0x1A820, 0x1A823, 0x1A826, 0x1A826, 0x1A828, 0x1A82B,
222 0x1A82E, 0x1A82E, 0x1A830, 0x1A833, 0x1A836, 0x1A836, 0x1A838, 0x1A83B,
223 0x1A83E, 0x1A83E, 0x1A840, 0x1A843, 0x1A846, 0x1A846, 0x1A880, 0x1A884,
224 0x1A900, 0x1A92B, 0x1A940, 0x1A940,
George Shen1d447b02017-07-12 13:40:28 -0700225};
226
227static const unsigned int a6xx_gmu_registers[] = {
Kyle Pieferbce21702017-06-08 09:21:28 -0700228 /* GMU TCM */
Kyle Piefer60733aa2017-03-21 11:24:01 -0700229 0x1B400, 0x1C3FF, 0x1C400, 0x1D3FF,
Kyle Pieferbce21702017-06-08 09:21:28 -0700230 /* GMU CX */
231 0x1F400, 0x1F407, 0x1F410, 0x1F412, 0x1F500, 0x1F500, 0x1F507, 0x1F50A,
232 0x1F800, 0x1F804, 0x1F807, 0x1F808, 0x1F80B, 0x1F80C, 0x1F80F, 0x1F81C,
233 0x1F824, 0x1F82A, 0x1F82D, 0x1F830, 0x1F840, 0x1F853, 0x1F887, 0x1F889,
234 0x1F8A0, 0x1F8A2, 0x1F8A4, 0x1F8AF, 0x1F8C0, 0x1F8C3, 0x1F8D0, 0x1F8D0,
235 0x1F8E4, 0x1F8E4, 0x1F8E8, 0x1F8EC, 0x1F900, 0x1F903, 0x1F940, 0x1F940,
236 0x1F942, 0x1F944, 0x1F94C, 0x1F94D, 0x1F94F, 0x1F951, 0x1F954, 0x1F954,
237 0x1F957, 0x1F958, 0x1F95D, 0x1F95D, 0x1F962, 0x1F962, 0x1F964, 0x1F965,
238 0x1F980, 0x1F986, 0x1F990, 0x1F99E, 0x1F9C0, 0x1F9C0, 0x1F9C5, 0x1F9CC,
Lokesh Batrac367dc92017-08-24 13:40:32 -0700239 0x1F9E0, 0x1F9E2, 0x1F9F0, 0x1F9F0, 0x1FA00, 0x1FA01,
Kyle Pieferbce21702017-06-08 09:21:28 -0700240 /* GPU RSCC */
George Shen6927d8f2017-07-19 11:38:10 -0700241 0x2348C, 0x2348C, 0x23501, 0x23502, 0x23740, 0x23742, 0x23744, 0x23747,
242 0x2374C, 0x23787, 0x237EC, 0x237EF, 0x237F4, 0x2382F, 0x23894, 0x23897,
243 0x2389C, 0x238D7, 0x2393C, 0x2393F, 0x23944, 0x2397F,
Kyle Pieferbce21702017-06-08 09:21:28 -0700244 /* GMU AO */
245 0x23B00, 0x23B16, 0x23C00, 0x23C00,
246 /* GPU CC */
247 0x24000, 0x24012, 0x24040, 0x24052, 0x24400, 0x24404, 0x24407, 0x2440B,
248 0x24415, 0x2441C, 0x2441E, 0x2442D, 0x2443C, 0x2443D, 0x2443F, 0x24440,
249 0x24442, 0x24449, 0x24458, 0x2445A, 0x24540, 0x2455E, 0x24800, 0x24802,
250 0x24C00, 0x24C02, 0x25400, 0x25402, 0x25800, 0x25802, 0x25C00, 0x25C02,
251 0x26000, 0x26002,
252 /* GPU CC ACD */
253 0x26400, 0x26416, 0x26420, 0x26427,
Kyle Piefer60733aa2017-03-21 11:24:01 -0700254};
255
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -0600256static const unsigned int a6xx_rb_rac_registers[] = {
257 0x8E04, 0x8E05, 0x8E07, 0x8E08, 0x8E10, 0x8E1C, 0x8E20, 0x8E25,
258 0x8E28, 0x8E28, 0x8E2C, 0x8E2F, 0x8E50, 0x8E52,
259};
260
261static const unsigned int a6xx_rb_rbp_registers[] = {
262 0x8E01, 0x8E01, 0x8E0C, 0x8E0C, 0x8E3B, 0x8E3E, 0x8E40, 0x8E43,
263 0x8E53, 0x8E5F, 0x8E70, 0x8E77,
264};
265
Shrenuj Bansal41665402016-12-16 15:25:54 -0800266static const struct adreno_vbif_snapshot_registers
267a6xx_vbif_snapshot_registers[] = {
268 { 0x20040000, 0xFF000000, a6xx_vbif_ver_20xxxxxx_registers,
269 ARRAY_SIZE(a6xx_vbif_ver_20xxxxxx_registers)/2},
270};
271
272/*
273 * Set of registers to dump for A6XX on snapshot.
274 * Registers in pairs - first value is the start offset, second
275 * is the stop offset (inclusive)
276 */
277
278static const unsigned int a6xx_registers[] = {
279 /* RBBM */
Lynus Vazdb0be0a2017-04-20 18:09:17 +0530280 0x0000, 0x0002, 0x0010, 0x0010, 0x0012, 0x0012, 0x0018, 0x001B,
281 0x001e, 0x0032, 0x0038, 0x003C, 0x0042, 0x0042, 0x0044, 0x0044,
282 0x0047, 0x0047, 0x0056, 0x0056, 0x00AD, 0x00AE, 0x00B0, 0x00FB,
Lynus Vaz030473e2017-06-22 17:33:06 +0530283 0x0100, 0x011D, 0x0200, 0x020D, 0x0218, 0x023D, 0x0400, 0x04F9,
284 0x0500, 0x0500, 0x0505, 0x050B, 0x050E, 0x0511, 0x0533, 0x0533,
285 0x0540, 0x0555,
Shrenuj Bansal41665402016-12-16 15:25:54 -0800286 /* CP */
Lynus Vaz030473e2017-06-22 17:33:06 +0530287 0x0800, 0x0808, 0x0810, 0x0813, 0x0820, 0x0821, 0x0823, 0x0824,
288 0x0826, 0x0827, 0x0830, 0x0833, 0x0840, 0x0843, 0x084F, 0x086F,
289 0x0880, 0x088A, 0x08A0, 0x08AB, 0x08C0, 0x08C4, 0x08D0, 0x08DD,
290 0x08F0, 0x08F3, 0x0900, 0x0903, 0x0908, 0x0911, 0x0928, 0x093E,
291 0x0942, 0x094D, 0x0980, 0x0984, 0x098D, 0x0996, 0x0998, 0x099E,
292 0x09A0, 0x09A6, 0x09A8, 0x09AE, 0x09B0, 0x09B1, 0x09C2, 0x09C8,
293 0x0A00, 0x0A03,
Shrenuj Bansal41665402016-12-16 15:25:54 -0800294 /* VSC */
295 0x0C00, 0x0C04, 0x0C06, 0x0C06, 0x0C10, 0x0CD9, 0x0E00, 0x0E0E,
296 /* UCHE */
297 0x0E10, 0x0E13, 0x0E17, 0x0E19, 0x0E1C, 0x0E2B, 0x0E30, 0x0E32,
298 0x0E38, 0x0E39,
299 /* GRAS */
Lynus Vazdb0be0a2017-04-20 18:09:17 +0530300 0x8600, 0x8601, 0x8610, 0x861B, 0x8620, 0x8620, 0x8628, 0x862B,
301 0x8630, 0x8637,
Shrenuj Bansal41665402016-12-16 15:25:54 -0800302 /* VPC */
303 0x9600, 0x9604, 0x9624, 0x9637,
304 /* PC */
305 0x9E00, 0x9E01, 0x9E03, 0x9E0E, 0x9E11, 0x9E16, 0x9E19, 0x9E19,
306 0x9E1C, 0x9E1C, 0x9E20, 0x9E23, 0x9E30, 0x9E31, 0x9E34, 0x9E34,
307 0x9E70, 0x9E72, 0x9E78, 0x9E79, 0x9E80, 0x9FFF,
308 /* VFD */
309 0xA600, 0xA601, 0xA603, 0xA603, 0xA60A, 0xA60A, 0xA610, 0xA617,
Lynus Vazdb0be0a2017-04-20 18:09:17 +0530310 0xA630, 0xA630,
Lynus Vaz3a5a8eb2017-11-08 12:38:10 +0530311 /* SP */
312 0xAE00, 0xAE04, 0xAE0C, 0xAE0C, 0xAE0F, 0xAE2B, 0xAE30, 0xAE32,
313 0xAE35, 0xAE35, 0xAE3A, 0xAE3F, 0xAE50, 0xAE52,
314 /* TP */
315 0xB600, 0xB601, 0xB604, 0xB605, 0xB610, 0xB61B, 0xB620, 0xB623,
316 /* HLSQ */
317 0xBE00, 0xBE01, 0xBE04, 0xBE05, 0xBE08, 0xBE09, 0xBE10, 0xBE15,
318 0xBE20, 0xBE23,
319
Shrenuj Bansal41665402016-12-16 15:25:54 -0800320};
321
Lynus Vaz030473e2017-06-22 17:33:06 +0530322/*
323 * Set of registers to dump for A6XX before actually triggering crash dumper.
324 * Registers in pairs - first value is the start offset, second
325 * is the stop offset (inclusive)
326 */
327static const unsigned int a6xx_pre_crashdumper_registers[] = {
328 /* RBBM: RBBM_STATUS - RBBM_STATUS3 */
329 0x210, 0x213,
330 /* CP: CP_STATUS_1 */
331 0x825, 0x825,
332};
333
Lynus Vaz20c81272017-02-10 16:22:12 +0530334enum a6xx_debugbus_id {
335 A6XX_DBGBUS_CP = 0x1,
336 A6XX_DBGBUS_RBBM = 0x2,
337 A6XX_DBGBUS_VBIF = 0x3,
338 A6XX_DBGBUS_HLSQ = 0x4,
339 A6XX_DBGBUS_UCHE = 0x5,
340 A6XX_DBGBUS_DPM = 0x6,
341 A6XX_DBGBUS_TESS = 0x7,
342 A6XX_DBGBUS_PC = 0x8,
343 A6XX_DBGBUS_VFDP = 0x9,
344 A6XX_DBGBUS_VPC = 0xa,
345 A6XX_DBGBUS_TSE = 0xb,
346 A6XX_DBGBUS_RAS = 0xc,
347 A6XX_DBGBUS_VSC = 0xd,
348 A6XX_DBGBUS_COM = 0xe,
349 A6XX_DBGBUS_LRZ = 0x10,
350 A6XX_DBGBUS_A2D = 0x11,
351 A6XX_DBGBUS_CCUFCHE = 0x12,
Lynus Vazecd472c2017-04-18 14:15:57 +0530352 A6XX_DBGBUS_GMU_CX = 0x13,
Lynus Vaz20c81272017-02-10 16:22:12 +0530353 A6XX_DBGBUS_RBP = 0x14,
354 A6XX_DBGBUS_DCS = 0x15,
355 A6XX_DBGBUS_RBBM_CFG = 0x16,
356 A6XX_DBGBUS_CX = 0x17,
Lynus Vazecd472c2017-04-18 14:15:57 +0530357 A6XX_DBGBUS_GMU_GX = 0x18,
Lynus Vaz20c81272017-02-10 16:22:12 +0530358 A6XX_DBGBUS_TPFCHE = 0x19,
359 A6XX_DBGBUS_GPC = 0x1d,
360 A6XX_DBGBUS_LARC = 0x1e,
361 A6XX_DBGBUS_HLSQ_SPTP = 0x1f,
362 A6XX_DBGBUS_RB_0 = 0x20,
363 A6XX_DBGBUS_RB_1 = 0x21,
364 A6XX_DBGBUS_UCHE_WRAPPER = 0x24,
365 A6XX_DBGBUS_CCU_0 = 0x28,
366 A6XX_DBGBUS_CCU_1 = 0x29,
367 A6XX_DBGBUS_VFD_0 = 0x38,
368 A6XX_DBGBUS_VFD_1 = 0x39,
369 A6XX_DBGBUS_VFD_2 = 0x3a,
370 A6XX_DBGBUS_VFD_3 = 0x3b,
371 A6XX_DBGBUS_SP_0 = 0x40,
372 A6XX_DBGBUS_SP_1 = 0x41,
373 A6XX_DBGBUS_TPL1_0 = 0x48,
374 A6XX_DBGBUS_TPL1_1 = 0x49,
375 A6XX_DBGBUS_TPL1_2 = 0x4a,
376 A6XX_DBGBUS_TPL1_3 = 0x4b,
377};
378
379static const struct adreno_debugbus_block a6xx_dbgc_debugbus_blocks[] = {
380 { A6XX_DBGBUS_CP, 0x100, },
381 { A6XX_DBGBUS_RBBM, 0x100, },
382 { A6XX_DBGBUS_HLSQ, 0x100, },
383 { A6XX_DBGBUS_UCHE, 0x100, },
384 { A6XX_DBGBUS_DPM, 0x100, },
385 { A6XX_DBGBUS_TESS, 0x100, },
386 { A6XX_DBGBUS_PC, 0x100, },
387 { A6XX_DBGBUS_VFDP, 0x100, },
388 { A6XX_DBGBUS_VPC, 0x100, },
389 { A6XX_DBGBUS_TSE, 0x100, },
390 { A6XX_DBGBUS_RAS, 0x100, },
391 { A6XX_DBGBUS_VSC, 0x100, },
392 { A6XX_DBGBUS_COM, 0x100, },
393 { A6XX_DBGBUS_LRZ, 0x100, },
394 { A6XX_DBGBUS_A2D, 0x100, },
395 { A6XX_DBGBUS_CCUFCHE, 0x100, },
396 { A6XX_DBGBUS_RBP, 0x100, },
397 { A6XX_DBGBUS_DCS, 0x100, },
398 { A6XX_DBGBUS_RBBM_CFG, 0x100, },
Lynus Vazecd472c2017-04-18 14:15:57 +0530399 { A6XX_DBGBUS_GMU_GX, 0x100, },
Lynus Vaz20c81272017-02-10 16:22:12 +0530400 { A6XX_DBGBUS_TPFCHE, 0x100, },
401 { A6XX_DBGBUS_GPC, 0x100, },
402 { A6XX_DBGBUS_LARC, 0x100, },
403 { A6XX_DBGBUS_HLSQ_SPTP, 0x100, },
404 { A6XX_DBGBUS_RB_0, 0x100, },
405 { A6XX_DBGBUS_RB_1, 0x100, },
406 { A6XX_DBGBUS_UCHE_WRAPPER, 0x100, },
407 { A6XX_DBGBUS_CCU_0, 0x100, },
408 { A6XX_DBGBUS_CCU_1, 0x100, },
409 { A6XX_DBGBUS_VFD_0, 0x100, },
410 { A6XX_DBGBUS_VFD_1, 0x100, },
411 { A6XX_DBGBUS_VFD_2, 0x100, },
412 { A6XX_DBGBUS_VFD_3, 0x100, },
413 { A6XX_DBGBUS_SP_0, 0x100, },
414 { A6XX_DBGBUS_SP_1, 0x100, },
415 { A6XX_DBGBUS_TPL1_0, 0x100, },
416 { A6XX_DBGBUS_TPL1_1, 0x100, },
417 { A6XX_DBGBUS_TPL1_2, 0x100, },
418 { A6XX_DBGBUS_TPL1_3, 0x100, },
419};
Shrenuj Bansal41665402016-12-16 15:25:54 -0800420
Lynus Vazdaac540732017-07-27 14:23:35 +0530421static const struct adreno_debugbus_block a6xx_vbif_debugbus_blocks = {
422 A6XX_DBGBUS_VBIF, 0x100,
423};
424
Lynus Vazff24c972017-03-07 19:27:46 +0530425static const struct adreno_debugbus_block a6xx_cx_dbgc_debugbus_blocks[] = {
Lynus Vazecd472c2017-04-18 14:15:57 +0530426 { A6XX_DBGBUS_GMU_CX, 0x100, },
Lynus Vazff24c972017-03-07 19:27:46 +0530427 { A6XX_DBGBUS_CX, 0x100, },
428};
429
Lynus Vaz9ad67a32017-03-10 14:55:02 +0530430#define A6XX_NUM_SHADER_BANKS 3
431#define A6XX_SHADER_STATETYPE_SHIFT 8
432
433enum a6xx_shader_obj {
434 A6XX_TP0_TMO_DATA = 0x9,
435 A6XX_TP0_SMO_DATA = 0xa,
436 A6XX_TP0_MIPMAP_BASE_DATA = 0xb,
437 A6XX_TP1_TMO_DATA = 0x19,
438 A6XX_TP1_SMO_DATA = 0x1a,
439 A6XX_TP1_MIPMAP_BASE_DATA = 0x1b,
440 A6XX_SP_INST_DATA = 0x29,
441 A6XX_SP_LB_0_DATA = 0x2a,
442 A6XX_SP_LB_1_DATA = 0x2b,
443 A6XX_SP_LB_2_DATA = 0x2c,
444 A6XX_SP_LB_3_DATA = 0x2d,
445 A6XX_SP_LB_4_DATA = 0x2e,
446 A6XX_SP_LB_5_DATA = 0x2f,
447 A6XX_SP_CB_BINDLESS_DATA = 0x30,
448 A6XX_SP_CB_LEGACY_DATA = 0x31,
449 A6XX_SP_UAV_DATA = 0x32,
450 A6XX_SP_INST_TAG = 0x33,
451 A6XX_SP_CB_BINDLESS_TAG = 0x34,
452 A6XX_SP_TMO_UMO_TAG = 0x35,
453 A6XX_SP_SMO_TAG = 0x36,
454 A6XX_SP_STATE_DATA = 0x37,
455 A6XX_HLSQ_CHUNK_CVS_RAM = 0x49,
456 A6XX_HLSQ_CHUNK_CPS_RAM = 0x4a,
457 A6XX_HLSQ_CHUNK_CVS_RAM_TAG = 0x4b,
458 A6XX_HLSQ_CHUNK_CPS_RAM_TAG = 0x4c,
459 A6XX_HLSQ_ICB_CVS_CB_BASE_TAG = 0x4d,
460 A6XX_HLSQ_ICB_CPS_CB_BASE_TAG = 0x4e,
461 A6XX_HLSQ_CVS_MISC_RAM = 0x50,
462 A6XX_HLSQ_CPS_MISC_RAM = 0x51,
463 A6XX_HLSQ_INST_RAM = 0x52,
464 A6XX_HLSQ_GFX_CVS_CONST_RAM = 0x53,
465 A6XX_HLSQ_GFX_CPS_CONST_RAM = 0x54,
466 A6XX_HLSQ_CVS_MISC_RAM_TAG = 0x55,
467 A6XX_HLSQ_CPS_MISC_RAM_TAG = 0x56,
468 A6XX_HLSQ_INST_RAM_TAG = 0x57,
469 A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG = 0x58,
470 A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG = 0x59,
471 A6XX_HLSQ_PWR_REST_RAM = 0x5a,
472 A6XX_HLSQ_PWR_REST_TAG = 0x5b,
473 A6XX_HLSQ_DATAPATH_META = 0x60,
474 A6XX_HLSQ_FRONTEND_META = 0x61,
475 A6XX_HLSQ_INDIRECT_META = 0x62,
476 A6XX_HLSQ_BACKEND_META = 0x63
477};
478
479struct a6xx_shader_block {
480 unsigned int statetype;
481 unsigned int sz;
482 uint64_t offset;
483};
484
485struct a6xx_shader_block_info {
486 struct a6xx_shader_block *block;
487 unsigned int bank;
488 uint64_t offset;
489};
490
491static struct a6xx_shader_block a6xx_shader_blocks[] = {
492 {A6XX_TP0_TMO_DATA, 0x200},
493 {A6XX_TP0_SMO_DATA, 0x80,},
494 {A6XX_TP0_MIPMAP_BASE_DATA, 0x3C0},
495 {A6XX_TP1_TMO_DATA, 0x200},
496 {A6XX_TP1_SMO_DATA, 0x80,},
497 {A6XX_TP1_MIPMAP_BASE_DATA, 0x3C0},
498 {A6XX_SP_INST_DATA, 0x800},
499 {A6XX_SP_LB_0_DATA, 0x800},
500 {A6XX_SP_LB_1_DATA, 0x800},
501 {A6XX_SP_LB_2_DATA, 0x800},
502 {A6XX_SP_LB_3_DATA, 0x800},
503 {A6XX_SP_LB_4_DATA, 0x800},
504 {A6XX_SP_LB_5_DATA, 0x200},
505 {A6XX_SP_CB_BINDLESS_DATA, 0x2000},
506 {A6XX_SP_CB_LEGACY_DATA, 0x280,},
507 {A6XX_SP_UAV_DATA, 0x80,},
508 {A6XX_SP_INST_TAG, 0x80,},
509 {A6XX_SP_CB_BINDLESS_TAG, 0x80,},
510 {A6XX_SP_TMO_UMO_TAG, 0x80,},
511 {A6XX_SP_SMO_TAG, 0x80},
512 {A6XX_SP_STATE_DATA, 0x3F},
513 {A6XX_HLSQ_CHUNK_CVS_RAM, 0x1C0},
514 {A6XX_HLSQ_CHUNK_CPS_RAM, 0x280},
515 {A6XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40,},
516 {A6XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40,},
517 {A6XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x4,},
518 {A6XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x4,},
519 {A6XX_HLSQ_CVS_MISC_RAM, 0x1C0},
520 {A6XX_HLSQ_CPS_MISC_RAM, 0x580},
521 {A6XX_HLSQ_INST_RAM, 0x800},
522 {A6XX_HLSQ_GFX_CVS_CONST_RAM, 0x800},
523 {A6XX_HLSQ_GFX_CPS_CONST_RAM, 0x800},
524 {A6XX_HLSQ_CVS_MISC_RAM_TAG, 0x8,},
525 {A6XX_HLSQ_CPS_MISC_RAM_TAG, 0x4,},
526 {A6XX_HLSQ_INST_RAM_TAG, 0x80,},
527 {A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0xC,},
528 {A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x10},
529 {A6XX_HLSQ_PWR_REST_RAM, 0x28},
530 {A6XX_HLSQ_PWR_REST_TAG, 0x14},
531 {A6XX_HLSQ_DATAPATH_META, 0x40,},
532 {A6XX_HLSQ_FRONTEND_META, 0x40},
533 {A6XX_HLSQ_INDIRECT_META, 0x40,}
534};
535
Shrenuj Bansal41665402016-12-16 15:25:54 -0800536static struct kgsl_memdesc a6xx_capturescript;
537static struct kgsl_memdesc a6xx_crashdump_registers;
538static bool crash_dump_valid;
539
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -0600540static struct reg_list {
Shrenuj Bansal41665402016-12-16 15:25:54 -0800541 const unsigned int *regs;
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -0600542 unsigned int count;
543 const struct sel_reg *sel;
Lynus Vaz1bba57b2017-09-26 11:55:04 +0530544 uint64_t offset;
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -0600545} a6xx_reg_list[] = {
546 { a6xx_registers, ARRAY_SIZE(a6xx_registers) / 2, NULL },
547 { a6xx_rb_rac_registers, ARRAY_SIZE(a6xx_rb_rac_registers) / 2,
548 &_a6xx_rb_rac_aperture },
549 { a6xx_rb_rbp_registers, ARRAY_SIZE(a6xx_rb_rbp_registers) / 2,
550 &_a6xx_rb_rbp_aperture },
Shrenuj Bansal41665402016-12-16 15:25:54 -0800551};
552
553#define REG_PAIR_COUNT(_a, _i) \
554 (((_a)[(2 * (_i)) + 1] - (_a)[2 * (_i)]) + 1)
555
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -0600556static size_t a6xx_legacy_snapshot_registers(struct kgsl_device *device,
Lynus Vaz96de8522017-09-13 20:17:03 +0530557 u8 *buf, size_t remain, struct reg_list *regs)
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -0600558{
Lynus Vaz96de8522017-09-13 20:17:03 +0530559 struct kgsl_snapshot_registers snapshot_regs = {
560 .regs = regs->regs,
561 .count = regs->count,
562 };
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -0600563
Lynus Vaz96de8522017-09-13 20:17:03 +0530564 if (regs->sel)
565 kgsl_regwrite(device, regs->sel->host_reg, regs->sel->val);
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -0600566
Lynus Vaz96de8522017-09-13 20:17:03 +0530567 return kgsl_snapshot_dump_registers(device, buf, remain,
568 &snapshot_regs);
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -0600569}
570
/*
 * Dump one reg_list entry into the snapshot buffer.  If the crash dumper
 * ran, copy the values it already captured out of
 * a6xx_crashdump_registers; otherwise fall back to direct AHB reads.
 *
 * Section format: a kgsl_snapshot_regs header followed by
 * (address, value) dword pairs - 8 bytes per register.
 * Returns the number of bytes written (0 on header overflow).
 */
static size_t a6xx_snapshot_registers(struct kgsl_device *device, u8 *buf,
		size_t remain, void *priv)
{
	struct kgsl_snapshot_regs *header = (struct kgsl_snapshot_regs *)buf;
	struct reg_list *regs = (struct reg_list *)priv;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int *src;
	unsigned int j, k;
	unsigned int count = 0;

	if (crash_dump_valid == false)
		return a6xx_legacy_snapshot_registers(device, buf, remain,
				regs);

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	/*
	 * The crash dumper wrote this list's values contiguously starting at
	 * regs->offset, in the same order as the range table - so src is
	 * simply advanced in lockstep with the table walk below.
	 */
	src = (unsigned int *)(a6xx_crashdump_registers.hostptr + regs->offset);
	remain -= sizeof(*header);

	for (j = 0; j < regs->count; j++) {
		unsigned int start = regs->regs[2 * j];
		unsigned int end = regs->regs[(2 * j) + 1];

		/* 8 bytes per register: address dword + value dword */
		if (remain < ((end - start) + 1) * 8) {
			SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
			goto out;
		}

		remain -= ((end - start) + 1) * 8;

		for (k = start; k <= end; k++, count++) {
			*data++ = k;
			*data++ = *src++;
		}
	}

out:
	/* Record how many registers actually made it into the section */
	header->count = count;

	/* Return the size of the section */
	return (count * 8) + sizeof(*header);
}
616
Lynus Vaz030473e2017-06-22 17:33:06 +0530617static size_t a6xx_snapshot_pre_crashdump_regs(struct kgsl_device *device,
618 u8 *buf, size_t remain, void *priv)
619{
620 struct kgsl_snapshot_registers pre_cdregs = {
621 .regs = a6xx_pre_crashdumper_registers,
622 .count = ARRAY_SIZE(a6xx_pre_crashdumper_registers)/2,
623 };
624
625 return kgsl_snapshot_dump_registers(device, buf, remain, &pre_cdregs);
626}
627
Lynus Vaz9ad67a32017-03-10 14:55:02 +0530628static size_t a6xx_snapshot_shader_memory(struct kgsl_device *device,
629 u8 *buf, size_t remain, void *priv)
630{
631 struct kgsl_snapshot_shader *header =
632 (struct kgsl_snapshot_shader *) buf;
633 struct a6xx_shader_block_info *info =
634 (struct a6xx_shader_block_info *) priv;
635 struct a6xx_shader_block *block = info->block;
636 unsigned int *data = (unsigned int *) (buf + sizeof(*header));
637
638 if (remain < SHADER_SECTION_SZ(block->sz)) {
639 SNAPSHOT_ERR_NOMEM(device, "SHADER MEMORY");
640 return 0;
641 }
642
643 header->type = block->statetype;
644 header->index = info->bank;
645 header->size = block->sz;
646
647 memcpy(data, a6xx_crashdump_registers.hostptr + info->offset,
Lynus Vaz24f75eb2017-11-22 11:25:04 +0530648 block->sz * sizeof(unsigned int));
Lynus Vaz9ad67a32017-03-10 14:55:02 +0530649
650 return SHADER_SECTION_SZ(block->sz);
651}
652
653static void a6xx_snapshot_shader(struct kgsl_device *device,
654 struct kgsl_snapshot *snapshot)
655{
656 unsigned int i, j;
657 struct a6xx_shader_block_info info;
658
659 /* Shader blocks can only be read by the crash dumper */
660 if (crash_dump_valid == false)
661 return;
662
663 for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
664 for (j = 0; j < A6XX_NUM_SHADER_BANKS; j++) {
665 info.block = &a6xx_shader_blocks[i];
666 info.bank = j;
667 info.offset = a6xx_shader_blocks[i].offset +
668 (j * a6xx_shader_blocks[i].sz);
669
670 /* Shader working/shadow memory */
671 kgsl_snapshot_add_section(device,
672 KGSL_SNAPSHOT_SECTION_SHADER,
673 snapshot, a6xx_snapshot_shader_memory, &info);
674 }
675 }
676}
677
/*
 * Dump the CP mempool through its indexed debug registers.  The pool size
 * register is forced to 0 while reading so the pool contents stay stable,
 * then the saved size is patched back into the captured data so the
 * snapshot remains self-consistent.
 */
static void a6xx_snapshot_mempool(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot)
{
	unsigned int pool_size;
	u8 *buf = snapshot->ptr;

	/* Set the mempool size to 0 to stabilize it while dumping */
	kgsl_regread(device, A6XX_CP_MEM_POOL_SIZE, &pool_size);
	kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, 0);

	kgsl_snapshot_indexed_registers(device, snapshot,
			A6XX_CP_MEM_POOL_DBG_ADDR, A6XX_CP_MEM_POOL_DBG_DATA,
			0, 0x2060);

	/*
	 * Data at offset 0x2000 in the mempool section is the mempool size.
	 * Since we set it to 0, patch in the original size so that the data
	 * is consistent.
	 */
	if (buf < snapshot->ptr) {
		/* snapshot->ptr advanced, so the section was actually written */
		unsigned int *data;

		/* Skip over the headers */
		buf += sizeof(struct kgsl_snapshot_section_header) +
				sizeof(struct kgsl_snapshot_indexed_regs);

		/* 0x2000 is a dword offset into the dumped payload */
		data = (unsigned int *)buf + 0x2000;
		*data = pool_size;
	}

	/* Restore the saved mempool size */
	kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, pool_size);
}
711
/*
 * Read one dword through the HLSQ debug AHB read aperture.
 * @regbase: sub-block base as a byte address (e.g. 0x2E000)
 * @reg: absolute register offset in dwords
 *
 * NOTE(review): due to operator precedence this evaluates as
 * reg - (regbase / 4).  That appears intentional - regbase values in
 * a6xx_dbgahb_ctx_clusters are byte addresses while reg and the aperture
 * are dword offsets - but confirm against the hardware documentation.
 */
static inline unsigned int a6xx_read_dbgahb(struct kgsl_device *device,
		unsigned int regbase, unsigned int reg)
{
	unsigned int read_reg = A6XX_HLSQ_DBG_AHB_READ_APERTURE +
				reg - regbase / 4;
	unsigned int val;

	kgsl_regread(device, read_reg, &val);
	return val;
}
722
/*
 * Legacy (direct AHB read) dump of one debug-AHB context cluster, used
 * when the crash dumper is unavailable and the legacy snapshot mode was
 * requested.  Section format: a kgsl_snapshot_mvc_regs header, then for
 * each range a {start | (1 << 31), end} marker pair followed by the
 * register values.  Returns bytes written (0 when legacy mode is off or
 * the header does not fit).
 */
static size_t a6xx_legacy_snapshot_cluster_dbgahb(struct kgsl_device *device,
		u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_mvc_regs *header =
		(struct kgsl_snapshot_mvc_regs *)buf;
	struct a6xx_cluster_dbgahb_regs_info *info =
		(struct a6xx_cluster_dbgahb_regs_info *)priv;
	struct a6xx_cluster_dbgahb_registers *cur_cluster = info->cluster;
	unsigned int read_sel;
	unsigned int data_size = 0;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	int i, j;

	/* Only dump in legacy mode; otherwise this data is simply skipped */
	if (!device->snapshot_legacy)
		return 0;

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	remain -= sizeof(*header);

	header->ctxt_id = info->ctxt_id;
	header->cluster_id = cur_cluster->id;

	/* Select which statetype/context the read aperture exposes */
	read_sel = ((cur_cluster->statetype + info->ctxt_id * 2) & 0xff) << 8;
	kgsl_regwrite(device, A6XX_HLSQ_DBG_READ_SEL, read_sel);

	for (i = 0; i < cur_cluster->num_sets; i++) {
		unsigned int start = cur_cluster->regs[2 * i];
		unsigned int end = cur_cluster->regs[2 * i + 1];

		/* 2 marker dwords + one value dword per register */
		if (remain < (end - start + 3) * 4) {
			SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
			goto out;
		}

		remain -= (end - start + 3) * 4;
		data_size += (end - start + 3) * 4;

		*data++ = start | (1 << 31);
		*data++ = end;

		for (j = start; j <= end; j++) {
			unsigned int val;

			val = a6xx_read_dbgahb(device, cur_cluster->regbase, j);
			*data++ = val;

		}
	}

out:
	return data_size + sizeof(*header);
}
779
Lynus Vaz1e258612017-04-27 21:35:22 +0530780static size_t a6xx_snapshot_cluster_dbgahb(struct kgsl_device *device, u8 *buf,
781 size_t remain, void *priv)
782{
783 struct kgsl_snapshot_mvc_regs *header =
784 (struct kgsl_snapshot_mvc_regs *)buf;
785 struct a6xx_cluster_dbgahb_regs_info *info =
786 (struct a6xx_cluster_dbgahb_regs_info *)priv;
787 struct a6xx_cluster_dbgahb_registers *cluster = info->cluster;
788 unsigned int data_size = 0;
789 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
790 int i, j;
791 unsigned int *src;
792
793
794 if (crash_dump_valid == false)
795 return a6xx_legacy_snapshot_cluster_dbgahb(device, buf, remain,
796 info);
797
798 if (remain < sizeof(*header)) {
799 SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
800 return 0;
801 }
802
803 remain -= sizeof(*header);
804
805 header->ctxt_id = info->ctxt_id;
806 header->cluster_id = cluster->id;
807
808 src = (unsigned int *)(a6xx_crashdump_registers.hostptr +
809 (header->ctxt_id ? cluster->offset1 : cluster->offset0));
810
811 for (i = 0; i < cluster->num_sets; i++) {
812 unsigned int start;
813 unsigned int end;
814
815 start = cluster->regs[2 * i];
816 end = cluster->regs[2 * i + 1];
817
818 if (remain < (end - start + 3) * 4) {
819 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
820 goto out;
821 }
822
823 remain -= (end - start + 3) * 4;
824 data_size += (end - start + 3) * 4;
825
826 *data++ = start | (1 << 31);
827 *data++ = end;
828 for (j = start; j <= end; j++)
829 *data++ = *src++;
830 }
831out:
832 return data_size + sizeof(*header);
833}
834
Lynus Vaz461e2382017-01-16 19:35:41 +0530835static void a6xx_snapshot_dbgahb_regs(struct kgsl_device *device,
836 struct kgsl_snapshot *snapshot)
837{
838 int i, j;
839
840 for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
841 struct a6xx_cluster_dbgahb_registers *cluster =
842 &a6xx_dbgahb_ctx_clusters[i];
843 struct a6xx_cluster_dbgahb_regs_info info;
844
845 info.cluster = cluster;
846 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
847 info.ctxt_id = j;
848
849 kgsl_snapshot_add_section(device,
850 KGSL_SNAPSHOT_SECTION_MVC, snapshot,
851 a6xx_snapshot_cluster_dbgahb, &info);
852 }
853 }
Lynus Vaz461e2382017-01-16 19:35:41 +0530854}
855
Shrenuj Bansal41665402016-12-16 15:25:54 -0800856static size_t a6xx_legacy_snapshot_mvc(struct kgsl_device *device, u8 *buf,
857 size_t remain, void *priv)
858{
859 struct kgsl_snapshot_mvc_regs *header =
860 (struct kgsl_snapshot_mvc_regs *)buf;
861 struct a6xx_cluster_regs_info *info =
862 (struct a6xx_cluster_regs_info *)priv;
863 struct a6xx_cluster_registers *cur_cluster = info->cluster;
864 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
865 unsigned int ctxt = info->ctxt_id;
866 unsigned int start, end, i, j, aperture_cntl = 0;
867 unsigned int data_size = 0;
868
869 if (remain < sizeof(*header)) {
870 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
871 return 0;
872 }
873
874 remain -= sizeof(*header);
875
876 header->ctxt_id = info->ctxt_id;
877 header->cluster_id = cur_cluster->id;
878
879 /*
880 * Set the AHB control for the Host to read from the
881 * cluster/context for this iteration.
882 */
883 aperture_cntl = ((cur_cluster->id & 0x7) << 8) | (ctxt << 4) | ctxt;
884 kgsl_regwrite(device, A6XX_CP_APERTURE_CNTL_HOST, aperture_cntl);
885
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -0600886 if (cur_cluster->sel)
887 kgsl_regwrite(device, cur_cluster->sel->host_reg,
888 cur_cluster->sel->val);
889
Shrenuj Bansal41665402016-12-16 15:25:54 -0800890 for (i = 0; i < cur_cluster->num_sets; i++) {
891 start = cur_cluster->regs[2 * i];
892 end = cur_cluster->regs[2 * i + 1];
893
894 if (remain < (end - start + 3) * 4) {
895 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
896 goto out;
897 }
898
899 remain -= (end - start + 3) * 4;
900 data_size += (end - start + 3) * 4;
901
902 *data++ = start | (1 << 31);
903 *data++ = end;
904 for (j = start; j <= end; j++) {
905 unsigned int val;
906
907 kgsl_regread(device, j, &val);
908 *data++ = val;
909 }
910 }
911out:
912 return data_size + sizeof(*header);
913}
914
915static size_t a6xx_snapshot_mvc(struct kgsl_device *device, u8 *buf,
916 size_t remain, void *priv)
917{
918 struct kgsl_snapshot_mvc_regs *header =
919 (struct kgsl_snapshot_mvc_regs *)buf;
920 struct a6xx_cluster_regs_info *info =
921 (struct a6xx_cluster_regs_info *)priv;
922 struct a6xx_cluster_registers *cluster = info->cluster;
923 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
924 unsigned int *src;
925 int i, j;
926 unsigned int start, end;
927 size_t data_size = 0;
928
929 if (crash_dump_valid == false)
930 return a6xx_legacy_snapshot_mvc(device, buf, remain, info);
931
932 if (remain < sizeof(*header)) {
933 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
934 return 0;
935 }
936
937 remain -= sizeof(*header);
938
939 header->ctxt_id = info->ctxt_id;
940 header->cluster_id = cluster->id;
941
942 src = (unsigned int *)(a6xx_crashdump_registers.hostptr +
943 (header->ctxt_id ? cluster->offset1 : cluster->offset0));
944
945 for (i = 0; i < cluster->num_sets; i++) {
946 start = cluster->regs[2 * i];
947 end = cluster->regs[2 * i + 1];
948
949 if (remain < (end - start + 3) * 4) {
950 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
951 goto out;
952 }
953
954 remain -= (end - start + 3) * 4;
955 data_size += (end - start + 3) * 4;
956
957 *data++ = start | (1 << 31);
958 *data++ = end;
959 for (j = start; j <= end; j++)
960 *data++ = *src++;
961 }
962
963out:
964 return data_size + sizeof(*header);
965
966}
967
968static void a6xx_snapshot_mvc_regs(struct kgsl_device *device,
969 struct kgsl_snapshot *snapshot)
970{
971 int i, j;
972 struct a6xx_cluster_regs_info info;
973
974 for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
975 struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];
976
977 info.cluster = cluster;
978 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
979 info.ctxt_id = j;
980
981 kgsl_snapshot_add_section(device,
982 KGSL_SNAPSHOT_SECTION_MVC, snapshot,
983 a6xx_snapshot_mvc, &info);
984 }
985 }
986}
987
/*
 * a6xx_dbgc_debug_bus_read() - Read data from trace bus
 * @device: KGSL device pointer
 * @block_id: Debug bus block to select
 * @index: Index within the selected block
 * @val: Output; receives two dwords (val[0] from TRACE_BUF2, val[1] from
 *       TRACE_BUF1)
 */
static void a6xx_dbgc_debug_bus_read(struct kgsl_device *device,
	unsigned int block_id, unsigned int index, unsigned int *val)
{
	unsigned int reg;

	reg = (block_id << A6XX_DBGC_CFG_DBGBUS_SEL_PING_BLK_SEL_SHIFT) |
		(index << A6XX_DBGC_CFG_DBGBUS_SEL_PING_INDEX_SHIFT);

	/* Program the same selection into all four select registers */
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_A, reg);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_B, reg);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_C, reg);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_D, reg);

	/*
	 * There needs to be a delay of 1 us to ensure enough time for correct
	 * data is funneled into the trace buffer
	 */
	udelay(1);

	kgsl_regread(device, A6XX_DBGC_CFG_DBGBUS_TRACE_BUF2, val);
	val++;
	kgsl_regread(device, A6XX_DBGC_CFG_DBGBUS_TRACE_BUF1, val);
}
1012
/*
 * a6xx_snapshot_dbgc_debugbus_block() - Capture debug data for a gpu block
 * @device: Device being snapshotted
 * @buf: Destination snapshot buffer
 * @remain: Bytes left in the snapshot buffer
 * @priv: Pointer to the struct adreno_debugbus_block to capture
 *
 * Return: number of bytes written to the snapshot buffer (0 on failure).
 */
static size_t a6xx_snapshot_dbgc_debugbus_block(struct kgsl_device *device,
	u8 *buf, size_t remain, void *priv)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct kgsl_snapshot_debugbus *header =
		(struct kgsl_snapshot_debugbus *)buf;
	struct adreno_debugbus_block *block = priv;
	int i;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int dwords;
	unsigned int block_id;
	size_t size;

	dwords = block->dwords;

	/* For a6xx each debug bus data unit is 2 DWORDS */
	size = (dwords * sizeof(unsigned int) * 2) + sizeof(*header);

	if (remain < size) {
		SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
		return 0;
	}

	header->id = block->block_id;
	header->count = dwords * 2;

	block_id = block->block_id;
	/* GMU_GX data is read using the GMU_CX block id on A630 */
	if (adreno_is_a630(adreno_dev) &&
		(block_id == A6XX_DBGBUS_GMU_GX))
		block_id = A6XX_DBGBUS_GMU_CX;

	/* Each index yields a pair of dwords, stored consecutively */
	for (i = 0; i < dwords; i++)
		a6xx_dbgc_debug_bus_read(device, block_id, i, &data[i*2]);

	return size;
}
1051
/*
 * a6xx_snapshot_vbif_debugbus_block() - Capture debug data for VBIF block
 * @device: Device being snapshotted
 * @buf: Destination snapshot buffer
 * @remain: Bytes left in the snapshot buffer
 * @priv: Pointer to the struct adreno_debugbus_block for VBIF
 *
 * Walks the three VBIF test-bus sections (AXI arbiter, AXI-side XIN, core
 * clock side XIN) and records one dword per selected data word.
 *
 * Return: number of bytes written to the snapshot buffer (0 on failure).
 */
static size_t a6xx_snapshot_vbif_debugbus_block(struct kgsl_device *device,
			u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_debugbus *header =
		(struct kgsl_snapshot_debugbus *)buf;
	struct adreno_debugbus_block *block = priv;
	int i, j;
	/*
	 * Total number of VBIF data words considering 3 sections:
	 * 2 arbiter blocks of 16 words
	 * 5 AXI XIN blocks of 18 dwords each
	 * 4 core clock side XIN blocks of 12 dwords each
	 */
	unsigned int dwords = (16 * A6XX_NUM_AXI_ARB_BLOCKS) +
			(18 * A6XX_NUM_XIN_AXI_BLOCKS) +
			(12 * A6XX_NUM_XIN_CORE_BLOCKS);
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	size_t size;
	unsigned int reg_clk;

	size = (dwords * sizeof(unsigned int)) + sizeof(*header);

	if (remain < size) {
		SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
		return 0;
	}
	header->id = block->block_id;
	header->count = dwords;

	/* Save the clock register so it can be restored when we are done */
	kgsl_regread(device, A6XX_VBIF_CLKON, &reg_clk);
	/* Force the testbus clock on so the data is readable */
	kgsl_regwrite(device, A6XX_VBIF_CLKON, reg_clk |
			(A6XX_VBIF_CLKON_FORCE_ON_TESTBUS_MASK <<
			A6XX_VBIF_CLKON_FORCE_ON_TESTBUS_SHIFT));
	kgsl_regwrite(device, A6XX_VBIF_TEST_BUS1_CTRL0, 0);
	kgsl_regwrite(device, A6XX_VBIF_TEST_BUS_OUT_CTRL,
			(A6XX_VBIF_TEST_BUS_OUT_CTRL_EN_MASK <<
			A6XX_VBIF_TEST_BUS_OUT_CTRL_EN_SHIFT));

	/* AXI arbiter blocks (selected via the upper half of BUS2_CTRL0) */
	for (i = 0; i < A6XX_NUM_AXI_ARB_BLOCKS; i++) {
		kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL0,
			(1 << (i + 16)));
		for (j = 0; j < 16; j++) {
			kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL1,
				((j & A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL_MASK)
				<< A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL_SHIFT));
			kgsl_regread(device, A6XX_VBIF_TEST_BUS_OUT,
				data);
			data++;
		}
	}

	/* XIN blocks AXI side */
	for (i = 0; i < A6XX_NUM_XIN_AXI_BLOCKS; i++) {
		kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL0, 1 << i);
		for (j = 0; j < 18; j++) {
			kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL1,
				((j & A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL_MASK)
				<< A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL_SHIFT));
			kgsl_regread(device, A6XX_VBIF_TEST_BUS_OUT,
				data);
			data++;
		}
	}
	kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL0, 0);

	/* XIN blocks core clock side */
	for (i = 0; i < A6XX_NUM_XIN_CORE_BLOCKS; i++) {
		kgsl_regwrite(device, A6XX_VBIF_TEST_BUS1_CTRL0, 1 << i);
		for (j = 0; j < 12; j++) {
			kgsl_regwrite(device, A6XX_VBIF_TEST_BUS1_CTRL1,
				((j & A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL_MASK)
				<< A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL_SHIFT));
			kgsl_regread(device, A6XX_VBIF_TEST_BUS_OUT,
				data);
			data++;
		}
	}
	/* restore the clock of VBIF */
	kgsl_regwrite(device, A6XX_VBIF_CLKON, reg_clk);
	return size;
}
1134
/*
 * a6xx_cx_debug_bus_read() - Read data from the CX DBGC trace bus
 * @device: KGSL device pointer
 * @block_id: Debug bus block to select
 * @index: Index within the selected block
 * @val: Output; receives two dwords (val[0] from TRACE_BUF2, val[1] from
 *       TRACE_BUF1)
 */
static void a6xx_cx_debug_bus_read(struct kgsl_device *device,
	unsigned int block_id, unsigned int index, unsigned int *val)
{
	unsigned int reg;

	reg = (block_id << A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_BLK_SEL_SHIFT) |
		(index << A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_INDEX_SHIFT);

	/* Program the same selection into all four CX select registers */
	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_SEL_A, reg);
	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_SEL_B, reg);
	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_SEL_C, reg);
	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_SEL_D, reg);

	/*
	 * There needs to be a delay of 1 us to ensure enough time for correct
	 * data is funneled into the trace buffer
	 */
	udelay(1);

	adreno_cx_dbgc_regread(device, A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2, val);
	val++;
	adreno_cx_dbgc_regread(device, A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1, val);
}
1159
/*
 * a6xx_snapshot_cx_dbgc_debugbus_block() - Capture debug data for a gpu
 * block from the CX DBGC block
 * @device: Device being snapshotted
 * @buf: Destination snapshot buffer
 * @remain: Bytes left in the snapshot buffer
 * @priv: Pointer to the struct adreno_debugbus_block to capture
 *
 * Return: number of bytes written to the snapshot buffer (0 on failure).
 */
static size_t a6xx_snapshot_cx_dbgc_debugbus_block(struct kgsl_device *device,
	u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_debugbus *header =
		(struct kgsl_snapshot_debugbus *)buf;
	struct adreno_debugbus_block *block = priv;
	int i;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int dwords;
	size_t size;

	dwords = block->dwords;

	/* For a6xx each debug bus data unit is 2 DWORDS */
	size = (dwords * sizeof(unsigned int) * 2) + sizeof(*header);

	if (remain < size) {
		SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
		return 0;
	}

	header->id = block->block_id;
	header->count = dwords * 2;

	/* Each index yields a pair of dwords, stored consecutively */
	for (i = 0; i < dwords; i++)
		a6xx_cx_debug_bus_read(device, block->block_id, i,
					&data[i*2]);

	return size;
}
1194
Lynus Vaz20c81272017-02-10 16:22:12 +05301195/* a6xx_snapshot_debugbus() - Capture debug bus data */
1196static void a6xx_snapshot_debugbus(struct kgsl_device *device,
1197 struct kgsl_snapshot *snapshot)
1198{
1199 int i;
Rajesh Kemisetti77b82ed2017-09-24 20:42:41 +05301200 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
Lynus Vaz20c81272017-02-10 16:22:12 +05301201
1202 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_CNTLT,
1203 (0xf << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT_SHIFT) |
Shrenuj Bansald4508ba2017-05-11 15:59:37 -07001204 (0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU_SHIFT) |
1205 (0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN_SHIFT));
Lynus Vaz20c81272017-02-10 16:22:12 +05301206
1207 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_CNTLM,
1208 0xf << A6XX_DBGC_CFG_DBGBUS_CTLTM_ENABLE_SHIFT);
1209
1210 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_0, 0);
1211 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_1, 0);
1212 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_2, 0);
1213 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_3, 0);
1214
1215 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_BYTEL_0,
1216 (0 << A6XX_DBGC_CFG_DBGBUS_BYTEL0_SHIFT) |
1217 (1 << A6XX_DBGC_CFG_DBGBUS_BYTEL1_SHIFT) |
1218 (2 << A6XX_DBGC_CFG_DBGBUS_BYTEL2_SHIFT) |
1219 (3 << A6XX_DBGC_CFG_DBGBUS_BYTEL3_SHIFT) |
1220 (4 << A6XX_DBGC_CFG_DBGBUS_BYTEL4_SHIFT) |
1221 (5 << A6XX_DBGC_CFG_DBGBUS_BYTEL5_SHIFT) |
1222 (6 << A6XX_DBGC_CFG_DBGBUS_BYTEL6_SHIFT) |
1223 (7 << A6XX_DBGC_CFG_DBGBUS_BYTEL7_SHIFT));
1224 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_BYTEL_1,
1225 (8 << A6XX_DBGC_CFG_DBGBUS_BYTEL8_SHIFT) |
1226 (9 << A6XX_DBGC_CFG_DBGBUS_BYTEL9_SHIFT) |
1227 (10 << A6XX_DBGC_CFG_DBGBUS_BYTEL10_SHIFT) |
1228 (11 << A6XX_DBGC_CFG_DBGBUS_BYTEL11_SHIFT) |
1229 (12 << A6XX_DBGC_CFG_DBGBUS_BYTEL12_SHIFT) |
1230 (13 << A6XX_DBGC_CFG_DBGBUS_BYTEL13_SHIFT) |
1231 (14 << A6XX_DBGC_CFG_DBGBUS_BYTEL14_SHIFT) |
1232 (15 << A6XX_DBGC_CFG_DBGBUS_BYTEL15_SHIFT));
1233
1234 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_0, 0);
1235 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_1, 0);
1236 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_2, 0);
1237 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_3, 0);
1238
Lynus Vaz9fdc1d22017-09-21 22:06:14 +05301239 adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_CNTLT,
Lynus Vazff24c972017-03-07 19:27:46 +05301240 (0xf << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT_SHIFT) |
Shrenuj Bansald4508ba2017-05-11 15:59:37 -07001241 (0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU_SHIFT) |
1242 (0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN_SHIFT));
Lynus Vazff24c972017-03-07 19:27:46 +05301243
Lynus Vaz9fdc1d22017-09-21 22:06:14 +05301244 adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_CNTLM,
1245 0xf << A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE_SHIFT);
Lynus Vazff24c972017-03-07 19:27:46 +05301246
Lynus Vaz9fdc1d22017-09-21 22:06:14 +05301247 adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0, 0);
1248 adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1, 0);
1249 adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2, 0);
1250 adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3, 0);
Lynus Vazff24c972017-03-07 19:27:46 +05301251
Lynus Vaz9fdc1d22017-09-21 22:06:14 +05301252 adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0,
1253 (0 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL0_SHIFT) |
1254 (1 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL1_SHIFT) |
1255 (2 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL2_SHIFT) |
1256 (3 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL3_SHIFT) |
1257 (4 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL4_SHIFT) |
1258 (5 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL5_SHIFT) |
1259 (6 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL6_SHIFT) |
1260 (7 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL7_SHIFT));
1261 adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1,
1262 (8 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL8_SHIFT) |
1263 (9 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL9_SHIFT) |
1264 (10 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL10_SHIFT) |
1265 (11 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL11_SHIFT) |
1266 (12 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL12_SHIFT) |
1267 (13 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL13_SHIFT) |
1268 (14 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL14_SHIFT) |
1269 (15 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL15_SHIFT));
Lynus Vazff24c972017-03-07 19:27:46 +05301270
Lynus Vaz9fdc1d22017-09-21 22:06:14 +05301271 adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0, 0);
1272 adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1, 0);
1273 adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2, 0);
1274 adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3, 0);
Lynus Vazff24c972017-03-07 19:27:46 +05301275
Lynus Vaz20c81272017-02-10 16:22:12 +05301276 for (i = 0; i < ARRAY_SIZE(a6xx_dbgc_debugbus_blocks); i++) {
1277 kgsl_snapshot_add_section(device,
1278 KGSL_SNAPSHOT_SECTION_DEBUGBUS,
1279 snapshot, a6xx_snapshot_dbgc_debugbus_block,
1280 (void *) &a6xx_dbgc_debugbus_blocks[i]);
1281 }
Rajesh Kemisettib36bb492017-11-20 10:49:27 +05301282 /*
1283 * GBIF has same debugbus as of other GPU blocks hence fall back to
1284 * default path if GPU uses GBIF.
1285 * GBIF uses exactly same ID as of VBIF so use it as it is.
1286 */
1287 if (adreno_has_gbif(adreno_dev))
Rajesh Kemisetti77b82ed2017-09-24 20:42:41 +05301288 kgsl_snapshot_add_section(device,
Rajesh Kemisettib36bb492017-11-20 10:49:27 +05301289 KGSL_SNAPSHOT_SECTION_DEBUGBUS,
1290 snapshot, a6xx_snapshot_dbgc_debugbus_block,
1291 (void *) &a6xx_vbif_debugbus_blocks);
1292 else
1293 kgsl_snapshot_add_section(device,
1294 KGSL_SNAPSHOT_SECTION_DEBUGBUS,
1295 snapshot, a6xx_snapshot_vbif_debugbus_block,
1296 (void *) &a6xx_vbif_debugbus_blocks);
Lynus Vazdaac540732017-07-27 14:23:35 +05301297
Lynus Vaz9fdc1d22017-09-21 22:06:14 +05301298 /* Dump the CX debugbus data if the block exists */
1299 if (adreno_is_cx_dbgc_register(device, A6XX_CX_DBGC_CFG_DBGBUS_SEL_A)) {
Lynus Vazff24c972017-03-07 19:27:46 +05301300 for (i = 0; i < ARRAY_SIZE(a6xx_cx_dbgc_debugbus_blocks); i++) {
1301 kgsl_snapshot_add_section(device,
1302 KGSL_SNAPSHOT_SECTION_DEBUGBUS,
1303 snapshot, a6xx_snapshot_cx_dbgc_debugbus_block,
1304 (void *) &a6xx_cx_dbgc_debugbus_blocks[i]);
Rajesh Kemisettib36bb492017-11-20 10:49:27 +05301305 /*
1306 * Get debugbus for GBIF CX part if GPU has GBIF block
1307 * GBIF uses exactly same ID as of VBIF so use
1308 * it as it is.
1309 */
1310 if (adreno_has_gbif(adreno_dev))
1311 kgsl_snapshot_add_section(device,
1312 KGSL_SNAPSHOT_SECTION_DEBUGBUS,
1313 snapshot,
1314 a6xx_snapshot_cx_dbgc_debugbus_block,
1315 (void *) &a6xx_vbif_debugbus_blocks);
Lynus Vazff24c972017-03-07 19:27:46 +05301316 }
Lynus Vazff24c972017-03-07 19:27:46 +05301317 }
Lynus Vaz20c81272017-02-10 16:22:12 +05301318}
1319
/*
 * a6xx_snapshot_gmu() - A6XX GMU snapshot function
 * @adreno_dev: Device being snapshotted
 * @snapshot: Pointer to the snapshot instance
 *
 * This is where all of the A6XX GMU specific bits and pieces are grabbed
 * into the snapshot memory
 */
void a6xx_snapshot_gmu(struct adreno_device *adreno_dev,
		struct kgsl_snapshot *snapshot)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	unsigned int val;

	/* Nothing to dump if the GMU is not in use */
	if (!kgsl_gmu_isenabled(device))
		return;

	adreno_snapshot_registers(device, snapshot, a6xx_gmu_registers,
					ARRAY_SIZE(a6xx_gmu_registers) / 2);

	/* The GX-side GMU registers are only readable while GX is powered */
	if (gpudev->gx_is_on(adreno_dev)) {
		/* Set fence to ALLOW mode so registers can be read */
		kgsl_regwrite(device, A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
		/* Read back to confirm (and log) the fence mode change */
		kgsl_regread(device, A6XX_GMU_AO_AHB_FENCE_CTRL, &val);

		KGSL_DRV_ERR(device, "set FENCE to ALLOW mode:%x\n", val);
		adreno_snapshot_registers(device, snapshot,
				a6xx_gmu_gx_registers,
				ARRAY_SIZE(a6xx_gmu_gx_registers) / 2);
	}
}
1352
Lynus Vaz85150052017-02-21 17:57:48 +05301353/* a6xx_snapshot_sqe() - Dump SQE data in snapshot */
1354static size_t a6xx_snapshot_sqe(struct kgsl_device *device, u8 *buf,
1355 size_t remain, void *priv)
1356{
1357 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
1358 struct kgsl_snapshot_debug *header = (struct kgsl_snapshot_debug *)buf;
1359 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
1360 struct adreno_firmware *fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);
1361
1362 if (remain < DEBUG_SECTION_SZ(1)) {
1363 SNAPSHOT_ERR_NOMEM(device, "SQE VERSION DEBUG");
1364 return 0;
1365 }
1366
1367 /* Dump the SQE firmware version */
1368 header->type = SNAPSHOT_DEBUG_SQE_VERSION;
1369 header->size = 1;
1370 *data = fw->version;
1371
1372 return DEBUG_SECTION_SZ(1);
1373}
1374
/*
 * _a6xx_do_crashdump() - Kick off the CP crash dumper and wait for it
 * @device: KGSL device pointer
 *
 * Points the CP at the prebuilt capture script, triggers the dump and polls
 * for completion. On success sets crash_dump_valid so the snapshot routines
 * use the dumped data instead of slow AHB reads.
 */
static void _a6xx_do_crashdump(struct kgsl_device *device)
{
	unsigned long wait_time;
	unsigned int reg = 0;
	unsigned int val;

	crash_dump_valid = false;

	/* The crash dumper can be disabled via sysfs/debugfs */
	if (!device->snapshot_crashdumper)
		return;
	/* Both buffers must have been allocated by a6xx_crashdump_init() */
	if (a6xx_capturescript.gpuaddr == 0 ||
		a6xx_crashdump_registers.gpuaddr == 0)
		return;

	/* IF the SMMU is stalled we cannot do a crash dump */
	kgsl_regread(device, A6XX_RBBM_STATUS3, &val);
	if (val & BIT(24))
		return;

	/* Turn on APRIV so we can access the buffers */
	kgsl_regwrite(device, A6XX_CP_MISC_CNTL, 1);

	kgsl_regwrite(device, A6XX_CP_CRASH_SCRIPT_BASE_LO,
			lower_32_bits(a6xx_capturescript.gpuaddr));
	kgsl_regwrite(device, A6XX_CP_CRASH_SCRIPT_BASE_HI,
			upper_32_bits(a6xx_capturescript.gpuaddr));
	kgsl_regwrite(device, A6XX_CP_CRASH_DUMP_CNTL, 1);

	/* Poll the status register until the done bit (bit 1) is set */
	wait_time = jiffies + msecs_to_jiffies(CP_CRASH_DUMPER_TIMEOUT);
	while (!time_after(jiffies, wait_time)) {
		kgsl_regread(device, A6XX_CP_CRASH_DUMP_STATUS, &reg);
		if (reg & 0x2)
			break;
		cpu_relax();
	}

	/* Drop APRIV again regardless of the outcome */
	kgsl_regwrite(device, A6XX_CP_MISC_CNTL, 0);

	if (!(reg & 0x2)) {
		KGSL_CORE_ERR("Crash dump timed out: 0x%X\n", reg);
		return;
	}

	crash_dump_valid = true;
}
1420
/*
 * a6xx_snapshot() - A6XX GPU snapshot function
 * @adreno_dev: Device being snapshotted
 * @snapshot: Pointer to the snapshot instance
 *
 * This is where all of the A6XX specific bits and pieces are grabbed
 * into the snapshot memory. The ordering matters: registers clobbered by
 * the crash dumper are captured before it runs, and GX/SPTPRAC-gated
 * sections are only dumped while those power domains are up.
 */
void a6xx_snapshot(struct adreno_device *adreno_dev,
		struct kgsl_snapshot *snapshot)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	struct adreno_snapshot_data *snap_data = gpudev->snapshot_data;
	bool sptprac_on;
	unsigned int i;

	/* GMU TCM data dumped through AHB */
	a6xx_snapshot_gmu(adreno_dev, snapshot);

	sptprac_on = gpudev->sptprac_is_on(adreno_dev);

	/* Return if the GX is off */
	if (!gpudev->gx_is_on(adreno_dev))
		return;

	/* Dump the registers which get affected by crash dumper trigger */
	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
		snapshot, a6xx_snapshot_pre_crashdump_regs, NULL);

	/* Dump vbif registers as well which get affected by crash dumper */
	if (!adreno_has_gbif(adreno_dev))
		adreno_snapshot_vbif_registers(device, snapshot,
			a6xx_vbif_snapshot_registers,
			ARRAY_SIZE(a6xx_vbif_snapshot_registers));
	else
		adreno_snapshot_registers(device, snapshot,
			a6xx_gbif_registers,
			ARRAY_SIZE(a6xx_gbif_registers) / 2);

	/* Try to run the crash dumper */
	if (sptprac_on)
		_a6xx_do_crashdump(device);

	/* Register sections (use crashdump data when the dump succeeded) */
	for (i = 0; i < ARRAY_SIZE(a6xx_reg_list); i++) {
		kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
			snapshot, a6xx_snapshot_registers, &a6xx_reg_list[i]);
	}

	/* CP_SQE indexed registers */
	kgsl_snapshot_indexed_registers(device, snapshot,
		A6XX_CP_SQE_STAT_ADDR, A6XX_CP_SQE_STAT_DATA,
		0, snap_data->sect_sizes->cp_pfp);

	/* CP_DRAW_STATE */
	kgsl_snapshot_indexed_registers(device, snapshot,
		A6XX_CP_DRAW_STATE_ADDR, A6XX_CP_DRAW_STATE_DATA,
		0, 0x100);

	/* SQE_UCODE Cache */
	kgsl_snapshot_indexed_registers(device, snapshot,
		A6XX_CP_SQE_UCODE_DBG_ADDR, A6XX_CP_SQE_UCODE_DBG_DATA,
		0, 0x6000);

	/* CP ROQ */
	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG,
		snapshot, adreno_snapshot_cp_roq,
		&snap_data->sect_sizes->roq);

	/* SQE Firmware */
	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG,
		snapshot, a6xx_snapshot_sqe, NULL);

	/* Mempool debug data */
	a6xx_snapshot_mempool(device, snapshot);

	/* These blocks are only accessible while SPTPRAC is powered */
	if (sptprac_on) {
		/* Shader memory */
		a6xx_snapshot_shader(device, snapshot);

		/* MVC register section */
		a6xx_snapshot_mvc_regs(device, snapshot);

		/* registers dumped through DBG AHB */
		a6xx_snapshot_dbgahb_regs(device, snapshot);
	}

	a6xx_snapshot_debugbus(device, snapshot);

}
1511
/*
 * _a6xx_crashdump_init_mvc() - Emit crash dumper script entries for the MVC
 * register clusters
 * @ptr: Pointer into the capture script buffer
 * @offset: In/out byte offset into the crashdump data buffer
 *
 * NOTE(review): entry encoding inferred from usage in this file — a pair
 * (value, (reg << 44) | (1 << 21) | 1) appears to be a register write, and
 * (dest gpuaddr, (start reg << 44) | count) a block read; confirm against
 * the CP crash dump documentation.
 *
 * Return: number of qwords written to the script.
 */
static int _a6xx_crashdump_init_mvc(uint64_t *ptr, uint64_t *offset)
{
	int qwords = 0;
	unsigned int i, j, k;
	unsigned int count;

	for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
		struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];

		/* Program the cluster's select register first, if any */
		if (cluster->sel) {
			ptr[qwords++] = cluster->sel->val;
			ptr[qwords++] = ((uint64_t)cluster->sel->cd_reg << 44) |
					(1 << 21) | 1;
		}

		cluster->offset0 = *offset;
		for (j = 0; j < A6XX_NUM_CTXTS; j++) {

			/* Remember where the second context's data starts */
			if (j == 1)
				cluster->offset1 = *offset;

			/* Point the CD aperture at this cluster/context */
			ptr[qwords++] = (cluster->id << 8) | (j << 4) | j;
			ptr[qwords++] =
				((uint64_t)A6XX_CP_APERTURE_CNTL_CD << 44) |
				(1 << 21) | 1;

			for (k = 0; k < cluster->num_sets; k++) {
				count = REG_PAIR_COUNT(cluster->regs, k);
				ptr[qwords++] =
				a6xx_crashdump_registers.gpuaddr + *offset;
				ptr[qwords++] =
				(((uint64_t)cluster->regs[2 * k]) << 44) |
							count;

				*offset += count * sizeof(unsigned int);
			}
		}
	}

	return qwords;
}
1553
Lynus Vaz9ad67a32017-03-10 14:55:02 +05301554static int _a6xx_crashdump_init_shader(struct a6xx_shader_block *block,
1555 uint64_t *ptr, uint64_t *offset)
1556{
1557 int qwords = 0;
1558 unsigned int j;
1559
1560 /* Capture each bank in the block */
1561 for (j = 0; j < A6XX_NUM_SHADER_BANKS; j++) {
1562 /* Program the aperture */
1563 ptr[qwords++] =
1564 (block->statetype << A6XX_SHADER_STATETYPE_SHIFT) | j;
1565 ptr[qwords++] = (((uint64_t) A6XX_HLSQ_DBG_READ_SEL << 44)) |
1566 (1 << 21) | 1;
1567
1568 /* Read all the data in one chunk */
1569 ptr[qwords++] = a6xx_crashdump_registers.gpuaddr + *offset;
1570 ptr[qwords++] =
1571 (((uint64_t) A6XX_HLSQ_DBG_AHB_READ_APERTURE << 44)) |
1572 block->sz;
1573
1574 /* Remember the offset of the first bank for easy access */
1575 if (j == 0)
1576 block->offset = *offset;
1577
1578 *offset += block->sz * sizeof(unsigned int);
1579 }
1580
1581 return qwords;
1582}
1583
/*
 * _a6xx_crashdump_init_ctx_dbgahb() - Emit crash dumper script entries for
 * the per-context DBG AHB register clusters
 * @ptr: Pointer into the capture script buffer
 * @offset: In/out byte offset into the crashdump data buffer
 *
 * NOTE(review): entry encoding inferred from usage in this file — a pair
 * (value, (reg << 44) | (1 << 21) | 1) appears to be a register write, and
 * (dest gpuaddr, (start reg << 44) | count) a block read; confirm against
 * the CP crash dump documentation.
 *
 * Return: number of qwords written to the script.
 */
static int _a6xx_crashdump_init_ctx_dbgahb(uint64_t *ptr, uint64_t *offset)
{
	int qwords = 0;
	unsigned int i, j, k;
	unsigned int count;

	for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
		struct a6xx_cluster_dbgahb_registers *cluster =
				&a6xx_dbgahb_ctx_clusters[i];

		cluster->offset0 = *offset;

		for (j = 0; j < A6XX_NUM_CTXTS; j++) {
			/* Remember where the second context's data starts */
			if (j == 1)
				cluster->offset1 = *offset;

			/* Program the aperture */
			ptr[qwords++] =
				((cluster->statetype + j * 2) & 0xff) << 8;
			ptr[qwords++] =
				(((uint64_t)A6XX_HLSQ_DBG_READ_SEL << 44)) |
				(1 << 21) | 1;

			for (k = 0; k < cluster->num_sets; k++) {
				unsigned int start = cluster->regs[2 * k];

				count = REG_PAIR_COUNT(cluster->regs, k);
				ptr[qwords++] =
				a6xx_crashdump_registers.gpuaddr + *offset;
				ptr[qwords++] =
				(((uint64_t)(A6XX_HLSQ_DBG_AHB_READ_APERTURE +
					start - cluster->regbase / 4) << 44)) |
							count;

				*offset += count * sizeof(unsigned int);
			}
		}
	}
	return qwords;
}
1624
/*
 * a6xx_crashdump_init() - Allocate and build the A6XX crashdumper script
 * @adreno_dev: Pointer to the adreno device
 *
 * Sizes and allocates the GPU-readable capture script and the data buffer
 * it writes into, then fills in the script: register list reads, shader
 * block dumps, MVC cluster reads and context debug AHB cluster reads.
 * NOTE: the sizing pass below must stay in exact lockstep with the
 * script-building pass; any range added to one must be added to the other.
 */
void a6xx_crashdump_init(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned int script_size = 0;
	unsigned int data_size = 0;
	unsigned int i, j, k;
	uint64_t *ptr;
	uint64_t offset = 0;

	/* Already initialized on a previous call - nothing to do */
	if (a6xx_capturescript.gpuaddr != 0 &&
		a6xx_crashdump_registers.gpuaddr != 0)
		return;

	/*
	 * We need to allocate two buffers:
	 * 1 - the buffer to hold the draw script
	 * 2 - the buffer to hold the data
	 */

	/*
	 * To save the registers, we need 16 bytes per register pair for the
	 * script and a dword for each register in the data
	 */
	for (i = 0; i < ARRAY_SIZE(a6xx_reg_list); i++) {
		struct reg_list *regs = &a6xx_reg_list[i];

		/* 16 bytes for programming the aperture */
		if (regs->sel)
			script_size += 16;

		/* Each pair needs 16 bytes (2 qwords) */
		script_size += regs->count * 16;

		/* Each register needs a dword in the data */
		for (j = 0; j < regs->count; j++)
			data_size += REG_PAIR_COUNT(regs->regs, j) *
				sizeof(unsigned int);

	}

	/*
	 * To save the shader blocks for each block in each type we need 32
	 * bytes for the script (16 bytes to program the aperture and 16 to
	 * read the data) and then a block specific number of bytes to hold
	 * the data
	 */
	for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
		script_size += 32 * A6XX_NUM_SHADER_BANKS;
		data_size += a6xx_shader_blocks[i].sz * sizeof(unsigned int) *
			A6XX_NUM_SHADER_BANKS;
	}

	/* Calculate the script and data size for MVC registers */
	for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
		struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];

		for (j = 0; j < A6XX_NUM_CTXTS; j++) {

			/* 16 bytes for programming the aperture */
			script_size += 16;

			/* Reading each pair of registers takes 16 bytes */
			script_size += 16 * cluster->num_sets;

			/* A dword per register read from the cluster list */
			for (k = 0; k < cluster->num_sets; k++)
				data_size += REG_PAIR_COUNT(cluster->regs, k) *
						sizeof(unsigned int);
		}
	}

	/* Calculate the script and data size for debug AHB registers */
	for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
		struct a6xx_cluster_dbgahb_registers *cluster =
				&a6xx_dbgahb_ctx_clusters[i];

		for (j = 0; j < A6XX_NUM_CTXTS; j++) {

			/* 16 bytes for programming the aperture */
			script_size += 16;

			/* Reading each pair of registers takes 16 bytes */
			script_size += 16 * cluster->num_sets;

			/* A dword per register read from the cluster list */
			for (k = 0; k < cluster->num_sets; k++)
				data_size += REG_PAIR_COUNT(cluster->regs, k) *
					sizeof(unsigned int);
		}
	}

	/* Now allocate the script and data buffers */

	/* The script buffer needs 2 extra qwords on the end for termination */
	if (kgsl_allocate_global(device, &a6xx_capturescript,
		script_size + 16, KGSL_MEMFLAGS_GPUREADONLY,
		KGSL_MEMDESC_PRIVILEGED, "capturescript"))
		return;

	if (kgsl_allocate_global(device, &a6xx_crashdump_registers, data_size,
		0, KGSL_MEMDESC_PRIVILEGED, "capturescript_regs")) {
		/* Don't leak the script buffer if the data alloc failed */
		kgsl_free_global(KGSL_DEVICE(adreno_dev), &a6xx_capturescript);
		return;
	}

	/* Build the crash script */

	ptr = (uint64_t *)a6xx_capturescript.hostptr;

	/* For the registers, program a read command for each pair */
	for (i = 0; i < ARRAY_SIZE(a6xx_reg_list); i++) {
		struct reg_list *regs = &a6xx_reg_list[i];

		/* Remember where this list's data starts for the parser */
		regs->offset = offset;

		/* Program the SEL_CNTL_CD register appropriately */
		if (regs->sel) {
			*ptr++ = regs->sel->val;
			*ptr++ = (((uint64_t)regs->sel->cd_reg << 44)) |
				(1 << 21) | 1;
		}

		for (j = 0; j < regs->count; j++) {
			unsigned int r = REG_PAIR_COUNT(regs->regs, j);
			*ptr++ = a6xx_crashdump_registers.gpuaddr + offset;
			*ptr++ = (((uint64_t) regs->regs[2 * j]) << 44) | r;
			offset += r * sizeof(unsigned int);
		}
	}

	/* Program each shader block */
	for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
		ptr += _a6xx_crashdump_init_shader(&a6xx_shader_blocks[i], ptr,
							&offset);
	}

	/* Program the capturescript for the MVC registers */
	ptr += _a6xx_crashdump_init_mvc(ptr, &offset);

	/* Program the capturescript for the context debug AHB clusters */
	ptr += _a6xx_crashdump_init_ctx_dbgahb(ptr, &offset);

	/* Two zero qwords terminate the script */
	*ptr++ = 0;
	*ptr++ = 0;
}