/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/io.h>
#include "kgsl.h"
#include "adreno.h"
#include "kgsl_snapshot.h"
#include "adreno_snapshot.h"
#include "a6xx_reg.h"
#include "adreno_a6xx.h"
#include "kgsl_gmu.h"

#define A6XX_NUM_CTXTS 2
#define A6XX_NUM_AXI_ARB_BLOCKS 2
#define A6XX_NUM_XIN_AXI_BLOCKS 5
#define A6XX_NUM_XIN_CORE_BLOCKS 4
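/*
 * Context cluster register ranges, listed as inclusive (start, end) pairs;
 * e.g. the pair (0x8000, 0x8006) covers seven registers. Each cluster below
 * is dumped once per hardware context (A6XX_NUM_CTXTS).
 */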
static const unsigned int a6xx_gras_cluster[] = {
	0x8000, 0x8006, 0x8010, 0x8092, 0x8094, 0x809D, 0x80A0, 0x80A6,
	0x80AF, 0x80F1, 0x8100, 0x8107, 0x8109, 0x8109, 0x8110, 0x8110,
	0x8400, 0x840B,
};

static const unsigned int a6xx_ps_cluster_rac[] = {
	0x8800, 0x8806, 0x8809, 0x8811, 0x8818, 0x881E, 0x8820, 0x8865,
	0x8870, 0x8879, 0x8880, 0x8889, 0x8890, 0x8891, 0x8898, 0x8898,
	0x88C0, 0x88C1, 0x88D0, 0x88E3, 0x8900, 0x890C, 0x890F, 0x891A,
	0x8C00, 0x8C01, 0x8C08, 0x8C10, 0x8C17, 0x8C1F, 0x8C26, 0x8C33,
};

static const unsigned int a6xx_ps_cluster_rbp[] = {
	0x88F0, 0x88F3, 0x890D, 0x890E, 0x8927, 0x8928, 0x8BF0, 0x8BF1,
	0x8C02, 0x8C07, 0x8C11, 0x8C16, 0x8C20, 0x8C25,
};

static const unsigned int a6xx_ps_cluster[] = {
	0x9200, 0x9216, 0x9218, 0x9236, 0x9300, 0x9306,
};

static const unsigned int a6xx_fe_cluster[] = {
	0x9300, 0x9306, 0x9800, 0x9806, 0x9B00, 0x9B07, 0xA000, 0xA009,
	0xA00E, 0xA0EF, 0xA0F8, 0xA0F8,
};

static const unsigned int a6xx_pc_vs_cluster[] = {
	0x9100, 0x9108, 0x9300, 0x9306, 0x9980, 0x9981, 0x9B00, 0x9B07,
};

static const struct sel_reg {
	unsigned int host_reg;
	unsigned int cd_reg;
	unsigned int val;
} _a6xx_rb_rac_aperture = {
	.host_reg = A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST,
	.cd_reg = A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD,
	.val = 0x0,
},
_a6xx_rb_rbp_aperture = {
	.host_reg = A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST,
	.cd_reg = A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD,
	.val = 0x9,
};
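/*
 * Describes one register cluster visible through the CP aperture: the
 * cluster id, its (start, end) register pairs, an optional sub-block select
 * aperture to program before reading, and per-context offsets into the
 * crash dumper buffer once the capture script has run.
 */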
static struct a6xx_cluster_registers {
	unsigned int id;
	const unsigned int *regs;
	unsigned int num_sets;
	const struct sel_reg *sel;
	unsigned int offset0;
	unsigned int offset1;
} a6xx_clusters[] = {
	{ CP_CLUSTER_GRAS, a6xx_gras_cluster, ARRAY_SIZE(a6xx_gras_cluster)/2,
		NULL },
	{ CP_CLUSTER_PS, a6xx_ps_cluster_rac, ARRAY_SIZE(a6xx_ps_cluster_rac)/2,
		&_a6xx_rb_rac_aperture },
	{ CP_CLUSTER_PS, a6xx_ps_cluster_rbp, ARRAY_SIZE(a6xx_ps_cluster_rbp)/2,
		&_a6xx_rb_rbp_aperture },
	{ CP_CLUSTER_PS, a6xx_ps_cluster, ARRAY_SIZE(a6xx_ps_cluster)/2,
		NULL },
	{ CP_CLUSTER_FE, a6xx_fe_cluster, ARRAY_SIZE(a6xx_fe_cluster)/2,
		NULL },
	{ CP_CLUSTER_PC_VS, a6xx_pc_vs_cluster,
		ARRAY_SIZE(a6xx_pc_vs_cluster)/2, NULL },
};

struct a6xx_cluster_regs_info {
	struct a6xx_cluster_registers *cluster;
	unsigned int ctxt_id;
};

static const unsigned int a6xx_sp_vs_hlsq_cluster[] = {
	0xB800, 0xB803, 0xB820, 0xB822,
};

static const unsigned int a6xx_sp_vs_sp_cluster[] = {
	0xA800, 0xA824, 0xA830, 0xA83C, 0xA840, 0xA864, 0xA870, 0xA895,
	0xA8A0, 0xA8AF, 0xA8C0, 0xA8C3,
};

static const unsigned int a6xx_hlsq_duplicate_cluster[] = {
	0xBB10, 0xBB11, 0xBB20, 0xBB29,
};

static const unsigned int a6xx_hlsq_2d_duplicate_cluster[] = {
	0xBD80, 0xBD80,
};

static const unsigned int a6xx_sp_duplicate_cluster[] = {
	0xAB00, 0xAB00, 0xAB04, 0xAB05, 0xAB10, 0xAB1B, 0xAB20, 0xAB20,
};

static const unsigned int a6xx_tp_duplicate_cluster[] = {
	0xB300, 0xB307, 0xB309, 0xB309, 0xB380, 0xB382,
};

static const unsigned int a6xx_sp_ps_hlsq_cluster[] = {
	0xB980, 0xB980, 0xB982, 0xB987, 0xB990, 0xB99B, 0xB9A0, 0xB9A2,
	0xB9C0, 0xB9C9,
};

static const unsigned int a6xx_sp_ps_hlsq_2d_cluster[] = {
	0xBD80, 0xBD80,
};

static const unsigned int a6xx_sp_ps_sp_cluster[] = {
	0xA980, 0xA9A8, 0xA9B0, 0xA9BC, 0xA9D0, 0xA9D3, 0xA9E0, 0xA9F3,
	0xAA00, 0xAA00, 0xAA30, 0xAA31,
};

static const unsigned int a6xx_sp_ps_sp_2d_cluster[] = {
	0xACC0, 0xACC0,
};

static const unsigned int a6xx_sp_ps_tp_cluster[] = {
	0xB180, 0xB183, 0xB190, 0xB191,
};

static const unsigned int a6xx_sp_ps_tp_2d_cluster[] = {
	0xB4C0, 0xB4D1,
};
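/*
 * Context clusters that are read through the HLSQ debug AHB aperture rather
 * than the CP aperture. regbase is the byte offset of the block and
 * statetype selects it via A6XX_HLSQ_DBG_READ_SEL; offset0/offset1 locate
 * the per-context data in the crash dumper buffer.
 */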
static struct a6xx_cluster_dbgahb_registers {
	unsigned int id;
	unsigned int regbase;
	unsigned int statetype;
	const unsigned int *regs;
	unsigned int num_sets;
	unsigned int offset0;
	unsigned int offset1;
} a6xx_dbgahb_ctx_clusters[] = {
	{ CP_CLUSTER_SP_VS, 0x0002E000, 0x41, a6xx_sp_vs_hlsq_cluster,
		ARRAY_SIZE(a6xx_sp_vs_hlsq_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002A000, 0x21, a6xx_sp_vs_sp_cluster,
		ARRAY_SIZE(a6xx_sp_vs_sp_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002E000, 0x41, a6xx_hlsq_duplicate_cluster,
		ARRAY_SIZE(a6xx_hlsq_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002F000, 0x45, a6xx_hlsq_2d_duplicate_cluster,
		ARRAY_SIZE(a6xx_hlsq_2d_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002A000, 0x21, a6xx_sp_duplicate_cluster,
		ARRAY_SIZE(a6xx_sp_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002C000, 0x1, a6xx_tp_duplicate_cluster,
		ARRAY_SIZE(a6xx_tp_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002E000, 0x42, a6xx_sp_ps_hlsq_cluster,
		ARRAY_SIZE(a6xx_sp_ps_hlsq_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002F000, 0x46, a6xx_sp_ps_hlsq_2d_cluster,
		ARRAY_SIZE(a6xx_sp_ps_hlsq_2d_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002A000, 0x22, a6xx_sp_ps_sp_cluster,
		ARRAY_SIZE(a6xx_sp_ps_sp_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002B000, 0x26, a6xx_sp_ps_sp_2d_cluster,
		ARRAY_SIZE(a6xx_sp_ps_sp_2d_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002C000, 0x2, a6xx_sp_ps_tp_cluster,
		ARRAY_SIZE(a6xx_sp_ps_tp_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002D000, 0x6, a6xx_sp_ps_tp_2d_cluster,
		ARRAY_SIZE(a6xx_sp_ps_tp_2d_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002E000, 0x42, a6xx_hlsq_duplicate_cluster,
		ARRAY_SIZE(a6xx_hlsq_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002A000, 0x22, a6xx_sp_duplicate_cluster,
		ARRAY_SIZE(a6xx_sp_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002C000, 0x2, a6xx_tp_duplicate_cluster,
		ARRAY_SIZE(a6xx_tp_duplicate_cluster) / 2 },
};

struct a6xx_cluster_dbgahb_regs_info {
	struct a6xx_cluster_dbgahb_registers *cluster;
	unsigned int ctxt_id;
};

static const unsigned int a6xx_hlsq_non_ctx_registers[] = {
	0xBE00, 0xBE01, 0xBE04, 0xBE05, 0xBE08, 0xBE09, 0xBE10, 0xBE15,
	0xBE20, 0xBE23,
};

static const unsigned int a6xx_sp_non_ctx_registers[] = {
	0xAE00, 0xAE04, 0xAE0C, 0xAE0C, 0xAE0F, 0xAE2B, 0xAE30, 0xAE32,
	0xAE35, 0xAE35, 0xAE3A, 0xAE3F, 0xAE50, 0xAE52,
};

static const unsigned int a6xx_tp_non_ctx_registers[] = {
	0xB600, 0xB601, 0xB604, 0xB605, 0xB610, 0xB61B, 0xB620, 0xB623,
};
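/*
 * Non-context (per-block) registers that are also read through the HLSQ
 * debug AHB aperture; offset locates the dumped values in the crash dumper
 * buffer.
 */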
static struct a6xx_non_ctx_dbgahb_registers {
	unsigned int regbase;
	unsigned int statetype;
	const unsigned int *regs;
	unsigned int num_sets;
	unsigned int offset;
} a6xx_non_ctx_dbgahb[] = {
	{ 0x0002F800, 0x40, a6xx_hlsq_non_ctx_registers,
		ARRAY_SIZE(a6xx_hlsq_non_ctx_registers) / 2 },
	{ 0x0002B800, 0x20, a6xx_sp_non_ctx_registers,
		ARRAY_SIZE(a6xx_sp_non_ctx_registers) / 2 },
	{ 0x0002D800, 0x0, a6xx_tp_non_ctx_registers,
		ARRAY_SIZE(a6xx_tp_non_ctx_registers) / 2 },
};

static const unsigned int a6xx_vbif_ver_20xxxxxx_registers[] = {
	/* VBIF */
	0x3000, 0x3007, 0x300C, 0x3014, 0x3018, 0x302D, 0x3030, 0x3031,
	0x3034, 0x3036, 0x303C, 0x303D, 0x3040, 0x3040, 0x3042, 0x3042,
	0x3049, 0x3049, 0x3058, 0x3058, 0x305A, 0x3061, 0x3064, 0x3068,
	0x306C, 0x306D, 0x3080, 0x3088, 0x308B, 0x308C, 0x3090, 0x3094,
	0x3098, 0x3098, 0x309C, 0x309C, 0x30C0, 0x30C0, 0x30C8, 0x30C8,
	0x30D0, 0x30D0, 0x30D8, 0x30D8, 0x30E0, 0x30E0, 0x3100, 0x3100,
	0x3108, 0x3108, 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120,
	0x3124, 0x3125, 0x3129, 0x3129, 0x3131, 0x3131, 0x3154, 0x3154,
	0x3156, 0x3156, 0x3158, 0x3158, 0x315A, 0x315A, 0x315C, 0x315C,
	0x315E, 0x315E, 0x3160, 0x3160, 0x3162, 0x3162, 0x340C, 0x340C,
	0x3410, 0x3410, 0x3800, 0x3801,
};

static const unsigned int a6xx_gmu_gx_registers[] = {
	/* GMU GX */
	0x1A800, 0x1A800, 0x1A810, 0x1A813, 0x1A816, 0x1A816, 0x1A818, 0x1A81B,
	0x1A81E, 0x1A81E, 0x1A820, 0x1A823, 0x1A826, 0x1A826, 0x1A828, 0x1A82B,
	0x1A82E, 0x1A82E, 0x1A830, 0x1A833, 0x1A836, 0x1A836, 0x1A838, 0x1A83B,
	0x1A83E, 0x1A83E, 0x1A840, 0x1A843, 0x1A846, 0x1A846, 0x1A880, 0x1A884,
	0x1A900, 0x1A92B, 0x1A940, 0x1A940,
};

static const unsigned int a6xx_gmu_registers[] = {
	/* GMU TCM */
	0x1B400, 0x1C3FF, 0x1C400, 0x1D3FF,
	/* GMU CX */
	0x1F400, 0x1F407, 0x1F410, 0x1F412, 0x1F500, 0x1F500, 0x1F507, 0x1F50A,
	0x1F800, 0x1F804, 0x1F807, 0x1F808, 0x1F80B, 0x1F80C, 0x1F80F, 0x1F81C,
	0x1F824, 0x1F82A, 0x1F82D, 0x1F830, 0x1F840, 0x1F853, 0x1F887, 0x1F889,
	0x1F8A0, 0x1F8A2, 0x1F8A4, 0x1F8AF, 0x1F8C0, 0x1F8C3, 0x1F8D0, 0x1F8D0,
	0x1F8E4, 0x1F8E4, 0x1F8E8, 0x1F8EC, 0x1F900, 0x1F903, 0x1F940, 0x1F940,
	0x1F942, 0x1F944, 0x1F94C, 0x1F94D, 0x1F94F, 0x1F951, 0x1F954, 0x1F954,
	0x1F957, 0x1F958, 0x1F95D, 0x1F95D, 0x1F962, 0x1F962, 0x1F964, 0x1F965,
	0x1F980, 0x1F986, 0x1F990, 0x1F99E, 0x1F9C0, 0x1F9C0, 0x1F9C5, 0x1F9CC,
	0x1F9E0, 0x1F9E2, 0x1F9F0, 0x1F9F0, 0x1FA00, 0x1FA01,
	/* GPU RSCC */
	0x2348C, 0x2348C, 0x23501, 0x23502, 0x23740, 0x23742, 0x23744, 0x23747,
	0x2374C, 0x23787, 0x237EC, 0x237EF, 0x237F4, 0x2382F, 0x23894, 0x23897,
	0x2389C, 0x238D7, 0x2393C, 0x2393F, 0x23944, 0x2397F,
	/* GMU AO */
	0x23B00, 0x23B16, 0x23C00, 0x23C00,
	/* GPU CC */
	0x24000, 0x24012, 0x24040, 0x24052, 0x24400, 0x24404, 0x24407, 0x2440B,
	0x24415, 0x2441C, 0x2441E, 0x2442D, 0x2443C, 0x2443D, 0x2443F, 0x24440,
	0x24442, 0x24449, 0x24458, 0x2445A, 0x24540, 0x2455E, 0x24800, 0x24802,
	0x24C00, 0x24C02, 0x25400, 0x25402, 0x25800, 0x25802, 0x25C00, 0x25C02,
	0x26000, 0x26002,
	/* GPU CC ACD */
	0x26400, 0x26416, 0x26420, 0x26427,
};

static const unsigned int a6xx_rb_rac_registers[] = {
	0x8E04, 0x8E05, 0x8E07, 0x8E08, 0x8E10, 0x8E1C, 0x8E20, 0x8E25,
	0x8E28, 0x8E28, 0x8E2C, 0x8E2F, 0x8E50, 0x8E52,
};

static const unsigned int a6xx_rb_rbp_registers[] = {
	0x8E01, 0x8E01, 0x8E0C, 0x8E0C, 0x8E3B, 0x8E3E, 0x8E40, 0x8E43,
	0x8E53, 0x8E5F, 0x8E70, 0x8E77,
};

static const struct adreno_vbif_snapshot_registers
a6xx_vbif_snapshot_registers[] = {
	{ 0x20040000, 0xFF000000, a6xx_vbif_ver_20xxxxxx_registers,
		ARRAY_SIZE(a6xx_vbif_ver_20xxxxxx_registers)/2},
};

/*
 * Set of registers to dump for A6XX on snapshot.
 * Registers in pairs - first value is the start offset, second
 * is the stop offset (inclusive)
 */

static const unsigned int a6xx_registers[] = {
	/* RBBM */
	0x0000, 0x0002, 0x0010, 0x0010, 0x0012, 0x0012, 0x0018, 0x001B,
	0x001e, 0x0032, 0x0038, 0x003C, 0x0042, 0x0042, 0x0044, 0x0044,
	0x0047, 0x0047, 0x0056, 0x0056, 0x00AD, 0x00AE, 0x00B0, 0x00FB,
	0x0100, 0x011D, 0x0200, 0x020D, 0x0218, 0x023D, 0x0400, 0x04F9,
	0x0500, 0x0500, 0x0505, 0x050B, 0x050E, 0x0511, 0x0533, 0x0533,
	0x0540, 0x0555,
	/* CP */
	0x0800, 0x0808, 0x0810, 0x0813, 0x0820, 0x0821, 0x0823, 0x0824,
	0x0826, 0x0827, 0x0830, 0x0833, 0x0840, 0x0843, 0x084F, 0x086F,
	0x0880, 0x088A, 0x08A0, 0x08AB, 0x08C0, 0x08C4, 0x08D0, 0x08DD,
	0x08F0, 0x08F3, 0x0900, 0x0903, 0x0908, 0x0911, 0x0928, 0x093E,
	0x0942, 0x094D, 0x0980, 0x0984, 0x098D, 0x0996, 0x0998, 0x099E,
	0x09A0, 0x09A6, 0x09A8, 0x09AE, 0x09B0, 0x09B1, 0x09C2, 0x09C8,
	0x0A00, 0x0A03,
	/* VSC */
	0x0C00, 0x0C04, 0x0C06, 0x0C06, 0x0C10, 0x0CD9, 0x0E00, 0x0E0E,
	/* UCHE */
	0x0E10, 0x0E13, 0x0E17, 0x0E19, 0x0E1C, 0x0E2B, 0x0E30, 0x0E32,
	0x0E38, 0x0E39,
	/* GRAS */
	0x8600, 0x8601, 0x8610, 0x861B, 0x8620, 0x8620, 0x8628, 0x862B,
	0x8630, 0x8637,
	/* VPC */
	0x9600, 0x9604, 0x9624, 0x9637,
	/* PC */
	0x9E00, 0x9E01, 0x9E03, 0x9E0E, 0x9E11, 0x9E16, 0x9E19, 0x9E19,
	0x9E1C, 0x9E1C, 0x9E20, 0x9E23, 0x9E30, 0x9E31, 0x9E34, 0x9E34,
	0x9E70, 0x9E72, 0x9E78, 0x9E79, 0x9E80, 0x9FFF,
	/* VFD */
	0xA600, 0xA601, 0xA603, 0xA603, 0xA60A, 0xA60A, 0xA610, 0xA617,
	0xA630, 0xA630,
};

/*
 * Set of registers to dump for A6XX before actually triggering crash dumper.
 * Registers in pairs - first value is the start offset, second
 * is the stop offset (inclusive)
 */
static const unsigned int a6xx_pre_crashdumper_registers[] = {
	/* RBBM: RBBM_STATUS - RBBM_STATUS3 */
	0x210, 0x213,
	/* CP: CP_STATUS_1 */
	0x825, 0x825,
};

enum a6xx_debugbus_id {
	A6XX_DBGBUS_CP = 0x1,
	A6XX_DBGBUS_RBBM = 0x2,
	A6XX_DBGBUS_VBIF = 0x3,
	A6XX_DBGBUS_HLSQ = 0x4,
	A6XX_DBGBUS_UCHE = 0x5,
	A6XX_DBGBUS_DPM = 0x6,
	A6XX_DBGBUS_TESS = 0x7,
	A6XX_DBGBUS_PC = 0x8,
	A6XX_DBGBUS_VFDP = 0x9,
	A6XX_DBGBUS_VPC = 0xa,
	A6XX_DBGBUS_TSE = 0xb,
	A6XX_DBGBUS_RAS = 0xc,
	A6XX_DBGBUS_VSC = 0xd,
	A6XX_DBGBUS_COM = 0xe,
	A6XX_DBGBUS_LRZ = 0x10,
	A6XX_DBGBUS_A2D = 0x11,
	A6XX_DBGBUS_CCUFCHE = 0x12,
	A6XX_DBGBUS_GMU_CX = 0x13,
	A6XX_DBGBUS_RBP = 0x14,
	A6XX_DBGBUS_DCS = 0x15,
	A6XX_DBGBUS_RBBM_CFG = 0x16,
	A6XX_DBGBUS_CX = 0x17,
	A6XX_DBGBUS_GMU_GX = 0x18,
	A6XX_DBGBUS_TPFCHE = 0x19,
	A6XX_DBGBUS_GPC = 0x1d,
	A6XX_DBGBUS_LARC = 0x1e,
	A6XX_DBGBUS_HLSQ_SPTP = 0x1f,
	A6XX_DBGBUS_RB_0 = 0x20,
	A6XX_DBGBUS_RB_1 = 0x21,
	A6XX_DBGBUS_UCHE_WRAPPER = 0x24,
	A6XX_DBGBUS_CCU_0 = 0x28,
	A6XX_DBGBUS_CCU_1 = 0x29,
	A6XX_DBGBUS_VFD_0 = 0x38,
	A6XX_DBGBUS_VFD_1 = 0x39,
	A6XX_DBGBUS_VFD_2 = 0x3a,
	A6XX_DBGBUS_VFD_3 = 0x3b,
	A6XX_DBGBUS_SP_0 = 0x40,
	A6XX_DBGBUS_SP_1 = 0x41,
	A6XX_DBGBUS_TPL1_0 = 0x48,
	A6XX_DBGBUS_TPL1_1 = 0x49,
	A6XX_DBGBUS_TPL1_2 = 0x4a,
	A6XX_DBGBUS_TPL1_3 = 0x4b,
};

static const struct adreno_debugbus_block a6xx_dbgc_debugbus_blocks[] = {
	{ A6XX_DBGBUS_CP, 0x100, },
	{ A6XX_DBGBUS_RBBM, 0x100, },
	{ A6XX_DBGBUS_HLSQ, 0x100, },
	{ A6XX_DBGBUS_UCHE, 0x100, },
	{ A6XX_DBGBUS_DPM, 0x100, },
	{ A6XX_DBGBUS_TESS, 0x100, },
	{ A6XX_DBGBUS_PC, 0x100, },
	{ A6XX_DBGBUS_VFDP, 0x100, },
	{ A6XX_DBGBUS_VPC, 0x100, },
	{ A6XX_DBGBUS_TSE, 0x100, },
	{ A6XX_DBGBUS_RAS, 0x100, },
	{ A6XX_DBGBUS_VSC, 0x100, },
	{ A6XX_DBGBUS_COM, 0x100, },
	{ A6XX_DBGBUS_LRZ, 0x100, },
	{ A6XX_DBGBUS_A2D, 0x100, },
	{ A6XX_DBGBUS_CCUFCHE, 0x100, },
	{ A6XX_DBGBUS_RBP, 0x100, },
	{ A6XX_DBGBUS_DCS, 0x100, },
	{ A6XX_DBGBUS_RBBM_CFG, 0x100, },
	{ A6XX_DBGBUS_GMU_GX, 0x100, },
	{ A6XX_DBGBUS_TPFCHE, 0x100, },
	{ A6XX_DBGBUS_GPC, 0x100, },
	{ A6XX_DBGBUS_LARC, 0x100, },
	{ A6XX_DBGBUS_HLSQ_SPTP, 0x100, },
	{ A6XX_DBGBUS_RB_0, 0x100, },
	{ A6XX_DBGBUS_RB_1, 0x100, },
	{ A6XX_DBGBUS_UCHE_WRAPPER, 0x100, },
	{ A6XX_DBGBUS_CCU_0, 0x100, },
	{ A6XX_DBGBUS_CCU_1, 0x100, },
	{ A6XX_DBGBUS_VFD_0, 0x100, },
	{ A6XX_DBGBUS_VFD_1, 0x100, },
	{ A6XX_DBGBUS_VFD_2, 0x100, },
	{ A6XX_DBGBUS_VFD_3, 0x100, },
	{ A6XX_DBGBUS_SP_0, 0x100, },
	{ A6XX_DBGBUS_SP_1, 0x100, },
	{ A6XX_DBGBUS_TPL1_0, 0x100, },
	{ A6XX_DBGBUS_TPL1_1, 0x100, },
	{ A6XX_DBGBUS_TPL1_2, 0x100, },
	{ A6XX_DBGBUS_TPL1_3, 0x100, },
};

static const struct adreno_debugbus_block a6xx_vbif_debugbus_blocks = {
	A6XX_DBGBUS_VBIF, 0x100,
};

static const struct adreno_debugbus_block a6xx_cx_dbgc_debugbus_blocks[] = {
	{ A6XX_DBGBUS_GMU_CX, 0x100, },
	{ A6XX_DBGBUS_CX, 0x100, },
};

#define A6XX_NUM_SHADER_BANKS 3
#define A6XX_SHADER_STATETYPE_SHIFT 8

enum a6xx_shader_obj {
	A6XX_TP0_TMO_DATA = 0x9,
	A6XX_TP0_SMO_DATA = 0xa,
	A6XX_TP0_MIPMAP_BASE_DATA = 0xb,
	A6XX_TP1_TMO_DATA = 0x19,
	A6XX_TP1_SMO_DATA = 0x1a,
	A6XX_TP1_MIPMAP_BASE_DATA = 0x1b,
	A6XX_SP_INST_DATA = 0x29,
	A6XX_SP_LB_0_DATA = 0x2a,
	A6XX_SP_LB_1_DATA = 0x2b,
	A6XX_SP_LB_2_DATA = 0x2c,
	A6XX_SP_LB_3_DATA = 0x2d,
	A6XX_SP_LB_4_DATA = 0x2e,
	A6XX_SP_LB_5_DATA = 0x2f,
	A6XX_SP_CB_BINDLESS_DATA = 0x30,
	A6XX_SP_CB_LEGACY_DATA = 0x31,
	A6XX_SP_UAV_DATA = 0x32,
	A6XX_SP_INST_TAG = 0x33,
	A6XX_SP_CB_BINDLESS_TAG = 0x34,
	A6XX_SP_TMO_UMO_TAG = 0x35,
	A6XX_SP_SMO_TAG = 0x36,
	A6XX_SP_STATE_DATA = 0x37,
	A6XX_HLSQ_CHUNK_CVS_RAM = 0x49,
	A6XX_HLSQ_CHUNK_CPS_RAM = 0x4a,
	A6XX_HLSQ_CHUNK_CVS_RAM_TAG = 0x4b,
	A6XX_HLSQ_CHUNK_CPS_RAM_TAG = 0x4c,
	A6XX_HLSQ_ICB_CVS_CB_BASE_TAG = 0x4d,
	A6XX_HLSQ_ICB_CPS_CB_BASE_TAG = 0x4e,
	A6XX_HLSQ_CVS_MISC_RAM = 0x50,
	A6XX_HLSQ_CPS_MISC_RAM = 0x51,
	A6XX_HLSQ_INST_RAM = 0x52,
	A6XX_HLSQ_GFX_CVS_CONST_RAM = 0x53,
	A6XX_HLSQ_GFX_CPS_CONST_RAM = 0x54,
	A6XX_HLSQ_CVS_MISC_RAM_TAG = 0x55,
	A6XX_HLSQ_CPS_MISC_RAM_TAG = 0x56,
	A6XX_HLSQ_INST_RAM_TAG = 0x57,
	A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG = 0x58,
	A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG = 0x59,
	A6XX_HLSQ_PWR_REST_RAM = 0x5a,
	A6XX_HLSQ_PWR_REST_TAG = 0x5b,
	A6XX_HLSQ_DATAPATH_META = 0x60,
	A6XX_HLSQ_FRONTEND_META = 0x61,
	A6XX_HLSQ_INDIRECT_META = 0x62,
	A6XX_HLSQ_BACKEND_META = 0x63
};
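/*
 * Shader memory blocks captured by the crash dumper: statetype identifies
 * the block, sz is the block size, and offset records where each bank's
 * copy lands in the crash dumper buffer.
 */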
struct a6xx_shader_block {
	unsigned int statetype;
	unsigned int sz;
	uint64_t offset;
};

struct a6xx_shader_block_info {
	struct a6xx_shader_block *block;
	unsigned int bank;
	uint64_t offset;
};

static struct a6xx_shader_block a6xx_shader_blocks[] = {
	{A6XX_TP0_TMO_DATA, 0x200},
	{A6XX_TP0_SMO_DATA, 0x80,},
	{A6XX_TP0_MIPMAP_BASE_DATA, 0x3C0},
	{A6XX_TP1_TMO_DATA, 0x200},
	{A6XX_TP1_SMO_DATA, 0x80,},
	{A6XX_TP1_MIPMAP_BASE_DATA, 0x3C0},
	{A6XX_SP_INST_DATA, 0x800},
	{A6XX_SP_LB_0_DATA, 0x800},
	{A6XX_SP_LB_1_DATA, 0x800},
	{A6XX_SP_LB_2_DATA, 0x800},
	{A6XX_SP_LB_3_DATA, 0x800},
	{A6XX_SP_LB_4_DATA, 0x800},
	{A6XX_SP_LB_5_DATA, 0x200},
	{A6XX_SP_CB_BINDLESS_DATA, 0x2000},
	{A6XX_SP_CB_LEGACY_DATA, 0x280,},
	{A6XX_SP_UAV_DATA, 0x80,},
	{A6XX_SP_INST_TAG, 0x80,},
	{A6XX_SP_CB_BINDLESS_TAG, 0x80,},
	{A6XX_SP_TMO_UMO_TAG, 0x80,},
	{A6XX_SP_SMO_TAG, 0x80},
	{A6XX_SP_STATE_DATA, 0x3F},
	{A6XX_HLSQ_CHUNK_CVS_RAM, 0x1C0},
	{A6XX_HLSQ_CHUNK_CPS_RAM, 0x280},
	{A6XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40,},
	{A6XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40,},
	{A6XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x4,},
	{A6XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x4,},
	{A6XX_HLSQ_CVS_MISC_RAM, 0x1C0},
	{A6XX_HLSQ_CPS_MISC_RAM, 0x580},
	{A6XX_HLSQ_INST_RAM, 0x800},
	{A6XX_HLSQ_GFX_CVS_CONST_RAM, 0x800},
	{A6XX_HLSQ_GFX_CPS_CONST_RAM, 0x800},
	{A6XX_HLSQ_CVS_MISC_RAM_TAG, 0x8,},
	{A6XX_HLSQ_CPS_MISC_RAM_TAG, 0x4,},
	{A6XX_HLSQ_INST_RAM_TAG, 0x80,},
	{A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0xC,},
	{A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x10},
	{A6XX_HLSQ_PWR_REST_RAM, 0x28},
	{A6XX_HLSQ_PWR_REST_TAG, 0x14},
	{A6XX_HLSQ_DATAPATH_META, 0x40,},
	{A6XX_HLSQ_FRONTEND_META, 0x40},
	{A6XX_HLSQ_INDIRECT_META, 0x40,}
};

static struct kgsl_memdesc a6xx_capturescript;
static struct kgsl_memdesc a6xx_crashdump_registers;
static bool crash_dump_valid;
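/*
 * Top-level register lists dumped on snapshot. Each entry carries an
 * optional select aperture to program before reading and an offset into the
 * crash dumper buffer used when reading back the dumped values.
 */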
static struct reg_list {
	const unsigned int *regs;
	unsigned int count;
	const struct sel_reg *sel;
	uint64_t offset;
} a6xx_reg_list[] = {
	{ a6xx_registers, ARRAY_SIZE(a6xx_registers) / 2, NULL },
	{ a6xx_rb_rac_registers, ARRAY_SIZE(a6xx_rb_rac_registers) / 2,
		&_a6xx_rb_rac_aperture },
	{ a6xx_rb_rbp_registers, ARRAY_SIZE(a6xx_rb_rbp_registers) / 2,
		&_a6xx_rb_rbp_aperture },
};

#define REG_PAIR_COUNT(_a, _i) \
	(((_a)[(2 * (_i)) + 1] - (_a)[2 * (_i)]) + 1)
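/*
 * Legacy path: read the register list directly over AHB when the crash
 * dumper output is not available.
 */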
static size_t a6xx_legacy_snapshot_registers(struct kgsl_device *device,
		u8 *buf, size_t remain, struct reg_list *regs)
{
	struct kgsl_snapshot_registers snapshot_regs = {
		.regs = regs->regs,
		.count = regs->count,
	};

	if (regs->sel)
		kgsl_regwrite(device, regs->sel->host_reg, regs->sel->val);

	return kgsl_snapshot_dump_registers(device, buf, remain,
		&snapshot_regs);
}
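/*
 * Dump one reg_list into the snapshot as (address, value) pairs, copying the
 * values the crash dumper wrote into a6xx_crashdump_registers; fall back to
 * the legacy AHB path if the crash dump is not valid.
 */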
static size_t a6xx_snapshot_registers(struct kgsl_device *device, u8 *buf,
		size_t remain, void *priv)
{
	struct kgsl_snapshot_regs *header = (struct kgsl_snapshot_regs *)buf;
	struct reg_list *regs = (struct reg_list *)priv;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int *src;
	unsigned int j, k;
	unsigned int count = 0;

	if (crash_dump_valid == false)
		return a6xx_legacy_snapshot_registers(device, buf, remain,
			regs);

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	src = (unsigned int *)(a6xx_crashdump_registers.hostptr + regs->offset);
	remain -= sizeof(*header);

	for (j = 0; j < regs->count; j++) {
		unsigned int start = regs->regs[2 * j];
		unsigned int end = regs->regs[(2 * j) + 1];

		if (remain < ((end - start) + 1) * 8) {
			SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
			goto out;
		}

		remain -= ((end - start) + 1) * 8;

		for (k = start; k <= end; k++, count++) {
			*data++ = k;
			*data++ = *src++;
		}
	}

out:
	header->count = count;

	/* Return the size of the section */
	return (count * 8) + sizeof(*header);
}

static size_t a6xx_snapshot_pre_crashdump_regs(struct kgsl_device *device,
		u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_registers pre_cdregs = {
		.regs = a6xx_pre_crashdumper_registers,
		.count = ARRAY_SIZE(a6xx_pre_crashdumper_registers)/2,
	};

	return kgsl_snapshot_dump_registers(device, buf, remain, &pre_cdregs);
}
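/*
 * Copy one shader block bank out of the crash dumper buffer into the
 * snapshot shader section.
 */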
static size_t a6xx_snapshot_shader_memory(struct kgsl_device *device,
		u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_shader *header =
		(struct kgsl_snapshot_shader *) buf;
	struct a6xx_shader_block_info *info =
		(struct a6xx_shader_block_info *) priv;
	struct a6xx_shader_block *block = info->block;
	unsigned int *data = (unsigned int *) (buf + sizeof(*header));

	if (remain < SHADER_SECTION_SZ(block->sz)) {
		SNAPSHOT_ERR_NOMEM(device, "SHADER MEMORY");
		return 0;
	}

	header->type = block->statetype;
	header->index = info->bank;
	header->size = block->sz;

	memcpy(data, a6xx_crashdump_registers.hostptr + info->offset,
		block->sz);

	return SHADER_SECTION_SZ(block->sz);
}

static void a6xx_snapshot_shader(struct kgsl_device *device,
				struct kgsl_snapshot *snapshot)
{
	unsigned int i, j;
	struct a6xx_shader_block_info info;

	/* Shader blocks can only be read by the crash dumper */
	if (crash_dump_valid == false)
		return;

	for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
		for (j = 0; j < A6XX_NUM_SHADER_BANKS; j++) {
			info.block = &a6xx_shader_blocks[i];
			info.bank = j;
			info.offset = a6xx_shader_blocks[i].offset +
				(j * a6xx_shader_blocks[i].sz);

			/* Shader working/shadow memory */
			kgsl_snapshot_add_section(device,
				KGSL_SNAPSHOT_SECTION_SHADER,
				snapshot, a6xx_snapshot_shader_memory, &info);
		}
	}
}

static void a6xx_snapshot_mempool(struct kgsl_device *device,
				struct kgsl_snapshot *snapshot)
{
	unsigned int pool_size;
	u8 *buf = snapshot->ptr;

	/* Set the mempool size to 0 to stabilize it while dumping */
	kgsl_regread(device, A6XX_CP_MEM_POOL_SIZE, &pool_size);
	kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, 0);

	kgsl_snapshot_indexed_registers(device, snapshot,
		A6XX_CP_MEM_POOL_DBG_ADDR, A6XX_CP_MEM_POOL_DBG_DATA,
		0, 0x2060);

	/*
	 * Data at offset 0x2000 in the mempool section is the mempool size.
	 * Since we set it to 0, patch in the original size so that the data
	 * is consistent.
	 */
	if (buf < snapshot->ptr) {
		unsigned int *data;

		/* Skip over the headers */
		buf += sizeof(struct kgsl_snapshot_section_header) +
			sizeof(struct kgsl_snapshot_indexed_regs);

		data = (unsigned int *)buf + 0x2000;
		*data = pool_size;
	}

	/* Restore the saved mempool size */
	kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, pool_size);
}
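/*
 * Read a register through the HLSQ debug AHB read aperture. regbase is a
 * byte offset, so it is converted to a dword offset before being combined
 * with the register number.
 */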
static inline unsigned int a6xx_read_dbgahb(struct kgsl_device *device,
		unsigned int regbase, unsigned int reg)
{
	unsigned int read_reg = A6XX_HLSQ_DBG_AHB_READ_APERTURE +
		reg - regbase / 4;
	unsigned int val;

	kgsl_regread(device, read_reg, &val);
	return val;
}

static size_t a6xx_legacy_snapshot_cluster_dbgahb(struct kgsl_device *device,
		u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_mvc_regs *header =
		(struct kgsl_snapshot_mvc_regs *)buf;
	struct a6xx_cluster_dbgahb_regs_info *info =
		(struct a6xx_cluster_dbgahb_regs_info *)priv;
	struct a6xx_cluster_dbgahb_registers *cur_cluster = info->cluster;
	unsigned int read_sel;
	unsigned int data_size = 0;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	int i, j;

	if (!device->snapshot_legacy)
		return 0;

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	remain -= sizeof(*header);

	header->ctxt_id = info->ctxt_id;
	header->cluster_id = cur_cluster->id;

	read_sel = ((cur_cluster->statetype + info->ctxt_id * 2) & 0xff) << 8;
	kgsl_regwrite(device, A6XX_HLSQ_DBG_READ_SEL, read_sel);

	for (i = 0; i < cur_cluster->num_sets; i++) {
		unsigned int start = cur_cluster->regs[2 * i];
		unsigned int end = cur_cluster->regs[2 * i + 1];

		if (remain < (end - start + 3) * 4) {
			SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
			goto out;
		}

		remain -= (end - start + 3) * 4;
		data_size += (end - start + 3) * 4;

		*data++ = start | (1 << 31);
		*data++ = end;

		for (j = start; j <= end; j++) {
			unsigned int val;

			val = a6xx_read_dbgahb(device, cur_cluster->regbase, j);
			*data++ = val;

		}
	}

out:
	return data_size + sizeof(*header);
}

static size_t a6xx_snapshot_cluster_dbgahb(struct kgsl_device *device, u8 *buf,
		size_t remain, void *priv)
{
	struct kgsl_snapshot_mvc_regs *header =
		(struct kgsl_snapshot_mvc_regs *)buf;
	struct a6xx_cluster_dbgahb_regs_info *info =
		(struct a6xx_cluster_dbgahb_regs_info *)priv;
	struct a6xx_cluster_dbgahb_registers *cluster = info->cluster;
	unsigned int data_size = 0;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	int i, j;
	unsigned int *src;


	if (crash_dump_valid == false)
		return a6xx_legacy_snapshot_cluster_dbgahb(device, buf, remain,
			info);

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	remain -= sizeof(*header);

	header->ctxt_id = info->ctxt_id;
	header->cluster_id = cluster->id;

	src = (unsigned int *)(a6xx_crashdump_registers.hostptr +
		(header->ctxt_id ? cluster->offset1 : cluster->offset0));

	for (i = 0; i < cluster->num_sets; i++) {
		unsigned int start;
		unsigned int end;

		start = cluster->regs[2 * i];
		end = cluster->regs[2 * i + 1];

		if (remain < (end - start + 3) * 4) {
			SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
			goto out;
		}

		remain -= (end - start + 3) * 4;
		data_size += (end - start + 3) * 4;

		*data++ = start | (1 << 31);
		*data++ = end;
		for (j = start; j <= end; j++)
			*data++ = *src++;
	}
out:
	return data_size + sizeof(*header);
}

static size_t a6xx_legacy_snapshot_non_ctx_dbgahb(struct kgsl_device *device,
		u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_regs *header =
		(struct kgsl_snapshot_regs *)buf;
	struct a6xx_non_ctx_dbgahb_registers *regs =
		(struct a6xx_non_ctx_dbgahb_registers *)priv;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	int count = 0;
	unsigned int read_sel;
	int i, j;

	if (!device->snapshot_legacy)
		return 0;

	/* Figure out how many registers we are going to dump */
	for (i = 0; i < regs->num_sets; i++) {
		int start = regs->regs[i * 2];
		int end = regs->regs[i * 2 + 1];

		count += (end - start + 1);
	}

	if (remain < (count * 8) + sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	header->count = count;

	read_sel = (regs->statetype & 0xff) << 8;
	kgsl_regwrite(device, A6XX_HLSQ_DBG_READ_SEL, read_sel);

	for (i = 0; i < regs->num_sets; i++) {
		unsigned int start = regs->regs[2 * i];
		unsigned int end = regs->regs[2 * i + 1];

		for (j = start; j <= end; j++) {
			unsigned int val;

			val = a6xx_read_dbgahb(device, regs->regbase, j);
			*data++ = j;
			*data++ = val;

		}
	}
	return (count * 8) + sizeof(*header);
}

static size_t a6xx_snapshot_non_ctx_dbgahb(struct kgsl_device *device, u8 *buf,
		size_t remain, void *priv)
{
	struct kgsl_snapshot_regs *header =
		(struct kgsl_snapshot_regs *)buf;
	struct a6xx_non_ctx_dbgahb_registers *regs =
		(struct a6xx_non_ctx_dbgahb_registers *)priv;
	unsigned int count = 0;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int i, k;
	unsigned int *src;

	if (crash_dump_valid == false)
		return a6xx_legacy_snapshot_non_ctx_dbgahb(device, buf, remain,
			regs);

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	remain -= sizeof(*header);

	src = (unsigned int *)(a6xx_crashdump_registers.hostptr + regs->offset);

	for (i = 0; i < regs->num_sets; i++) {
		unsigned int start;
		unsigned int end;

		start = regs->regs[2 * i];
		end = regs->regs[(2 * i) + 1];

		if (remain < (end - start + 1) * 8) {
			SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
			goto out;
		}

		remain -= ((end - start) + 1) * 8;

		for (k = start; k <= end; k++, count++) {
			*data++ = k;
			*data++ = *src++;
		}
	}
out:
	header->count = count;

	/* Return the size of the section */
	return (count * 8) + sizeof(*header);
}

static void a6xx_snapshot_dbgahb_regs(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot)
{
	int i, j;

	for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
		struct a6xx_cluster_dbgahb_registers *cluster =
			&a6xx_dbgahb_ctx_clusters[i];
		struct a6xx_cluster_dbgahb_regs_info info;

		info.cluster = cluster;
		for (j = 0; j < A6XX_NUM_CTXTS; j++) {
			info.ctxt_id = j;

			kgsl_snapshot_add_section(device,
				KGSL_SNAPSHOT_SECTION_MVC, snapshot,
				a6xx_snapshot_cluster_dbgahb, &info);
		}
	}

	for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
		kgsl_snapshot_add_section(device,
			KGSL_SNAPSHOT_SECTION_REGS, snapshot,
			a6xx_snapshot_non_ctx_dbgahb, &a6xx_non_ctx_dbgahb[i]);
	}
}
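/*
 * Legacy path for the CP aperture clusters: program the aperture for the
 * requested cluster/context and read each register over AHB.
 */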
static size_t a6xx_legacy_snapshot_mvc(struct kgsl_device *device, u8 *buf,
		size_t remain, void *priv)
{
	struct kgsl_snapshot_mvc_regs *header =
		(struct kgsl_snapshot_mvc_regs *)buf;
	struct a6xx_cluster_regs_info *info =
		(struct a6xx_cluster_regs_info *)priv;
	struct a6xx_cluster_registers *cur_cluster = info->cluster;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int ctxt = info->ctxt_id;
	unsigned int start, end, i, j, aperture_cntl = 0;
	unsigned int data_size = 0;

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
		return 0;
	}

	remain -= sizeof(*header);

	header->ctxt_id = info->ctxt_id;
	header->cluster_id = cur_cluster->id;

	/*
	 * Set the AHB control for the Host to read from the
	 * cluster/context for this iteration.
	 */
	aperture_cntl = ((cur_cluster->id & 0x7) << 8) | (ctxt << 4) | ctxt;
	kgsl_regwrite(device, A6XX_CP_APERTURE_CNTL_HOST, aperture_cntl);

	if (cur_cluster->sel)
		kgsl_regwrite(device, cur_cluster->sel->host_reg,
			cur_cluster->sel->val);

	for (i = 0; i < cur_cluster->num_sets; i++) {
		start = cur_cluster->regs[2 * i];
		end = cur_cluster->regs[2 * i + 1];

		if (remain < (end - start + 3) * 4) {
			SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
			goto out;
		}

		remain -= (end - start + 3) * 4;
		data_size += (end - start + 3) * 4;

		*data++ = start | (1 << 31);
		*data++ = end;
		for (j = start; j <= end; j++) {
			unsigned int val;

			kgsl_regread(device, j, &val);
			*data++ = val;
		}
	}
out:
	return data_size + sizeof(*header);
}

static size_t a6xx_snapshot_mvc(struct kgsl_device *device, u8 *buf,
		size_t remain, void *priv)
{
	struct kgsl_snapshot_mvc_regs *header =
		(struct kgsl_snapshot_mvc_regs *)buf;
	struct a6xx_cluster_regs_info *info =
		(struct a6xx_cluster_regs_info *)priv;
	struct a6xx_cluster_registers *cluster = info->cluster;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int *src;
	int i, j;
	unsigned int start, end;
	size_t data_size = 0;

	if (crash_dump_valid == false)
		return a6xx_legacy_snapshot_mvc(device, buf, remain, info);

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
		return 0;
	}

	remain -= sizeof(*header);

	header->ctxt_id = info->ctxt_id;
	header->cluster_id = cluster->id;

	src = (unsigned int *)(a6xx_crashdump_registers.hostptr +
		(header->ctxt_id ? cluster->offset1 : cluster->offset0));

	for (i = 0; i < cluster->num_sets; i++) {
		start = cluster->regs[2 * i];
		end = cluster->regs[2 * i + 1];

		if (remain < (end - start + 3) * 4) {
			SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
			goto out;
		}

		remain -= (end - start + 3) * 4;
		data_size += (end - start + 3) * 4;

		*data++ = start | (1 << 31);
		*data++ = end;
		for (j = start; j <= end; j++)
			*data++ = *src++;
	}

out:
	return data_size + sizeof(*header);

}

static void a6xx_snapshot_mvc_regs(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot)
{
	int i, j;
	struct a6xx_cluster_regs_info info;

	for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
		struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];

		info.cluster = cluster;
		for (j = 0; j < A6XX_NUM_CTXTS; j++) {
			info.ctxt_id = j;

			kgsl_snapshot_add_section(device,
				KGSL_SNAPSHOT_SECTION_MVC, snapshot,
				a6xx_snapshot_mvc, &info);
		}
	}
}

/* a6xx_dbgc_debug_bus_read() - Read data from trace bus */
static void a6xx_dbgc_debug_bus_read(struct kgsl_device *device,
	unsigned int block_id, unsigned int index, unsigned int *val)
{
	unsigned int reg;

	reg = (block_id << A6XX_DBGC_CFG_DBGBUS_SEL_PING_BLK_SEL_SHIFT) |
		(index << A6XX_DBGC_CFG_DBGBUS_SEL_PING_INDEX_SHIFT);

	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_A, reg);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_B, reg);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_C, reg);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_D, reg);
	/*
	 * There needs to be a delay of 1 us to ensure enough time for the
	 * correct data to be funneled into the trace buffer
	 */
	udelay(1);

	kgsl_regread(device, A6XX_DBGC_CFG_DBGBUS_TRACE_BUF2, val);
	val++;
	kgsl_regread(device, A6XX_DBGC_CFG_DBGBUS_TRACE_BUF1, val);
}

/* a6xx_snapshot_dbgc_debugbus_block() - Capture debug data for a gpu block */
static size_t a6xx_snapshot_dbgc_debugbus_block(struct kgsl_device *device,
	u8 *buf, size_t remain, void *priv)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct kgsl_snapshot_debugbus *header =
		(struct kgsl_snapshot_debugbus *)buf;
	struct adreno_debugbus_block *block = priv;
	int i;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int dwords;
	unsigned int block_id;
	size_t size;

	dwords = block->dwords;

	/* For a6xx each debug bus data unit is 2 DWORDS */
	size = (dwords * sizeof(unsigned int) * 2) + sizeof(*header);

	if (remain < size) {
		SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
		return 0;
	}

	header->id = block->block_id;
	header->count = dwords * 2;

	block_id = block->block_id;
	/* GMU_GX data is read using the GMU_CX block id on A630 */
	if (adreno_is_a630(adreno_dev) &&
		(block_id == A6XX_DBGBUS_GMU_GX))
		block_id = A6XX_DBGBUS_GMU_CX;

	for (i = 0; i < dwords; i++)
		a6xx_dbgc_debug_bus_read(device, block_id, i, &data[i*2]);

	return size;
}

/* a6xx_snapshot_vbif_debugbus_block() - Capture debug data for VBIF block */
static size_t a6xx_snapshot_vbif_debugbus_block(struct kgsl_device *device,
	u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_debugbus *header =
		(struct kgsl_snapshot_debugbus *)buf;
	struct adreno_debugbus_block *block = priv;
	int i, j;
	/*
	 * Total number of VBIF data words considering 3 sections:
	 * 2 arbiter blocks of 16 words
	 * 5 AXI XIN blocks of 18 dwords each
	 * 4 core clock side XIN blocks of 12 dwords each
	 */
	unsigned int dwords = (16 * A6XX_NUM_AXI_ARB_BLOCKS) +
		(18 * A6XX_NUM_XIN_AXI_BLOCKS) +
		(12 * A6XX_NUM_XIN_CORE_BLOCKS);
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	size_t size;
	unsigned int reg_clk;

	size = (dwords * sizeof(unsigned int)) + sizeof(*header);

	if (remain < size) {
		SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
		return 0;
	}
	header->id = block->block_id;
	header->count = dwords;

	kgsl_regread(device, A6XX_VBIF_CLKON, &reg_clk);
	kgsl_regwrite(device, A6XX_VBIF_CLKON, reg_clk |
		(A6XX_VBIF_CLKON_FORCE_ON_TESTBUS_MASK <<
		A6XX_VBIF_CLKON_FORCE_ON_TESTBUS_SHIFT));
	kgsl_regwrite(device, A6XX_VBIF_TEST_BUS1_CTRL0, 0);
	kgsl_regwrite(device, A6XX_VBIF_TEST_BUS_OUT_CTRL,
		(A6XX_VBIF_TEST_BUS_OUT_CTRL_EN_MASK <<
		A6XX_VBIF_TEST_BUS_OUT_CTRL_EN_SHIFT));

	for (i = 0; i < A6XX_NUM_AXI_ARB_BLOCKS; i++) {
		kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL0,
			(1 << (i + 16)));
		for (j = 0; j < 16; j++) {
			kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL1,
				((j & A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL_MASK)
				<< A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL_SHIFT));
			kgsl_regread(device, A6XX_VBIF_TEST_BUS_OUT,
				data);
			data++;
		}
	}

	/* XIN blocks AXI side */
	for (i = 0; i < A6XX_NUM_XIN_AXI_BLOCKS; i++) {
		kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL0, 1 << i);
		for (j = 0; j < 18; j++) {
			kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL1,
				((j & A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL_MASK)
				<< A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL_SHIFT));
			kgsl_regread(device, A6XX_VBIF_TEST_BUS_OUT,
				data);
			data++;
		}
	}
	kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL0, 0);

	/* XIN blocks core clock side */
	for (i = 0; i < A6XX_NUM_XIN_CORE_BLOCKS; i++) {
		kgsl_regwrite(device, A6XX_VBIF_TEST_BUS1_CTRL0, 1 << i);
		for (j = 0; j < 12; j++) {
			kgsl_regwrite(device, A6XX_VBIF_TEST_BUS1_CTRL1,
				((j & A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL_MASK)
				<< A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL_SHIFT));
			kgsl_regread(device, A6XX_VBIF_TEST_BUS_OUT,
				data);
			data++;
		}
	}
	/* restore the clock of VBIF */
	kgsl_regwrite(device, A6XX_VBIF_CLKON, reg_clk);
	return size;
}

/* a6xx_cx_dbgc_debug_bus_read() - Read data from trace bus */
static void a6xx_cx_debug_bus_read(struct kgsl_device *device,
	unsigned int block_id, unsigned int index, unsigned int *val)
{
	unsigned int reg;

	reg = (block_id << A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_BLK_SEL_SHIFT) |
		(index << A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_INDEX_SHIFT);

	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_SEL_A, reg);
	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_SEL_B, reg);
	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_SEL_C, reg);
	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_SEL_D, reg);
	/*
	 * There needs to be a delay of 1 us to ensure enough time for the
	 * correct data to be funneled into the trace buffer
	 */
	udelay(1);

	adreno_cx_dbgc_regread(device, A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2, val);
	val++;
	adreno_cx_dbgc_regread(device, A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1, val);
}

/*
 * a6xx_snapshot_cx_dbgc_debugbus_block() - Capture debug data for a gpu
 * block from the CX DBGC block
 */
static size_t a6xx_snapshot_cx_dbgc_debugbus_block(struct kgsl_device *device,
	u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_debugbus *header =
		(struct kgsl_snapshot_debugbus *)buf;
	struct adreno_debugbus_block *block = priv;
	int i;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int dwords;
	size_t size;

	dwords = block->dwords;
	/* For a6xx each debug bus data unit is 2 DWORDS */
	size = (dwords * sizeof(unsigned int) * 2) + sizeof(*header);

	if (remain < size) {
		SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
		return 0;
	}

	header->id = block->block_id;
	header->count = dwords * 2;

	for (i = 0; i < dwords; i++)
		a6xx_cx_debug_bus_read(device, block->block_id, i,
			&data[i*2]);

	return size;
}

/* a6xx_snapshot_debugbus() - Capture debug bus data */
static void a6xx_snapshot_debugbus(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot)
{
	int i;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_CNTLT,
		(0xf << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT_SHIFT) |
		(0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU_SHIFT) |
		(0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN_SHIFT));

	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_CNTLM,
		0xf << A6XX_DBGC_CFG_DBGBUS_CTLTM_ENABLE_SHIFT);

	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_0, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_1, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_2, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_3, 0);

	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_BYTEL_0,
		(0 << A6XX_DBGC_CFG_DBGBUS_BYTEL0_SHIFT) |
		(1 << A6XX_DBGC_CFG_DBGBUS_BYTEL1_SHIFT) |
		(2 << A6XX_DBGC_CFG_DBGBUS_BYTEL2_SHIFT) |
		(3 << A6XX_DBGC_CFG_DBGBUS_BYTEL3_SHIFT) |
		(4 << A6XX_DBGC_CFG_DBGBUS_BYTEL4_SHIFT) |
		(5 << A6XX_DBGC_CFG_DBGBUS_BYTEL5_SHIFT) |
		(6 << A6XX_DBGC_CFG_DBGBUS_BYTEL6_SHIFT) |
		(7 << A6XX_DBGC_CFG_DBGBUS_BYTEL7_SHIFT));
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_BYTEL_1,
		(8 << A6XX_DBGC_CFG_DBGBUS_BYTEL8_SHIFT) |
		(9 << A6XX_DBGC_CFG_DBGBUS_BYTEL9_SHIFT) |
		(10 << A6XX_DBGC_CFG_DBGBUS_BYTEL10_SHIFT) |
		(11 << A6XX_DBGC_CFG_DBGBUS_BYTEL11_SHIFT) |
		(12 << A6XX_DBGC_CFG_DBGBUS_BYTEL12_SHIFT) |
		(13 << A6XX_DBGC_CFG_DBGBUS_BYTEL13_SHIFT) |
		(14 << A6XX_DBGC_CFG_DBGBUS_BYTEL14_SHIFT) |
		(15 << A6XX_DBGC_CFG_DBGBUS_BYTEL15_SHIFT));

	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_0, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_1, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_2, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_3, 0);

	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_CNTLT,
		(0xf << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT_SHIFT) |
		(0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU_SHIFT) |
		(0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN_SHIFT));

	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_CNTLM,
		0xf << A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE_SHIFT);

	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0, 0);
	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1, 0);
	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2, 0);
	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3, 0);

	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0,
		(0 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL0_SHIFT) |
		(1 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL1_SHIFT) |
		(2 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL2_SHIFT) |
		(3 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL3_SHIFT) |
		(4 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL4_SHIFT) |
		(5 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL5_SHIFT) |
		(6 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL6_SHIFT) |
		(7 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL7_SHIFT));
	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1,
		(8 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL8_SHIFT) |
		(9 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL9_SHIFT) |
		(10 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL10_SHIFT) |
		(11 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL11_SHIFT) |
		(12 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL12_SHIFT) |
		(13 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL13_SHIFT) |
		(14 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL14_SHIFT) |
		(15 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL15_SHIFT));

	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0, 0);
	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1, 0);
	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2, 0);
	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3, 0);

	for (i = 0; i < ARRAY_SIZE(a6xx_dbgc_debugbus_blocks); i++) {
		kgsl_snapshot_add_section(device,
			KGSL_SNAPSHOT_SECTION_DEBUGBUS,
			snapshot, a6xx_snapshot_dbgc_debugbus_block,
			(void *) &a6xx_dbgc_debugbus_blocks[i]);
	}

	/* Skip if GPU has GBIF */
	if (!adreno_has_gbif(adreno_dev))
		kgsl_snapshot_add_section(device,
			KGSL_SNAPSHOT_SECTION_DEBUGBUS,
			snapshot, a6xx_snapshot_vbif_debugbus_block,
			(void *) &a6xx_vbif_debugbus_blocks);

	/* Dump the CX debugbus data if the block exists */
	if (adreno_is_cx_dbgc_register(device, A6XX_CX_DBGC_CFG_DBGBUS_SEL_A)) {
		for (i = 0; i < ARRAY_SIZE(a6xx_cx_dbgc_debugbus_blocks); i++) {
			kgsl_snapshot_add_section(device,
				KGSL_SNAPSHOT_SECTION_DEBUGBUS,
				snapshot, a6xx_snapshot_cx_dbgc_debugbus_block,
				(void *) &a6xx_cx_dbgc_debugbus_blocks[i]);
		}
	}
}

/*
 * a6xx_snapshot_gmu() - A6XX GMU snapshot function
 * @adreno_dev: Device being snapshotted
 * @snapshot: Pointer to the snapshot instance
 *
 * This is where all of the A6XX GMU specific bits and pieces are grabbed
 * into the snapshot memory
 */
void a6xx_snapshot_gmu(struct adreno_device *adreno_dev,
		struct kgsl_snapshot *snapshot)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	unsigned int val;

	if (!kgsl_gmu_isenabled(device))
		return;

	adreno_snapshot_registers(device, snapshot, a6xx_gmu_registers,
		ARRAY_SIZE(a6xx_gmu_registers) / 2);

	if (gpudev->gx_is_on(adreno_dev)) {
		/* Set fence to ALLOW mode so registers can be read */
		kgsl_regwrite(device, A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
		kgsl_regread(device, A6XX_GMU_AO_AHB_FENCE_CTRL, &val);

		KGSL_DRV_ERR(device, "set FENCE to ALLOW mode:%x\n", val);
		adreno_snapshot_registers(device, snapshot,
			a6xx_gmu_gx_registers,
			ARRAY_SIZE(a6xx_gmu_gx_registers) / 2);
	}
}

/* a6xx_snapshot_sqe() - Dump SQE data in snapshot */
static size_t a6xx_snapshot_sqe(struct kgsl_device *device, u8 *buf,
		size_t remain, void *priv)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct kgsl_snapshot_debug *header = (struct kgsl_snapshot_debug *)buf;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	struct adreno_firmware *fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);

	if (remain < DEBUG_SECTION_SZ(1)) {
		SNAPSHOT_ERR_NOMEM(device, "SQE VERSION DEBUG");
		return 0;
	}

	/* Dump the SQE firmware version */
	header->type = SNAPSHOT_DEBUG_SQE_VERSION;
	header->size = 1;
	*data = fw->version;

	return DEBUG_SECTION_SZ(1);
}
Shrenuj Bansal41665402016-12-16 15:25:54 -08001477static void _a6xx_do_crashdump(struct kgsl_device *device)
1478{
1479 unsigned long wait_time;
1480 unsigned int reg = 0;
1481 unsigned int val;
1482
1483 crash_dump_valid = false;
1484
Lynus Vaz0a06efd2017-09-13 20:21:07 +05301485 if (!device->snapshot_crashdumper)
1486 return;
Shrenuj Bansal41665402016-12-16 15:25:54 -08001487 if (a6xx_capturescript.gpuaddr == 0 ||
1488 a6xx_crashdump_registers.gpuaddr == 0)
1489 return;
1490
1491 /* IF the SMMU is stalled we cannot do a crash dump */
1492 kgsl_regread(device, A6XX_RBBM_STATUS3, &val);
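	/* Bit 24 of RBBM_STATUS3 flags the SMMU stall checked here */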
1493 if (val & BIT(24))
1494 return;
1495
1496 /* Turn on APRIV so we can access the buffers */
1497 kgsl_regwrite(device, A6XX_CP_MISC_CNTL, 1);
1498
1499 kgsl_regwrite(device, A6XX_CP_CRASH_SCRIPT_BASE_LO,
1500 lower_32_bits(a6xx_capturescript.gpuaddr));
1501 kgsl_regwrite(device, A6XX_CP_CRASH_SCRIPT_BASE_HI,
1502 upper_32_bits(a6xx_capturescript.gpuaddr));
1503 kgsl_regwrite(device, A6XX_CP_CRASH_DUMP_CNTL, 1);
1504
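	/*
	 * Poll CRASH_DUMP_STATUS until the dumper reports completion
	 * (bit 1) or the timeout expires
	 */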
1505 wait_time = jiffies + msecs_to_jiffies(CP_CRASH_DUMPER_TIMEOUT);
1506 while (!time_after(jiffies, wait_time)) {
1507 kgsl_regread(device, A6XX_CP_CRASH_DUMP_STATUS, &reg);
1508 if (reg & 0x2)
1509 break;
1510 cpu_relax();
1511 }
1512
1513 kgsl_regwrite(device, A6XX_CP_MISC_CNTL, 0);
1514
1515 if (!(reg & 0x2)) {
1516 KGSL_CORE_ERR("Crash dump timed out: 0x%X\n", reg);
1517 return;
1518 }
1519
1520 crash_dump_valid = true;
1521}
1522
1523/*
1524 * a6xx_snapshot() - A6XX GPU snapshot function
1525 * @adreno_dev: Device being snapshotted
1526 * @snapshot: Pointer to the snapshot instance
1527 *
1528 * This is where all of the A6XX specific bits and pieces are grabbed
1529 * into the snapshot memory
1530 */
1531void a6xx_snapshot(struct adreno_device *adreno_dev,
1532 struct kgsl_snapshot *snapshot)
1533{
1534 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
1535 struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
1536 struct adreno_snapshot_data *snap_data = gpudev->snapshot_data;
Shrenuj Bansald197bf62017-04-07 11:00:09 -07001537 bool sptprac_on;
Lynus Vaz96de8522017-09-13 20:17:03 +05301538 unsigned int i;
Shrenuj Bansald197bf62017-04-07 11:00:09 -07001539
Kyle Pieferda0fa542017-08-04 13:39:40 -07001540 /* GMU TCM data dumped through AHB */
1541 a6xx_snapshot_gmu(adreno_dev, snapshot);
1542
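	/*
	 * The crashdumper and the shader, MVC and DBG AHB sections below
	 * can only be captured while SPTPRAC is on
	 */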
Shrenuj Bansald197bf62017-04-07 11:00:09 -07001543 sptprac_on = gpudev->sptprac_is_on(adreno_dev);
1544
1545 /* Return if the GX is off */
Carter Cooperb88b7082017-09-14 09:03:26 -06001546 if (!gpudev->gx_is_on(adreno_dev))
Shrenuj Bansald197bf62017-04-07 11:00:09 -07001547 return;
Shrenuj Bansal41665402016-12-16 15:25:54 -08001548
Lynus Vaz030473e2017-06-22 17:33:06 +05301549	/* Dump the registers which are affected by triggering the crash dumper */
1550 kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
1551 snapshot, a6xx_snapshot_pre_crashdump_regs, NULL);
1552
1553	/* Also dump the VBIF registers, which are affected by the crash dumper */
Rajesh Kemisetti77b82ed2017-09-24 20:42:41 +05301554 if (!adreno_has_gbif(adreno_dev))
1555 adreno_snapshot_vbif_registers(device, snapshot,
1556 a6xx_vbif_snapshot_registers,
1557 ARRAY_SIZE(a6xx_vbif_snapshot_registers));
Lynus Vaz030473e2017-06-22 17:33:06 +05301558
Shrenuj Bansal41665402016-12-16 15:25:54 -08001559 /* Try to run the crash dumper */
Shrenuj Bansald197bf62017-04-07 11:00:09 -07001560 if (sptprac_on)
1561 _a6xx_do_crashdump(device);
Shrenuj Bansal41665402016-12-16 15:25:54 -08001562
Lynus Vaz96de8522017-09-13 20:17:03 +05301563 for (i = 0; i < ARRAY_SIZE(a6xx_reg_list); i++) {
1564 kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
1565 snapshot, a6xx_snapshot_registers, &a6xx_reg_list[i]);
1566 }
Shrenuj Bansal41665402016-12-16 15:25:54 -08001567
Shrenuj Bansal41665402016-12-16 15:25:54 -08001568 /* CP_SQE indexed registers */
1569 kgsl_snapshot_indexed_registers(device, snapshot,
1570 A6XX_CP_SQE_STAT_ADDR, A6XX_CP_SQE_STAT_DATA,
1571 0, snap_data->sect_sizes->cp_pfp);
1572
1573 /* CP_DRAW_STATE */
1574 kgsl_snapshot_indexed_registers(device, snapshot,
1575 A6XX_CP_DRAW_STATE_ADDR, A6XX_CP_DRAW_STATE_DATA,
1576 0, 0x100);
1577
1578 /* SQE_UCODE Cache */
1579 kgsl_snapshot_indexed_registers(device, snapshot,
1580 A6XX_CP_SQE_UCODE_DBG_ADDR, A6XX_CP_SQE_UCODE_DBG_DATA,
1581 0, 0x6000);
1582
1583 /* CP ROQ */
1584 kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG,
1585 snapshot, adreno_snapshot_cp_roq,
1586 &snap_data->sect_sizes->roq);
1587
Lynus Vaz85150052017-02-21 17:57:48 +05301588 /* SQE Firmware */
1589 kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG,
1590 snapshot, a6xx_snapshot_sqe, NULL);
1591
Lynus Vaza5922742017-03-14 18:50:54 +05301592 /* Mempool debug data */
1593 a6xx_snapshot_mempool(device, snapshot);
1594
Shrenuj Bansald197bf62017-04-07 11:00:09 -07001595 if (sptprac_on) {
1596 /* Shader memory */
1597 a6xx_snapshot_shader(device, snapshot);
Lynus Vaz9ad67a32017-03-10 14:55:02 +05301598
Shrenuj Bansald197bf62017-04-07 11:00:09 -07001599 /* MVC register section */
1600 a6xx_snapshot_mvc_regs(device, snapshot);
Shrenuj Bansal41665402016-12-16 15:25:54 -08001601
Shrenuj Bansald197bf62017-04-07 11:00:09 -07001602 /* registers dumped through DBG AHB */
1603 a6xx_snapshot_dbgahb_regs(device, snapshot);
1604 }
Lynus Vaz461e2382017-01-16 19:35:41 +05301605
Lynus Vaz20c81272017-02-10 16:22:12 +05301606 a6xx_snapshot_debugbus(device, snapshot);
Kyle Piefer60733aa2017-03-21 11:24:01 -07001607
Shrenuj Bansal41665402016-12-16 15:25:54 -08001608}
1609
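/*
 * Capture script layout as built by the _a6xx_crashdump_init_* helpers
 * below: the script is a flat array of qword pairs.  For a read entry the
 * first qword is the destination address inside a6xx_crashdump_registers
 * and the second packs the source register (dword offset) into bits
 * [63:44] with the number of dwords to read in the low bits.  Entries
 * that set bit 21 in the second qword instead write the value held in the
 * first qword to the encoded register, which is how the aperture select
 * registers are programmed.  For example (illustrative values only):
 *
 *   ptr[0] = a6xx_crashdump_registers.gpuaddr + offset;
 *   ptr[1] = ((uint64_t)A6XX_RBBM_STATUS3 << 44) | 1;
 *
 * reads one dword from A6XX_RBBM_STATUS3 into the data buffer.
 */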
1610static int _a6xx_crashdump_init_mvc(uint64_t *ptr, uint64_t *offset)
1611{
1612 int qwords = 0;
1613 unsigned int i, j, k;
1614 unsigned int count;
1615
1616 for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
1617 struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];
1618
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -06001619 if (cluster->sel) {
1620 ptr[qwords++] = cluster->sel->val;
1621 ptr[qwords++] = ((uint64_t)cluster->sel->cd_reg << 44) |
1622 (1 << 21) | 1;
1623 }
1624
Shrenuj Bansal41665402016-12-16 15:25:54 -08001625 cluster->offset0 = *offset;
1626 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
1627
1628 if (j == 1)
1629 cluster->offset1 = *offset;
1630
1631 ptr[qwords++] = (cluster->id << 8) | (j << 4) | j;
1632 ptr[qwords++] =
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -06001633 ((uint64_t)A6XX_CP_APERTURE_CNTL_CD << 44) |
Shrenuj Bansal41665402016-12-16 15:25:54 -08001634 (1 << 21) | 1;
1635
1636 for (k = 0; k < cluster->num_sets; k++) {
1637 count = REG_PAIR_COUNT(cluster->regs, k);
1638 ptr[qwords++] =
1639 a6xx_crashdump_registers.gpuaddr + *offset;
1640 ptr[qwords++] =
1641 (((uint64_t)cluster->regs[2 * k]) << 44) |
1642 count;
1643
1644 *offset += count * sizeof(unsigned int);
1645 }
1646 }
1647 }
1648
1649 return qwords;
1650}
1651
Lynus Vaz9ad67a32017-03-10 14:55:02 +05301652static int _a6xx_crashdump_init_shader(struct a6xx_shader_block *block,
1653 uint64_t *ptr, uint64_t *offset)
1654{
1655 int qwords = 0;
1656 unsigned int j;
1657
1658 /* Capture each bank in the block */
1659 for (j = 0; j < A6XX_NUM_SHADER_BANKS; j++) {
1660 /* Program the aperture */
1661 ptr[qwords++] =
1662 (block->statetype << A6XX_SHADER_STATETYPE_SHIFT) | j;
1663 ptr[qwords++] = (((uint64_t) A6XX_HLSQ_DBG_READ_SEL << 44)) |
1664 (1 << 21) | 1;
1665
1666 /* Read all the data in one chunk */
1667 ptr[qwords++] = a6xx_crashdump_registers.gpuaddr + *offset;
1668 ptr[qwords++] =
1669 (((uint64_t) A6XX_HLSQ_DBG_AHB_READ_APERTURE << 44)) |
1670 block->sz;
1671
1672 /* Remember the offset of the first bank for easy access */
1673 if (j == 0)
1674 block->offset = *offset;
1675
1676 *offset += block->sz * sizeof(unsigned int);
1677 }
1678
1679 return qwords;
1680}
1681
Lynus Vaz1e258612017-04-27 21:35:22 +05301682static int _a6xx_crashdump_init_ctx_dbgahb(uint64_t *ptr, uint64_t *offset)
1683{
1684 int qwords = 0;
1685 unsigned int i, j, k;
1686 unsigned int count;
1687
1688 for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
1689 struct a6xx_cluster_dbgahb_registers *cluster =
1690 &a6xx_dbgahb_ctx_clusters[i];
1691
1692 cluster->offset0 = *offset;
1693
1694 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
1695 if (j == 1)
1696 cluster->offset1 = *offset;
1697
1698 /* Program the aperture */
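			/* Context j selects HLSQ state type (statetype + 2 * j) */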
1699 ptr[qwords++] =
1700 ((cluster->statetype + j * 2) & 0xff) << 8;
1701 ptr[qwords++] =
1702 (((uint64_t)A6XX_HLSQ_DBG_READ_SEL << 44)) |
1703 (1 << 21) | 1;
1704
1705 for (k = 0; k < cluster->num_sets; k++) {
1706 unsigned int start = cluster->regs[2 * k];
1707
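				/*
				 * Reads go through the HLSQ debug aperture at
				 * a dword offset from the cluster register base
				 */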
1708 count = REG_PAIR_COUNT(cluster->regs, k);
1709 ptr[qwords++] =
1710 a6xx_crashdump_registers.gpuaddr + *offset;
1711 ptr[qwords++] =
1712 (((uint64_t)(A6XX_HLSQ_DBG_AHB_READ_APERTURE +
1713 start - cluster->regbase / 4) << 44)) |
1714 count;
1715
1716 *offset += count * sizeof(unsigned int);
1717 }
1718 }
1719 }
1720 return qwords;
1721}
1722
Harshdeep Dhatt52ccc942017-05-10 12:35:30 -06001723static int _a6xx_crashdump_init_non_ctx_dbgahb(uint64_t *ptr, uint64_t *offset)
1724{
1725 int qwords = 0;
1726 unsigned int i, k;
1727 unsigned int count;
1728
1729 for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
1730 struct a6xx_non_ctx_dbgahb_registers *regs =
1731 &a6xx_non_ctx_dbgahb[i];
1732
1733 regs->offset = *offset;
1734
1735 /* Program the aperture */
1736 ptr[qwords++] = (regs->statetype & 0xff) << 8;
1737 ptr[qwords++] = (((uint64_t)A6XX_HLSQ_DBG_READ_SEL << 44)) |
1738 (1 << 21) | 1;
1739
1740 for (k = 0; k < regs->num_sets; k++) {
1741 unsigned int start = regs->regs[2 * k];
1742
1743 count = REG_PAIR_COUNT(regs->regs, k);
1744 ptr[qwords++] =
1745 a6xx_crashdump_registers.gpuaddr + *offset;
1746 ptr[qwords++] =
1747 (((uint64_t)(A6XX_HLSQ_DBG_AHB_READ_APERTURE +
1748 start - regs->regbase / 4) << 44)) |
1749 count;
1750
1751 *offset += count * sizeof(unsigned int);
1752 }
1753 }
1754 return qwords;
1755}
1756
Shrenuj Bansal41665402016-12-16 15:25:54 -08001757void a6xx_crashdump_init(struct adreno_device *adreno_dev)
1758{
1759 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
1760 unsigned int script_size = 0;
1761 unsigned int data_size = 0;
1762 unsigned int i, j, k;
1763 uint64_t *ptr;
1764 uint64_t offset = 0;
1765
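	/* Nothing to do if the buffers were already allocated */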
1766 if (a6xx_capturescript.gpuaddr != 0 &&
1767 a6xx_crashdump_registers.gpuaddr != 0)
1768 return;
1769
1770 /*
1771 * We need to allocate two buffers:
1772	 * 1 - the buffer to hold the crash dump capture script
1773 * 2 - the buffer to hold the data
1774 */
1775
1776 /*
1777 * To save the registers, we need 16 bytes per register pair for the
1778 * script and a dword for each register in the data
1779 */
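	/*
	 * For example, a hypothetical list of 8 register pairs spanning
	 * 100 registers in total adds 8 * 16 = 128 bytes of script and
	 * 100 * 4 = 400 bytes of data
	 */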
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -06001780 for (i = 0; i < ARRAY_SIZE(a6xx_reg_list); i++) {
1781 struct reg_list *regs = &a6xx_reg_list[i];
1782
1783 /* 16 bytes for programming the aperture */
1784 if (regs->sel)
1785 script_size += 16;
Shrenuj Bansal41665402016-12-16 15:25:54 -08001786
1787 /* Each pair needs 16 bytes (2 qwords) */
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -06001788 script_size += regs->count * 16;
Shrenuj Bansal41665402016-12-16 15:25:54 -08001789
1790 /* Each register needs a dword in the data */
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -06001791 for (j = 0; j < regs->count; j++)
Shrenuj Bansal41665402016-12-16 15:25:54 -08001792 data_size += REG_PAIR_COUNT(regs->regs, j) *
1793 sizeof(unsigned int);
1794
1795 }
1796
Lynus Vaz9ad67a32017-03-10 14:55:02 +05301797 /*
1798 * To save the shader blocks for each block in each type we need 32
1799 * bytes for the script (16 bytes to program the aperture and 16 to
1800 * read the data) and then a block specific number of bytes to hold
1801 * the data
1802 */
1803 for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
1804 script_size += 32 * A6XX_NUM_SHADER_BANKS;
1805 data_size += a6xx_shader_blocks[i].sz * sizeof(unsigned int) *
1806 A6XX_NUM_SHADER_BANKS;
1807 }
1808
Shrenuj Bansal41665402016-12-16 15:25:54 -08001809 /* Calculate the script and data size for MVC registers */
1810 for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
1811 struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];
1812
1813 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
1814
1815 /* 16 bytes for programming the aperture */
1816 script_size += 16;
1817
1818 /* Reading each pair of registers takes 16 bytes */
1819 script_size += 16 * cluster->num_sets;
1820
1821 /* A dword per register read from the cluster list */
1822 for (k = 0; k < cluster->num_sets; k++)
1823 data_size += REG_PAIR_COUNT(cluster->regs, k) *
1824 sizeof(unsigned int);
1825 }
1826 }
1827
Lynus Vaz1e258612017-04-27 21:35:22 +05301828 /* Calculate the script and data size for debug AHB registers */
1829 for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
1830 struct a6xx_cluster_dbgahb_registers *cluster =
1831 &a6xx_dbgahb_ctx_clusters[i];
1832
1833 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
1834
1835 /* 16 bytes for programming the aperture */
1836 script_size += 16;
1837
1838 /* Reading each pair of registers takes 16 bytes */
1839 script_size += 16 * cluster->num_sets;
1840
1841 /* A dword per register read from the cluster list */
1842 for (k = 0; k < cluster->num_sets; k++)
1843 data_size += REG_PAIR_COUNT(cluster->regs, k) *
1844 sizeof(unsigned int);
1845 }
1846 }
1847
Harshdeep Dhatt52ccc942017-05-10 12:35:30 -06001848 /*
1849 * Calculate the script and data size for non context debug
1850 * AHB registers
1851 */
1852 for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
1853 struct a6xx_non_ctx_dbgahb_registers *regs =
1854 &a6xx_non_ctx_dbgahb[i];
1855
1856 /* 16 bytes for programming the aperture */
1857 script_size += 16;
1858
1859 /* Reading each pair of registers takes 16 bytes */
1860 script_size += 16 * regs->num_sets;
1861
1862 /* A dword per register read from the cluster list */
1863 for (k = 0; k < regs->num_sets; k++)
1864 data_size += REG_PAIR_COUNT(regs->regs, k) *
1865 sizeof(unsigned int);
1866 }
1867
Shrenuj Bansal41665402016-12-16 15:25:54 -08001868 /* Now allocate the script and data buffers */
1869
1870	/* The script buffer needs 2 extra qwords on the end */
1871 if (kgsl_allocate_global(device, &a6xx_capturescript,
1872 script_size + 16, KGSL_MEMFLAGS_GPUREADONLY,
1873 KGSL_MEMDESC_PRIVILEGED, "capturescript"))
1874 return;
1875
1876 if (kgsl_allocate_global(device, &a6xx_crashdump_registers, data_size,
1877 0, KGSL_MEMDESC_PRIVILEGED, "capturescript_regs")) {
1878 kgsl_free_global(KGSL_DEVICE(adreno_dev), &a6xx_capturescript);
1879 return;
1880 }
1881
1882 /* Build the crash script */
1883
1884 ptr = (uint64_t *)a6xx_capturescript.hostptr;
1885
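	/*
	 * The script ends with the two zero qwords written at the bottom of
	 * this function (the extra 16 bytes reserved above)
	 */
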
1886 /* For the registers, program a read command for each pair */
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -06001887 for (i = 0; i < ARRAY_SIZE(a6xx_reg_list); i++) {
1888 struct reg_list *regs = &a6xx_reg_list[i];
Shrenuj Bansal41665402016-12-16 15:25:54 -08001889
Lynus Vaz1bba57b2017-09-26 11:55:04 +05301890 regs->offset = offset;
1891
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -06001892 /* Program the SEL_CNTL_CD register appropriately */
1893 if (regs->sel) {
1894 *ptr++ = regs->sel->val;
1895 *ptr++ = (((uint64_t)regs->sel->cd_reg << 44)) |
1896 (1 << 21) | 1;
1897 }
1898
1899 for (j = 0; j < regs->count; j++) {
Shrenuj Bansal41665402016-12-16 15:25:54 -08001900 unsigned int r = REG_PAIR_COUNT(regs->regs, j);
1901 *ptr++ = a6xx_crashdump_registers.gpuaddr + offset;
1902 *ptr++ = (((uint64_t) regs->regs[2 * j]) << 44) | r;
1903 offset += r * sizeof(unsigned int);
1904 }
1905 }
1906
Lynus Vaz9ad67a32017-03-10 14:55:02 +05301907 /* Program each shader block */
1908 for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
1909 ptr += _a6xx_crashdump_init_shader(&a6xx_shader_blocks[i], ptr,
1910 &offset);
1911 }
1912
Shrenuj Bansal41665402016-12-16 15:25:54 -08001913	/* Program the capturescript for the MVC registers */
1914 ptr += _a6xx_crashdump_init_mvc(ptr, &offset);
1915
Lynus Vaz1e258612017-04-27 21:35:22 +05301916 ptr += _a6xx_crashdump_init_ctx_dbgahb(ptr, &offset);
1917
Harshdeep Dhatt52ccc942017-05-10 12:35:30 -06001918 ptr += _a6xx_crashdump_init_non_ctx_dbgahb(ptr, &offset);
1919
Shrenuj Bansal41665402016-12-16 15:25:54 -08001920 *ptr++ = 0;
1921 *ptr++ = 0;
1922}