/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/io.h>
#include "kgsl.h"
#include "adreno.h"
#include "kgsl_snapshot.h"
#include "adreno_snapshot.h"
#include "a6xx_reg.h"
#include "adreno_a6xx.h"
#include "kgsl_gmu.h"

#define A6XX_NUM_CTXTS 2
#define A6XX_NUM_AXI_ARB_BLOCKS 2
#define A6XX_NUM_XIN_AXI_BLOCKS 5
#define A6XX_NUM_XIN_CORE_BLOCKS 4

static const unsigned int a6xx_gras_cluster[] = {
	0x8000, 0x8006, 0x8010, 0x8092, 0x8094, 0x809D, 0x80A0, 0x80A6,
	0x80AF, 0x80F1, 0x8100, 0x8107, 0x8109, 0x8109, 0x8110, 0x8110,
	0x8400, 0x840B,
};

static const unsigned int a6xx_ps_cluster_rac[] = {
	0x8800, 0x8806, 0x8809, 0x8811, 0x8818, 0x881E, 0x8820, 0x8865,
	0x8870, 0x8879, 0x8880, 0x8889, 0x8890, 0x8891, 0x8898, 0x8898,
	0x88C0, 0x88C1, 0x88D0, 0x88E3, 0x8900, 0x890C, 0x890F, 0x891A,
	0x8C00, 0x8C01, 0x8C08, 0x8C10, 0x8C17, 0x8C1F, 0x8C26, 0x8C33,
};

static const unsigned int a6xx_ps_cluster_rbp[] = {
	0x88F0, 0x88F3, 0x890D, 0x890E, 0x8927, 0x8928, 0x8BF0, 0x8BF1,
	0x8C02, 0x8C07, 0x8C11, 0x8C16, 0x8C20, 0x8C25,
};

static const unsigned int a6xx_ps_cluster[] = {
	0x9200, 0x9216, 0x9218, 0x9236, 0x9300, 0x9306,
};

static const unsigned int a6xx_fe_cluster[] = {
	0x9300, 0x9306, 0x9800, 0x9806, 0x9B00, 0x9B07, 0xA000, 0xA009,
	0xA00E, 0xA0EF, 0xA0F8, 0xA0F8,
};

static const unsigned int a6xx_pc_vs_cluster[] = {
	0x9100, 0x9108, 0x9300, 0x9306, 0x9980, 0x9981, 0x9B00, 0x9B07,
};

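/*
 * Aperture select registers: 'val' is written to 'host_reg' before the host
 * reads the registers directly over AHB; 'cd_reg' is the equivalent select
 * register visible to the CP crash dumper. This is how the RB RAC and RBP
 * sub-blocks are switched in before their registers are captured.
 */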
static const struct sel_reg {
	unsigned int host_reg;
	unsigned int cd_reg;
	unsigned int val;
} _a6xx_rb_rac_aperture = {
	.host_reg = A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST,
	.cd_reg = A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD,
	.val = 0x0,
},
_a6xx_rb_rbp_aperture = {
	.host_reg = A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST,
	.cd_reg = A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD,
	.val = 0x9,
};

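/*
 * Per-context register clusters read through the CP aperture. offset0 and
 * offset1 locate each cluster's context 0/context 1 data inside the crash
 * dumper output buffer.
 */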
static struct a6xx_cluster_registers {
	unsigned int id;
	const unsigned int *regs;
	unsigned int num_sets;
	const struct sel_reg *sel;
	unsigned int offset0;
	unsigned int offset1;
} a6xx_clusters[] = {
	{ CP_CLUSTER_GRAS, a6xx_gras_cluster, ARRAY_SIZE(a6xx_gras_cluster)/2,
		NULL },
	{ CP_CLUSTER_PS, a6xx_ps_cluster_rac, ARRAY_SIZE(a6xx_ps_cluster_rac)/2,
		&_a6xx_rb_rac_aperture },
	{ CP_CLUSTER_PS, a6xx_ps_cluster_rbp, ARRAY_SIZE(a6xx_ps_cluster_rbp)/2,
		&_a6xx_rb_rbp_aperture },
	{ CP_CLUSTER_PS, a6xx_ps_cluster, ARRAY_SIZE(a6xx_ps_cluster)/2,
		NULL },
	{ CP_CLUSTER_FE, a6xx_fe_cluster, ARRAY_SIZE(a6xx_fe_cluster)/2,
		NULL },
	{ CP_CLUSTER_PC_VS, a6xx_pc_vs_cluster,
		ARRAY_SIZE(a6xx_pc_vs_cluster)/2, NULL },
};

struct a6xx_cluster_regs_info {
	struct a6xx_cluster_registers *cluster;
	unsigned int ctxt_id;
};

static const unsigned int a6xx_sp_vs_hlsq_cluster[] = {
	0xB800, 0xB803, 0xB820, 0xB822,
};

static const unsigned int a6xx_sp_vs_sp_cluster[] = {
	0xA800, 0xA824, 0xA830, 0xA83C, 0xA840, 0xA864, 0xA870, 0xA895,
	0xA8A0, 0xA8AF, 0xA8C0, 0xA8C3,
};

static const unsigned int a6xx_hlsq_duplicate_cluster[] = {
	0xBB10, 0xBB11, 0xBB20, 0xBB29,
};

static const unsigned int a6xx_hlsq_2d_duplicate_cluster[] = {
	0xBD80, 0xBD80,
};

static const unsigned int a6xx_sp_duplicate_cluster[] = {
	0xAB00, 0xAB00, 0xAB04, 0xAB05, 0xAB10, 0xAB1B, 0xAB20, 0xAB20,
};

static const unsigned int a6xx_tp_duplicate_cluster[] = {
	0xB300, 0xB307, 0xB309, 0xB309, 0xB380, 0xB382,
};

static const unsigned int a6xx_sp_ps_hlsq_cluster[] = {
	0xB980, 0xB980, 0xB982, 0xB987, 0xB990, 0xB99B, 0xB9A0, 0xB9A2,
	0xB9C0, 0xB9C9,
};

static const unsigned int a6xx_sp_ps_hlsq_2d_cluster[] = {
	0xBD80, 0xBD80,
};

static const unsigned int a6xx_sp_ps_sp_cluster[] = {
	0xA980, 0xA9A8, 0xA9B0, 0xA9BC, 0xA9D0, 0xA9D3, 0xA9E0, 0xA9F3,
	0xAA00, 0xAA00, 0xAA30, 0xAA31,
};

static const unsigned int a6xx_sp_ps_sp_2d_cluster[] = {
	0xACC0, 0xACC0,
};

static const unsigned int a6xx_sp_ps_tp_cluster[] = {
	0xB180, 0xB183, 0xB190, 0xB191,
};

static const unsigned int a6xx_sp_ps_tp_2d_cluster[] = {
	0xB4C0, 0xB4D1,
};

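/*
 * SP/TP/HLSQ context clusters read through the HLSQ debug AHB aperture.
 * 'regbase' is the byte base address of the block and 'statetype' selects
 * it in A6XX_HLSQ_DBG_READ_SEL; offset0/offset1 locate the per-context
 * data in the crash dumper output buffer.
 */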
static struct a6xx_cluster_dbgahb_registers {
	unsigned int id;
	unsigned int regbase;
	unsigned int statetype;
	const unsigned int *regs;
	unsigned int num_sets;
	unsigned int offset0;
	unsigned int offset1;
} a6xx_dbgahb_ctx_clusters[] = {
	{ CP_CLUSTER_SP_VS, 0x0002E000, 0x41, a6xx_sp_vs_hlsq_cluster,
		ARRAY_SIZE(a6xx_sp_vs_hlsq_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002A000, 0x21, a6xx_sp_vs_sp_cluster,
		ARRAY_SIZE(a6xx_sp_vs_sp_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002E000, 0x41, a6xx_hlsq_duplicate_cluster,
		ARRAY_SIZE(a6xx_hlsq_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002F000, 0x45, a6xx_hlsq_2d_duplicate_cluster,
		ARRAY_SIZE(a6xx_hlsq_2d_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002A000, 0x21, a6xx_sp_duplicate_cluster,
		ARRAY_SIZE(a6xx_sp_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002C000, 0x1, a6xx_tp_duplicate_cluster,
		ARRAY_SIZE(a6xx_tp_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002E000, 0x42, a6xx_sp_ps_hlsq_cluster,
		ARRAY_SIZE(a6xx_sp_ps_hlsq_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002F000, 0x46, a6xx_sp_ps_hlsq_2d_cluster,
		ARRAY_SIZE(a6xx_sp_ps_hlsq_2d_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002A000, 0x22, a6xx_sp_ps_sp_cluster,
		ARRAY_SIZE(a6xx_sp_ps_sp_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002B000, 0x26, a6xx_sp_ps_sp_2d_cluster,
		ARRAY_SIZE(a6xx_sp_ps_sp_2d_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002C000, 0x2, a6xx_sp_ps_tp_cluster,
		ARRAY_SIZE(a6xx_sp_ps_tp_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002D000, 0x6, a6xx_sp_ps_tp_2d_cluster,
		ARRAY_SIZE(a6xx_sp_ps_tp_2d_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002E000, 0x42, a6xx_hlsq_duplicate_cluster,
		ARRAY_SIZE(a6xx_hlsq_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002A000, 0x22, a6xx_sp_duplicate_cluster,
		ARRAY_SIZE(a6xx_sp_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002C000, 0x2, a6xx_tp_duplicate_cluster,
		ARRAY_SIZE(a6xx_tp_duplicate_cluster) / 2 },
};

struct a6xx_cluster_dbgahb_regs_info {
	struct a6xx_cluster_dbgahb_registers *cluster;
	unsigned int ctxt_id;
};

static const unsigned int a6xx_hlsq_non_ctx_registers[] = {
	0xBE00, 0xBE01, 0xBE04, 0xBE05, 0xBE08, 0xBE09, 0xBE10, 0xBE15,
	0xBE20, 0xBE23,
};

static const unsigned int a6xx_sp_non_ctx_registers[] = {
	0xAE00, 0xAE04, 0xAE0C, 0xAE0C, 0xAE0F, 0xAE2B, 0xAE30, 0xAE32,
	0xAE35, 0xAE35, 0xAE3A, 0xAE3F, 0xAE50, 0xAE52,
};

static const unsigned int a6xx_tp_non_ctx_registers[] = {
	0xB600, 0xB601, 0xB604, 0xB605, 0xB610, 0xB61B, 0xB620, 0xB623,
};

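/*
 * Non-context HLSQ/SP/TP registers, also read through the debug AHB
 * aperture. 'offset' locates the block in the crash dumper output buffer.
 */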
static struct a6xx_non_ctx_dbgahb_registers {
	unsigned int regbase;
	unsigned int statetype;
	const unsigned int *regs;
	unsigned int num_sets;
	unsigned int offset;
} a6xx_non_ctx_dbgahb[] = {
	{ 0x0002F800, 0x40, a6xx_hlsq_non_ctx_registers,
		ARRAY_SIZE(a6xx_hlsq_non_ctx_registers) / 2 },
	{ 0x0002B800, 0x20, a6xx_sp_non_ctx_registers,
		ARRAY_SIZE(a6xx_sp_non_ctx_registers) / 2 },
	{ 0x0002D800, 0x0, a6xx_tp_non_ctx_registers,
		ARRAY_SIZE(a6xx_tp_non_ctx_registers) / 2 },
};

static const unsigned int a6xx_vbif_ver_20xxxxxx_registers[] = {
	/* VBIF */
	0x3000, 0x3007, 0x300C, 0x3014, 0x3018, 0x302D, 0x3030, 0x3031,
	0x3034, 0x3036, 0x303C, 0x303D, 0x3040, 0x3040, 0x3042, 0x3042,
	0x3049, 0x3049, 0x3058, 0x3058, 0x305A, 0x3061, 0x3064, 0x3068,
	0x306C, 0x306D, 0x3080, 0x3088, 0x308B, 0x308C, 0x3090, 0x3094,
	0x3098, 0x3098, 0x309C, 0x309C, 0x30C0, 0x30C0, 0x30C8, 0x30C8,
	0x30D0, 0x30D0, 0x30D8, 0x30D8, 0x30E0, 0x30E0, 0x3100, 0x3100,
	0x3108, 0x3108, 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120,
	0x3124, 0x3125, 0x3129, 0x3129, 0x3131, 0x3131, 0x3154, 0x3154,
	0x3156, 0x3156, 0x3158, 0x3158, 0x315A, 0x315A, 0x315C, 0x315C,
	0x315E, 0x315E, 0x3160, 0x3160, 0x3162, 0x3162, 0x340C, 0x340C,
	0x3410, 0x3410, 0x3800, 0x3801,
};

static const unsigned int a6xx_gmu_gx_registers[] = {
	/* GMU GX */
	0x1A800, 0x1A800, 0x1A810, 0x1A813, 0x1A816, 0x1A816, 0x1A818, 0x1A81B,
	0x1A81E, 0x1A81E, 0x1A820, 0x1A823, 0x1A826, 0x1A826, 0x1A828, 0x1A82B,
	0x1A82E, 0x1A82E, 0x1A830, 0x1A833, 0x1A836, 0x1A836, 0x1A838, 0x1A83B,
	0x1A83E, 0x1A83E, 0x1A840, 0x1A843, 0x1A846, 0x1A846, 0x1A880, 0x1A884,
	0x1A900, 0x1A92B, 0x1A940, 0x1A940,
};

static const unsigned int a6xx_gmu_registers[] = {
	/* GMU TCM */
	0x1B400, 0x1C3FF, 0x1C400, 0x1D3FF,
	/* GMU CX */
	0x1F400, 0x1F407, 0x1F410, 0x1F412, 0x1F500, 0x1F500, 0x1F507, 0x1F50A,
	0x1F800, 0x1F804, 0x1F807, 0x1F808, 0x1F80B, 0x1F80C, 0x1F80F, 0x1F81C,
	0x1F824, 0x1F82A, 0x1F82D, 0x1F830, 0x1F840, 0x1F853, 0x1F887, 0x1F889,
	0x1F8A0, 0x1F8A2, 0x1F8A4, 0x1F8AF, 0x1F8C0, 0x1F8C3, 0x1F8D0, 0x1F8D0,
	0x1F8E4, 0x1F8E4, 0x1F8E8, 0x1F8EC, 0x1F900, 0x1F903, 0x1F940, 0x1F940,
	0x1F942, 0x1F944, 0x1F94C, 0x1F94D, 0x1F94F, 0x1F951, 0x1F954, 0x1F954,
	0x1F957, 0x1F958, 0x1F95D, 0x1F95D, 0x1F962, 0x1F962, 0x1F964, 0x1F965,
	0x1F980, 0x1F986, 0x1F990, 0x1F99E, 0x1F9C0, 0x1F9C0, 0x1F9C5, 0x1F9CC,
	0x1F9E0, 0x1F9E2, 0x1F9F0, 0x1F9F0, 0x1FA00, 0x1FA01,
	/* GPU RSCC */
	0x2348C, 0x2348C, 0x23501, 0x23502, 0x23740, 0x23742, 0x23744, 0x23747,
	0x2374C, 0x23787, 0x237EC, 0x237EF, 0x237F4, 0x2382F, 0x23894, 0x23897,
	0x2389C, 0x238D7, 0x2393C, 0x2393F, 0x23944, 0x2397F,
	/* GMU AO */
	0x23B00, 0x23B16, 0x23C00, 0x23C00,
	/* GPU CC */
	0x24000, 0x24012, 0x24040, 0x24052, 0x24400, 0x24404, 0x24407, 0x2440B,
	0x24415, 0x2441C, 0x2441E, 0x2442D, 0x2443C, 0x2443D, 0x2443F, 0x24440,
	0x24442, 0x24449, 0x24458, 0x2445A, 0x24540, 0x2455E, 0x24800, 0x24802,
	0x24C00, 0x24C02, 0x25400, 0x25402, 0x25800, 0x25802, 0x25C00, 0x25C02,
	0x26000, 0x26002,
	/* GPU CC ACD */
	0x26400, 0x26416, 0x26420, 0x26427,
};

static const unsigned int a6xx_rb_rac_registers[] = {
	0x8E04, 0x8E05, 0x8E07, 0x8E08, 0x8E10, 0x8E1C, 0x8E20, 0x8E25,
	0x8E28, 0x8E28, 0x8E2C, 0x8E2F, 0x8E50, 0x8E52,
};

static const unsigned int a6xx_rb_rbp_registers[] = {
	0x8E01, 0x8E01, 0x8E0C, 0x8E0C, 0x8E3B, 0x8E3E, 0x8E40, 0x8E43,
	0x8E53, 0x8E5F, 0x8E70, 0x8E77,
};

static const struct adreno_vbif_snapshot_registers
a6xx_vbif_snapshot_registers[] = {
	{ 0x20040000, 0xFF000000, a6xx_vbif_ver_20xxxxxx_registers,
		ARRAY_SIZE(a6xx_vbif_ver_20xxxxxx_registers)/2},
};

/*
 * Set of registers to dump for A6XX on snapshot.
 * Registers in pairs - first value is the start offset, second
 * is the stop offset (inclusive)
 */

static const unsigned int a6xx_registers[] = {
	/* RBBM */
	0x0000, 0x0002, 0x0010, 0x0010, 0x0012, 0x0012, 0x0018, 0x001B,
	0x001e, 0x0032, 0x0038, 0x003C, 0x0042, 0x0042, 0x0044, 0x0044,
	0x0047, 0x0047, 0x0056, 0x0056, 0x00AD, 0x00AE, 0x00B0, 0x00FB,
	0x0100, 0x011D, 0x0200, 0x020D, 0x0218, 0x023D, 0x0400, 0x04F9,
	0x0500, 0x0500, 0x0505, 0x050B, 0x050E, 0x0511, 0x0533, 0x0533,
	0x0540, 0x0555,
	/* CP */
	0x0800, 0x0808, 0x0810, 0x0813, 0x0820, 0x0821, 0x0823, 0x0824,
	0x0826, 0x0827, 0x0830, 0x0833, 0x0840, 0x0843, 0x084F, 0x086F,
	0x0880, 0x088A, 0x08A0, 0x08AB, 0x08C0, 0x08C4, 0x08D0, 0x08DD,
	0x08F0, 0x08F3, 0x0900, 0x0903, 0x0908, 0x0911, 0x0928, 0x093E,
	0x0942, 0x094D, 0x0980, 0x0984, 0x098D, 0x0996, 0x0998, 0x099E,
	0x09A0, 0x09A6, 0x09A8, 0x09AE, 0x09B0, 0x09B1, 0x09C2, 0x09C8,
	0x0A00, 0x0A03,
	/* VSC */
	0x0C00, 0x0C04, 0x0C06, 0x0C06, 0x0C10, 0x0CD9, 0x0E00, 0x0E0E,
	/* UCHE */
	0x0E10, 0x0E13, 0x0E17, 0x0E19, 0x0E1C, 0x0E2B, 0x0E30, 0x0E32,
	0x0E38, 0x0E39,
	/* GRAS */
	0x8600, 0x8601, 0x8610, 0x861B, 0x8620, 0x8620, 0x8628, 0x862B,
	0x8630, 0x8637,
	/* VPC */
	0x9600, 0x9604, 0x9624, 0x9637,
	/* PC */
	0x9E00, 0x9E01, 0x9E03, 0x9E0E, 0x9E11, 0x9E16, 0x9E19, 0x9E19,
	0x9E1C, 0x9E1C, 0x9E20, 0x9E23, 0x9E30, 0x9E31, 0x9E34, 0x9E34,
	0x9E70, 0x9E72, 0x9E78, 0x9E79, 0x9E80, 0x9FFF,
	/* VFD */
	0xA600, 0xA601, 0xA603, 0xA603, 0xA60A, 0xA60A, 0xA610, 0xA617,
	0xA630, 0xA630,
};

/*
 * Set of registers to dump for A6XX before actually triggering crash dumper.
 * Registers in pairs - first value is the start offset, second
 * is the stop offset (inclusive)
 */
static const unsigned int a6xx_pre_crashdumper_registers[] = {
	/* RBBM: RBBM_STATUS - RBBM_STATUS3 */
	0x210, 0x213,
	/* CP: CP_STATUS_1 */
	0x825, 0x825,
};

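/*
 * Debug bus block IDs programmed into the DBGC/CX_DBGC block select
 * registers; each block listed below is sampled 0x100 indices deep.
 */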
enum a6xx_debugbus_id {
	A6XX_DBGBUS_CP = 0x1,
	A6XX_DBGBUS_RBBM = 0x2,
	A6XX_DBGBUS_VBIF = 0x3,
	A6XX_DBGBUS_HLSQ = 0x4,
	A6XX_DBGBUS_UCHE = 0x5,
	A6XX_DBGBUS_DPM = 0x6,
	A6XX_DBGBUS_TESS = 0x7,
	A6XX_DBGBUS_PC = 0x8,
	A6XX_DBGBUS_VFDP = 0x9,
	A6XX_DBGBUS_VPC = 0xa,
	A6XX_DBGBUS_TSE = 0xb,
	A6XX_DBGBUS_RAS = 0xc,
	A6XX_DBGBUS_VSC = 0xd,
	A6XX_DBGBUS_COM = 0xe,
	A6XX_DBGBUS_LRZ = 0x10,
	A6XX_DBGBUS_A2D = 0x11,
	A6XX_DBGBUS_CCUFCHE = 0x12,
	A6XX_DBGBUS_GMU_CX = 0x13,
	A6XX_DBGBUS_RBP = 0x14,
	A6XX_DBGBUS_DCS = 0x15,
	A6XX_DBGBUS_RBBM_CFG = 0x16,
	A6XX_DBGBUS_CX = 0x17,
	A6XX_DBGBUS_GMU_GX = 0x18,
	A6XX_DBGBUS_TPFCHE = 0x19,
	A6XX_DBGBUS_GPC = 0x1d,
	A6XX_DBGBUS_LARC = 0x1e,
	A6XX_DBGBUS_HLSQ_SPTP = 0x1f,
	A6XX_DBGBUS_RB_0 = 0x20,
	A6XX_DBGBUS_RB_1 = 0x21,
	A6XX_DBGBUS_UCHE_WRAPPER = 0x24,
	A6XX_DBGBUS_CCU_0 = 0x28,
	A6XX_DBGBUS_CCU_1 = 0x29,
	A6XX_DBGBUS_VFD_0 = 0x38,
	A6XX_DBGBUS_VFD_1 = 0x39,
	A6XX_DBGBUS_VFD_2 = 0x3a,
	A6XX_DBGBUS_VFD_3 = 0x3b,
	A6XX_DBGBUS_SP_0 = 0x40,
	A6XX_DBGBUS_SP_1 = 0x41,
	A6XX_DBGBUS_TPL1_0 = 0x48,
	A6XX_DBGBUS_TPL1_1 = 0x49,
	A6XX_DBGBUS_TPL1_2 = 0x4a,
	A6XX_DBGBUS_TPL1_3 = 0x4b,
};

static const struct adreno_debugbus_block a6xx_dbgc_debugbus_blocks[] = {
	{ A6XX_DBGBUS_CP, 0x100, },
	{ A6XX_DBGBUS_RBBM, 0x100, },
	{ A6XX_DBGBUS_HLSQ, 0x100, },
	{ A6XX_DBGBUS_UCHE, 0x100, },
	{ A6XX_DBGBUS_DPM, 0x100, },
	{ A6XX_DBGBUS_TESS, 0x100, },
	{ A6XX_DBGBUS_PC, 0x100, },
	{ A6XX_DBGBUS_VFDP, 0x100, },
	{ A6XX_DBGBUS_VPC, 0x100, },
	{ A6XX_DBGBUS_TSE, 0x100, },
	{ A6XX_DBGBUS_RAS, 0x100, },
	{ A6XX_DBGBUS_VSC, 0x100, },
	{ A6XX_DBGBUS_COM, 0x100, },
	{ A6XX_DBGBUS_LRZ, 0x100, },
	{ A6XX_DBGBUS_A2D, 0x100, },
	{ A6XX_DBGBUS_CCUFCHE, 0x100, },
	{ A6XX_DBGBUS_RBP, 0x100, },
	{ A6XX_DBGBUS_DCS, 0x100, },
	{ A6XX_DBGBUS_RBBM_CFG, 0x100, },
	{ A6XX_DBGBUS_GMU_GX, 0x100, },
	{ A6XX_DBGBUS_TPFCHE, 0x100, },
	{ A6XX_DBGBUS_GPC, 0x100, },
	{ A6XX_DBGBUS_LARC, 0x100, },
	{ A6XX_DBGBUS_HLSQ_SPTP, 0x100, },
	{ A6XX_DBGBUS_RB_0, 0x100, },
	{ A6XX_DBGBUS_RB_1, 0x100, },
	{ A6XX_DBGBUS_UCHE_WRAPPER, 0x100, },
	{ A6XX_DBGBUS_CCU_0, 0x100, },
	{ A6XX_DBGBUS_CCU_1, 0x100, },
	{ A6XX_DBGBUS_VFD_0, 0x100, },
	{ A6XX_DBGBUS_VFD_1, 0x100, },
	{ A6XX_DBGBUS_VFD_2, 0x100, },
	{ A6XX_DBGBUS_VFD_3, 0x100, },
	{ A6XX_DBGBUS_SP_0, 0x100, },
	{ A6XX_DBGBUS_SP_1, 0x100, },
	{ A6XX_DBGBUS_TPL1_0, 0x100, },
	{ A6XX_DBGBUS_TPL1_1, 0x100, },
	{ A6XX_DBGBUS_TPL1_2, 0x100, },
	{ A6XX_DBGBUS_TPL1_3, 0x100, },
};

static const struct adreno_debugbus_block a6xx_vbif_debugbus_blocks = {
	A6XX_DBGBUS_VBIF, 0x100,
};

static void __iomem *a6xx_cx_dbgc;
static const struct adreno_debugbus_block a6xx_cx_dbgc_debugbus_blocks[] = {
	{ A6XX_DBGBUS_GMU_CX, 0x100, },
	{ A6XX_DBGBUS_CX, 0x100, },
};

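/*
 * Shader and HLSQ state blocks captured via the crash dumper; each block
 * below is dumped once per bank (A6XX_NUM_SHADER_BANKS).
 */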
#define A6XX_NUM_SHADER_BANKS 3
#define A6XX_SHADER_STATETYPE_SHIFT 8

enum a6xx_shader_obj {
	A6XX_TP0_TMO_DATA = 0x9,
	A6XX_TP0_SMO_DATA = 0xa,
	A6XX_TP0_MIPMAP_BASE_DATA = 0xb,
	A6XX_TP1_TMO_DATA = 0x19,
	A6XX_TP1_SMO_DATA = 0x1a,
	A6XX_TP1_MIPMAP_BASE_DATA = 0x1b,
	A6XX_SP_INST_DATA = 0x29,
	A6XX_SP_LB_0_DATA = 0x2a,
	A6XX_SP_LB_1_DATA = 0x2b,
	A6XX_SP_LB_2_DATA = 0x2c,
	A6XX_SP_LB_3_DATA = 0x2d,
	A6XX_SP_LB_4_DATA = 0x2e,
	A6XX_SP_LB_5_DATA = 0x2f,
	A6XX_SP_CB_BINDLESS_DATA = 0x30,
	A6XX_SP_CB_LEGACY_DATA = 0x31,
	A6XX_SP_UAV_DATA = 0x32,
	A6XX_SP_INST_TAG = 0x33,
	A6XX_SP_CB_BINDLESS_TAG = 0x34,
	A6XX_SP_TMO_UMO_TAG = 0x35,
	A6XX_SP_SMO_TAG = 0x36,
	A6XX_SP_STATE_DATA = 0x37,
	A6XX_HLSQ_CHUNK_CVS_RAM = 0x49,
	A6XX_HLSQ_CHUNK_CPS_RAM = 0x4a,
	A6XX_HLSQ_CHUNK_CVS_RAM_TAG = 0x4b,
	A6XX_HLSQ_CHUNK_CPS_RAM_TAG = 0x4c,
	A6XX_HLSQ_ICB_CVS_CB_BASE_TAG = 0x4d,
	A6XX_HLSQ_ICB_CPS_CB_BASE_TAG = 0x4e,
	A6XX_HLSQ_CVS_MISC_RAM = 0x50,
	A6XX_HLSQ_CPS_MISC_RAM = 0x51,
	A6XX_HLSQ_INST_RAM = 0x52,
	A6XX_HLSQ_GFX_CVS_CONST_RAM = 0x53,
	A6XX_HLSQ_GFX_CPS_CONST_RAM = 0x54,
	A6XX_HLSQ_CVS_MISC_RAM_TAG = 0x55,
	A6XX_HLSQ_CPS_MISC_RAM_TAG = 0x56,
	A6XX_HLSQ_INST_RAM_TAG = 0x57,
	A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG = 0x58,
	A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG = 0x59,
	A6XX_HLSQ_PWR_REST_RAM = 0x5a,
	A6XX_HLSQ_PWR_REST_TAG = 0x5b,
	A6XX_HLSQ_DATAPATH_META = 0x60,
	A6XX_HLSQ_FRONTEND_META = 0x61,
	A6XX_HLSQ_INDIRECT_META = 0x62,
	A6XX_HLSQ_BACKEND_META = 0x63
};

struct a6xx_shader_block {
	unsigned int statetype;
	unsigned int sz;
	uint64_t offset;
};

struct a6xx_shader_block_info {
	struct a6xx_shader_block *block;
	unsigned int bank;
	uint64_t offset;
};

static struct a6xx_shader_block a6xx_shader_blocks[] = {
	{A6XX_TP0_TMO_DATA, 0x200},
	{A6XX_TP0_SMO_DATA, 0x80,},
	{A6XX_TP0_MIPMAP_BASE_DATA, 0x3C0},
	{A6XX_TP1_TMO_DATA, 0x200},
	{A6XX_TP1_SMO_DATA, 0x80,},
	{A6XX_TP1_MIPMAP_BASE_DATA, 0x3C0},
	{A6XX_SP_INST_DATA, 0x800},
	{A6XX_SP_LB_0_DATA, 0x800},
	{A6XX_SP_LB_1_DATA, 0x800},
	{A6XX_SP_LB_2_DATA, 0x800},
	{A6XX_SP_LB_3_DATA, 0x800},
	{A6XX_SP_LB_4_DATA, 0x800},
	{A6XX_SP_LB_5_DATA, 0x200},
	{A6XX_SP_CB_BINDLESS_DATA, 0x2000},
	{A6XX_SP_CB_LEGACY_DATA, 0x280,},
	{A6XX_SP_UAV_DATA, 0x80,},
	{A6XX_SP_INST_TAG, 0x80,},
	{A6XX_SP_CB_BINDLESS_TAG, 0x80,},
	{A6XX_SP_TMO_UMO_TAG, 0x80,},
	{A6XX_SP_SMO_TAG, 0x80},
	{A6XX_SP_STATE_DATA, 0x3F},
	{A6XX_HLSQ_CHUNK_CVS_RAM, 0x1C0},
	{A6XX_HLSQ_CHUNK_CPS_RAM, 0x280},
	{A6XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40,},
	{A6XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40,},
	{A6XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x4,},
	{A6XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x4,},
	{A6XX_HLSQ_CVS_MISC_RAM, 0x1C0},
	{A6XX_HLSQ_CPS_MISC_RAM, 0x580},
	{A6XX_HLSQ_INST_RAM, 0x800},
	{A6XX_HLSQ_GFX_CVS_CONST_RAM, 0x800},
	{A6XX_HLSQ_GFX_CPS_CONST_RAM, 0x800},
	{A6XX_HLSQ_CVS_MISC_RAM_TAG, 0x8,},
	{A6XX_HLSQ_CPS_MISC_RAM_TAG, 0x4,},
	{A6XX_HLSQ_INST_RAM_TAG, 0x80,},
	{A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0xC,},
	{A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x10},
	{A6XX_HLSQ_PWR_REST_RAM, 0x28},
	{A6XX_HLSQ_PWR_REST_TAG, 0x14},
	{A6XX_HLSQ_DATAPATH_META, 0x40,},
	{A6XX_HLSQ_FRONTEND_META, 0x40},
	{A6XX_HLSQ_INDIRECT_META, 0x40,}
};

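/*
 * Capture script and output buffer for the CP crash dumper.
 * crash_dump_valid is set once the dumper has run successfully, so the
 * snapshot callbacks know whether to read from the dump buffer or fall
 * back to the legacy direct register reads.
 */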
static struct kgsl_memdesc a6xx_capturescript;
static struct kgsl_memdesc a6xx_crashdump_registers;
static bool crash_dump_valid;

static struct reg_list {
	const unsigned int *regs;
	unsigned int count;
	const struct sel_reg *sel;
	uint64_t offset;
} a6xx_reg_list[] = {
	{ a6xx_registers, ARRAY_SIZE(a6xx_registers) / 2, NULL },
	{ a6xx_rb_rac_registers, ARRAY_SIZE(a6xx_rb_rac_registers) / 2,
		&_a6xx_rb_rac_aperture },
	{ a6xx_rb_rbp_registers, ARRAY_SIZE(a6xx_rb_rbp_registers) / 2,
		&_a6xx_rb_rbp_aperture },
};

#define REG_PAIR_COUNT(_a, _i) \
	(((_a)[(2 * (_i)) + 1] - (_a)[2 * (_i)]) + 1)

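/*
 * Legacy fallback: read the register ranges directly over AHB (honouring
 * the optional aperture select) when no valid crash dumper output exists.
 */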
static size_t a6xx_legacy_snapshot_registers(struct kgsl_device *device,
		u8 *buf, size_t remain, struct reg_list *regs)
{
	struct kgsl_snapshot_registers snapshot_regs = {
		.regs = regs->regs,
		.count = regs->count,
	};

	if (regs->sel)
		kgsl_regwrite(device, regs->sel->host_reg, regs->sel->val);

	return kgsl_snapshot_dump_registers(device, buf, remain,
		&snapshot_regs);
}

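/*
 * Dump one reg_list entry from the crash dumper output buffer as
 * address/value pairs (8 bytes per register), falling back to the legacy
 * AHB path when the dump is not valid.
 */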
static size_t a6xx_snapshot_registers(struct kgsl_device *device, u8 *buf,
		size_t remain, void *priv)
{
	struct kgsl_snapshot_regs *header = (struct kgsl_snapshot_regs *)buf;
	struct reg_list *regs = (struct reg_list *)priv;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int *src;
	unsigned int j, k;
	unsigned int count = 0;

	if (crash_dump_valid == false)
		return a6xx_legacy_snapshot_registers(device, buf, remain,
			regs);

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	src = (unsigned int *)(a6xx_crashdump_registers.hostptr + regs->offset);
	remain -= sizeof(*header);

	for (j = 0; j < regs->count; j++) {
		unsigned int start = regs->regs[2 * j];
		unsigned int end = regs->regs[(2 * j) + 1];

		if (remain < ((end - start) + 1) * 8) {
			SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
			goto out;
		}

		remain -= ((end - start) + 1) * 8;

		for (k = start; k <= end; k++, count++) {
			*data++ = k;
			*data++ = *src++;
		}
	}

out:
	header->count = count;

	/* Return the size of the section */
	return (count * 8) + sizeof(*header);
}

static size_t a6xx_snapshot_pre_crashdump_regs(struct kgsl_device *device,
		u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_registers pre_cdregs = {
		.regs = a6xx_pre_crashdumper_registers,
		.count = ARRAY_SIZE(a6xx_pre_crashdumper_registers)/2,
	};

	return kgsl_snapshot_dump_registers(device, buf, remain, &pre_cdregs);
}

static size_t a6xx_snapshot_shader_memory(struct kgsl_device *device,
		u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_shader *header =
		(struct kgsl_snapshot_shader *) buf;
	struct a6xx_shader_block_info *info =
		(struct a6xx_shader_block_info *) priv;
	struct a6xx_shader_block *block = info->block;
	unsigned int *data = (unsigned int *) (buf + sizeof(*header));

	if (remain < SHADER_SECTION_SZ(block->sz)) {
		SNAPSHOT_ERR_NOMEM(device, "SHADER MEMORY");
		return 0;
	}

	header->type = block->statetype;
	header->index = info->bank;
	header->size = block->sz;

	memcpy(data, a6xx_crashdump_registers.hostptr + info->offset,
		block->sz);

	return SHADER_SECTION_SZ(block->sz);
}

static void a6xx_snapshot_shader(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot)
{
	unsigned int i, j;
	struct a6xx_shader_block_info info;

	/* Shader blocks can only be read by the crash dumper */
	if (crash_dump_valid == false)
		return;

	for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
		for (j = 0; j < A6XX_NUM_SHADER_BANKS; j++) {
			info.block = &a6xx_shader_blocks[i];
			info.bank = j;
			info.offset = a6xx_shader_blocks[i].offset +
				(j * a6xx_shader_blocks[i].sz);

			/* Shader working/shadow memory */
			kgsl_snapshot_add_section(device,
				KGSL_SNAPSHOT_SECTION_SHADER,
				snapshot, a6xx_snapshot_shader_memory, &info);
		}
	}
}

static void a6xx_snapshot_mempool(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot)
{
	unsigned int pool_size;
	u8 *buf = snapshot->ptr;

	/* Set the mempool size to 0 to stabilize it while dumping */
	kgsl_regread(device, A6XX_CP_MEM_POOL_SIZE, &pool_size);
	kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, 0);

	kgsl_snapshot_indexed_registers(device, snapshot,
		A6XX_CP_MEM_POOL_DBG_ADDR, A6XX_CP_MEM_POOL_DBG_DATA,
		0, 0x2060);

	/*
	 * Data at offset 0x2000 in the mempool section is the mempool size.
	 * Since we set it to 0, patch in the original size so that the data
	 * is consistent.
	 */
	if (buf < snapshot->ptr) {
		unsigned int *data;

		/* Skip over the headers */
		buf += sizeof(struct kgsl_snapshot_section_header) +
			sizeof(struct kgsl_snapshot_indexed_regs);

		data = (unsigned int *)buf + 0x2000;
		*data = pool_size;
	}

	/* Restore the saved mempool size */
	kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, pool_size);
}

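/*
 * Read one register through the HLSQ debug AHB aperture. 'regbase' is a
 * byte address (so regbase / 4 is the dword offset) while 'reg' is already
 * a dword offset, hence the asymmetric arithmetic below.
 */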
static inline unsigned int a6xx_read_dbgahb(struct kgsl_device *device,
		unsigned int regbase, unsigned int reg)
{
	unsigned int read_reg = A6XX_HLSQ_DBG_AHB_READ_APERTURE +
		reg - regbase / 4;
	unsigned int val;

	kgsl_regread(device, read_reg, &val);
	return val;
}

static size_t a6xx_legacy_snapshot_cluster_dbgahb(struct kgsl_device *device,
		u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_mvc_regs *header =
		(struct kgsl_snapshot_mvc_regs *)buf;
	struct a6xx_cluster_dbgahb_regs_info *info =
		(struct a6xx_cluster_dbgahb_regs_info *)priv;
	struct a6xx_cluster_dbgahb_registers *cur_cluster = info->cluster;
	unsigned int read_sel;
	unsigned int data_size = 0;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	int i, j;

	if (!device->snapshot_legacy)
		return 0;

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	remain -= sizeof(*header);

	header->ctxt_id = info->ctxt_id;
	header->cluster_id = cur_cluster->id;

	read_sel = ((cur_cluster->statetype + info->ctxt_id * 2) & 0xff) << 8;
	kgsl_regwrite(device, A6XX_HLSQ_DBG_READ_SEL, read_sel);

	for (i = 0; i < cur_cluster->num_sets; i++) {
		unsigned int start = cur_cluster->regs[2 * i];
		unsigned int end = cur_cluster->regs[2 * i + 1];

		if (remain < (end - start + 3) * 4) {
			SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
			goto out;
		}

		remain -= (end - start + 3) * 4;
		data_size += (end - start + 3) * 4;

		*data++ = start | (1 << 31);
		*data++ = end;

		for (j = start; j <= end; j++) {
			unsigned int val;

			val = a6xx_read_dbgahb(device, cur_cluster->regbase, j);
			*data++ = val;

		}
	}

out:
	return data_size + sizeof(*header);
}

static size_t a6xx_snapshot_cluster_dbgahb(struct kgsl_device *device, u8 *buf,
		size_t remain, void *priv)
{
	struct kgsl_snapshot_mvc_regs *header =
		(struct kgsl_snapshot_mvc_regs *)buf;
	struct a6xx_cluster_dbgahb_regs_info *info =
		(struct a6xx_cluster_dbgahb_regs_info *)priv;
	struct a6xx_cluster_dbgahb_registers *cluster = info->cluster;
	unsigned int data_size = 0;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	int i, j;
	unsigned int *src;


	if (crash_dump_valid == false)
		return a6xx_legacy_snapshot_cluster_dbgahb(device, buf, remain,
			info);

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	remain -= sizeof(*header);

	header->ctxt_id = info->ctxt_id;
	header->cluster_id = cluster->id;

	src = (unsigned int *)(a6xx_crashdump_registers.hostptr +
		(header->ctxt_id ? cluster->offset1 : cluster->offset0));

	for (i = 0; i < cluster->num_sets; i++) {
		unsigned int start;
		unsigned int end;

		start = cluster->regs[2 * i];
		end = cluster->regs[2 * i + 1];

		if (remain < (end - start + 3) * 4) {
			SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
			goto out;
		}

		remain -= (end - start + 3) * 4;
		data_size += (end - start + 3) * 4;

		*data++ = start | (1 << 31);
		*data++ = end;
		for (j = start; j <= end; j++)
			*data++ = *src++;
	}
out:
	return data_size + sizeof(*header);
}

static size_t a6xx_legacy_snapshot_non_ctx_dbgahb(struct kgsl_device *device,
		u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_regs *header =
		(struct kgsl_snapshot_regs *)buf;
	struct a6xx_non_ctx_dbgahb_registers *regs =
		(struct a6xx_non_ctx_dbgahb_registers *)priv;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	int count = 0;
	unsigned int read_sel;
	int i, j;

	if (!device->snapshot_legacy)
		return 0;

	/* Figure out how many registers we are going to dump */
	for (i = 0; i < regs->num_sets; i++) {
		int start = regs->regs[i * 2];
		int end = regs->regs[i * 2 + 1];

		count += (end - start + 1);
	}

	if (remain < (count * 8) + sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	header->count = count;

	read_sel = (regs->statetype & 0xff) << 8;
	kgsl_regwrite(device, A6XX_HLSQ_DBG_READ_SEL, read_sel);

	for (i = 0; i < regs->num_sets; i++) {
		unsigned int start = regs->regs[2 * i];
		unsigned int end = regs->regs[2 * i + 1];

		for (j = start; j <= end; j++) {
			unsigned int val;

			val = a6xx_read_dbgahb(device, regs->regbase, j);
			*data++ = j;
			*data++ = val;

		}
	}
	return (count * 8) + sizeof(*header);
}

static size_t a6xx_snapshot_non_ctx_dbgahb(struct kgsl_device *device, u8 *buf,
		size_t remain, void *priv)
{
	struct kgsl_snapshot_regs *header =
		(struct kgsl_snapshot_regs *)buf;
	struct a6xx_non_ctx_dbgahb_registers *regs =
		(struct a6xx_non_ctx_dbgahb_registers *)priv;
	unsigned int count = 0;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int i, k;
	unsigned int *src;

	if (crash_dump_valid == false)
		return a6xx_legacy_snapshot_non_ctx_dbgahb(device, buf, remain,
			regs);

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	remain -= sizeof(*header);

	src = (unsigned int *)(a6xx_crashdump_registers.hostptr + regs->offset);

	for (i = 0; i < regs->num_sets; i++) {
		unsigned int start;
		unsigned int end;

		start = regs->regs[2 * i];
		end = regs->regs[(2 * i) + 1];

		if (remain < (end - start + 1) * 8) {
			SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
			goto out;
		}

		remain -= ((end - start) + 1) * 8;

		for (k = start; k <= end; k++, count++) {
			*data++ = k;
			*data++ = *src++;
		}
	}
out:
	header->count = count;

	/* Return the size of the section */
	return (count * 8) + sizeof(*header);
}

static void a6xx_snapshot_dbgahb_regs(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot)
{
	int i, j;

	for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
		struct a6xx_cluster_dbgahb_registers *cluster =
			&a6xx_dbgahb_ctx_clusters[i];
		struct a6xx_cluster_dbgahb_regs_info info;

		info.cluster = cluster;
		for (j = 0; j < A6XX_NUM_CTXTS; j++) {
			info.ctxt_id = j;

			kgsl_snapshot_add_section(device,
				KGSL_SNAPSHOT_SECTION_MVC, snapshot,
				a6xx_snapshot_cluster_dbgahb, &info);
		}
	}

	for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
		kgsl_snapshot_add_section(device,
			KGSL_SNAPSHOT_SECTION_REGS, snapshot,
			a6xx_snapshot_non_ctx_dbgahb, &a6xx_non_ctx_dbgahb[i]);
	}
}

static size_t a6xx_legacy_snapshot_mvc(struct kgsl_device *device, u8 *buf,
		size_t remain, void *priv)
{
	struct kgsl_snapshot_mvc_regs *header =
		(struct kgsl_snapshot_mvc_regs *)buf;
	struct a6xx_cluster_regs_info *info =
		(struct a6xx_cluster_regs_info *)priv;
	struct a6xx_cluster_registers *cur_cluster = info->cluster;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int ctxt = info->ctxt_id;
	unsigned int start, end, i, j, aperture_cntl = 0;
	unsigned int data_size = 0;

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
		return 0;
	}

	remain -= sizeof(*header);

	header->ctxt_id = info->ctxt_id;
	header->cluster_id = cur_cluster->id;

	/*
	 * Set the AHB control for the Host to read from the
	 * cluster/context for this iteration.
	 */
	aperture_cntl = ((cur_cluster->id & 0x7) << 8) | (ctxt << 4) | ctxt;
	kgsl_regwrite(device, A6XX_CP_APERTURE_CNTL_HOST, aperture_cntl);

	if (cur_cluster->sel)
		kgsl_regwrite(device, cur_cluster->sel->host_reg,
			cur_cluster->sel->val);

	for (i = 0; i < cur_cluster->num_sets; i++) {
		start = cur_cluster->regs[2 * i];
		end = cur_cluster->regs[2 * i + 1];

		if (remain < (end - start + 3) * 4) {
			SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
			goto out;
		}

		remain -= (end - start + 3) * 4;
		data_size += (end - start + 3) * 4;

		*data++ = start | (1 << 31);
		*data++ = end;
		for (j = start; j <= end; j++) {
			unsigned int val;

			kgsl_regread(device, j, &val);
			*data++ = val;
		}
	}
out:
	return data_size + sizeof(*header);
}

static size_t a6xx_snapshot_mvc(struct kgsl_device *device, u8 *buf,
		size_t remain, void *priv)
{
	struct kgsl_snapshot_mvc_regs *header =
		(struct kgsl_snapshot_mvc_regs *)buf;
	struct a6xx_cluster_regs_info *info =
		(struct a6xx_cluster_regs_info *)priv;
	struct a6xx_cluster_registers *cluster = info->cluster;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int *src;
	int i, j;
	unsigned int start, end;
	size_t data_size = 0;

	if (crash_dump_valid == false)
		return a6xx_legacy_snapshot_mvc(device, buf, remain, info);

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
		return 0;
	}

	remain -= sizeof(*header);

	header->ctxt_id = info->ctxt_id;
	header->cluster_id = cluster->id;

	src = (unsigned int *)(a6xx_crashdump_registers.hostptr +
		(header->ctxt_id ? cluster->offset1 : cluster->offset0));

	for (i = 0; i < cluster->num_sets; i++) {
		start = cluster->regs[2 * i];
		end = cluster->regs[2 * i + 1];

		if (remain < (end - start + 3) * 4) {
			SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
			goto out;
		}

		remain -= (end - start + 3) * 4;
		data_size += (end - start + 3) * 4;

		*data++ = start | (1 << 31);
		*data++ = end;
		for (j = start; j <= end; j++)
			*data++ = *src++;
	}

out:
	return data_size + sizeof(*header);

}

static void a6xx_snapshot_mvc_regs(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot)
{
	int i, j;
	struct a6xx_cluster_regs_info info;

	for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
		struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];

		info.cluster = cluster;
		for (j = 0; j < A6XX_NUM_CTXTS; j++) {
			info.ctxt_id = j;

			kgsl_snapshot_add_section(device,
				KGSL_SNAPSHOT_SECTION_MVC, snapshot,
				a6xx_snapshot_mvc, &info);
		}
	}
}

/* a6xx_dbgc_debug_bus_read() - Read data from trace bus */
static void a6xx_dbgc_debug_bus_read(struct kgsl_device *device,
	unsigned int block_id, unsigned int index, unsigned int *val)
{
	unsigned int reg;

	reg = (block_id << A6XX_DBGC_CFG_DBGBUS_SEL_PING_BLK_SEL_SHIFT) |
		(index << A6XX_DBGC_CFG_DBGBUS_SEL_PING_INDEX_SHIFT);

	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_A, reg);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_B, reg);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_C, reg);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_D, reg);

	/*
	 * A delay of 1 us is needed to give the hardware enough time to
	 * funnel correct data into the trace buffer.
	 */
	udelay(1);

	kgsl_regread(device, A6XX_DBGC_CFG_DBGBUS_TRACE_BUF2, val);
	val++;
	kgsl_regread(device, A6XX_DBGC_CFG_DBGBUS_TRACE_BUF1, val);
}

/* a6xx_snapshot_dbgc_debugbus_block() - Capture debug data for a gpu block */
static size_t a6xx_snapshot_dbgc_debugbus_block(struct kgsl_device *device,
	u8 *buf, size_t remain, void *priv)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct kgsl_snapshot_debugbus *header =
		(struct kgsl_snapshot_debugbus *)buf;
	struct adreno_debugbus_block *block = priv;
	int i;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int dwords;
	unsigned int block_id;
	size_t size;

	dwords = block->dwords;

	/* For a6xx each debug bus data unit is 2 DWORDS */
	size = (dwords * sizeof(unsigned int) * 2) + sizeof(*header);

	if (remain < size) {
		SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
		return 0;
	}

	header->id = block->block_id;
	header->count = dwords * 2;

	block_id = block->block_id;
	/* GMU_GX data is read using the GMU_CX block id on A630 */
	if (adreno_is_a630(adreno_dev) &&
		(block_id == A6XX_DBGBUS_GMU_GX))
		block_id = A6XX_DBGBUS_GMU_CX;

	for (i = 0; i < dwords; i++)
		a6xx_dbgc_debug_bus_read(device, block_id, i, &data[i*2]);

	return size;
}

/* a6xx_snapshot_vbif_debugbus_block() - Capture debug data for VBIF block */
static size_t a6xx_snapshot_vbif_debugbus_block(struct kgsl_device *device,
	u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_debugbus *header =
		(struct kgsl_snapshot_debugbus *)buf;
	struct adreno_debugbus_block *block = priv;
	int i, j;
	/*
	 * Total number of VBIF data words considering 3 sections:
	 * 2 arbiter blocks of 16 words
	 * 5 AXI XIN blocks of 18 dwords each
	 * 4 core clock side XIN blocks of 12 dwords each
	 */
	unsigned int dwords = (16 * A6XX_NUM_AXI_ARB_BLOCKS) +
		(18 * A6XX_NUM_XIN_AXI_BLOCKS) +
		(12 * A6XX_NUM_XIN_CORE_BLOCKS);
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	size_t size;
	unsigned int reg_clk;

	size = (dwords * sizeof(unsigned int)) + sizeof(*header);

	if (remain < size) {
		SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
		return 0;
	}
	header->id = block->block_id;
	header->count = dwords;

	kgsl_regread(device, A6XX_VBIF_CLKON, &reg_clk);
	kgsl_regwrite(device, A6XX_VBIF_CLKON, reg_clk |
		(A6XX_VBIF_CLKON_FORCE_ON_TESTBUS_MASK <<
		A6XX_VBIF_CLKON_FORCE_ON_TESTBUS_SHIFT));
	kgsl_regwrite(device, A6XX_VBIF_TEST_BUS1_CTRL0, 0);
	kgsl_regwrite(device, A6XX_VBIF_TEST_BUS_OUT_CTRL,
		(A6XX_VBIF_TEST_BUS_OUT_CTRL_EN_MASK <<
		A6XX_VBIF_TEST_BUS_OUT_CTRL_EN_SHIFT));

	for (i = 0; i < A6XX_NUM_AXI_ARB_BLOCKS; i++) {
		kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL0,
			(1 << (i + 16)));
		for (j = 0; j < 16; j++) {
			kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL1,
				((j & A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL_MASK)
				<< A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL_SHIFT));
			kgsl_regread(device, A6XX_VBIF_TEST_BUS_OUT,
				data);
			data++;
		}
	}

	/* XIN blocks AXI side */
	for (i = 0; i < A6XX_NUM_XIN_AXI_BLOCKS; i++) {
		kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL0, 1 << i);
		for (j = 0; j < 18; j++) {
			kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL1,
				((j & A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL_MASK)
				<< A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL_SHIFT));
			kgsl_regread(device, A6XX_VBIF_TEST_BUS_OUT,
				data);
			data++;
		}
	}
	kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL0, 0);

	/* XIN blocks core clock side */
	for (i = 0; i < A6XX_NUM_XIN_CORE_BLOCKS; i++) {
		kgsl_regwrite(device, A6XX_VBIF_TEST_BUS1_CTRL0, 1 << i);
		for (j = 0; j < 12; j++) {
			kgsl_regwrite(device, A6XX_VBIF_TEST_BUS1_CTRL1,
				((j & A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL_MASK)
				<< A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL_SHIFT));
			kgsl_regread(device, A6XX_VBIF_TEST_BUS_OUT,
				data);
			data++;
		}
	}
	/* restore the clock of VBIF */
	kgsl_regwrite(device, A6XX_VBIF_CLKON, reg_clk);
	return size;
}

static void _cx_dbgc_regread(unsigned int offsetwords, unsigned int *value)
{
	void __iomem *reg;

	if (WARN((offsetwords < A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) ||
		(offsetwords > A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2),
		"Read beyond CX_DBGC block: 0x%x\n", offsetwords))
		return;

	reg = a6xx_cx_dbgc +
		((offsetwords - A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) << 2);
	*value = __raw_readl(reg);

	/*
	 * ensure this read finishes before the next one.
	 * i.e. act like normal readl()
	 */
	rmb();
}

static void _cx_dbgc_regwrite(unsigned int offsetwords, unsigned int value)
{
	void __iomem *reg;

	if (WARN((offsetwords < A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) ||
		(offsetwords > A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2),
		"Write beyond CX_DBGC block: 0x%x\n", offsetwords))
		return;

	reg = a6xx_cx_dbgc +
		((offsetwords - A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) << 2);

	/*
	 * ensure previous writes post before this one,
	 * i.e. act like normal writel()
	 */
	wmb();
	__raw_writel(value, reg);
}

/* a6xx_cx_debug_bus_read() - Read data from the CX DBGC trace bus */
static void a6xx_cx_debug_bus_read(struct kgsl_device *device,
	unsigned int block_id, unsigned int index, unsigned int *val)
{
	unsigned int reg;

	reg = (block_id << A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_BLK_SEL_SHIFT) |
		(index << A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_INDEX_SHIFT);

	_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_A, reg);
	_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_B, reg);
	_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_C, reg);
	_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_D, reg);

	/*
	 * A delay of 1 us is needed to give the hardware enough time to
	 * funnel correct data into the trace buffer.
	 */
	udelay(1);

	_cx_dbgc_regread(A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2, val);
	val++;
	_cx_dbgc_regread(A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1, val);
}

/*
 * a6xx_snapshot_cx_dbgc_debugbus_block() - Capture debug data for a gpu
 * block from the CX DBGC block
 */
static size_t a6xx_snapshot_cx_dbgc_debugbus_block(struct kgsl_device *device,
	u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_debugbus *header =
		(struct kgsl_snapshot_debugbus *)buf;
	struct adreno_debugbus_block *block = priv;
	int i;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int dwords;
	size_t size;

	dwords = block->dwords;

	/* For a6xx each debug bus data unit is 2 DWORDS */
	size = (dwords * sizeof(unsigned int) * 2) + sizeof(*header);

	if (remain < size) {
		SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
		return 0;
	}

	header->id = block->block_id;
	header->count = dwords * 2;

	for (i = 0; i < dwords; i++)
		a6xx_cx_debug_bus_read(device, block->block_id, i,
			&data[i*2]);

	return size;
}

/* a6xx_snapshot_debugbus() - Capture debug bus data */
static void a6xx_snapshot_debugbus(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot)
{
	int i;

	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_CNTLT,
		(0xf << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT_SHIFT) |
		(0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU_SHIFT) |
		(0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN_SHIFT));

	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_CNTLM,
		0xf << A6XX_DBGC_CFG_DBGBUS_CTLTM_ENABLE_SHIFT);

	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_0, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_1, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_2, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_3, 0);

	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_BYTEL_0,
		(0 << A6XX_DBGC_CFG_DBGBUS_BYTEL0_SHIFT) |
		(1 << A6XX_DBGC_CFG_DBGBUS_BYTEL1_SHIFT) |
		(2 << A6XX_DBGC_CFG_DBGBUS_BYTEL2_SHIFT) |
		(3 << A6XX_DBGC_CFG_DBGBUS_BYTEL3_SHIFT) |
		(4 << A6XX_DBGC_CFG_DBGBUS_BYTEL4_SHIFT) |
		(5 << A6XX_DBGC_CFG_DBGBUS_BYTEL5_SHIFT) |
		(6 << A6XX_DBGC_CFG_DBGBUS_BYTEL6_SHIFT) |
		(7 << A6XX_DBGC_CFG_DBGBUS_BYTEL7_SHIFT));
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_BYTEL_1,
		(8 << A6XX_DBGC_CFG_DBGBUS_BYTEL8_SHIFT) |
		(9 << A6XX_DBGC_CFG_DBGBUS_BYTEL9_SHIFT) |
		(10 << A6XX_DBGC_CFG_DBGBUS_BYTEL10_SHIFT) |
		(11 << A6XX_DBGC_CFG_DBGBUS_BYTEL11_SHIFT) |
		(12 << A6XX_DBGC_CFG_DBGBUS_BYTEL12_SHIFT) |
		(13 << A6XX_DBGC_CFG_DBGBUS_BYTEL13_SHIFT) |
		(14 << A6XX_DBGC_CFG_DBGBUS_BYTEL14_SHIFT) |
		(15 << A6XX_DBGC_CFG_DBGBUS_BYTEL15_SHIFT));

	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_0, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_1, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_2, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_3, 0);

	a6xx_cx_dbgc = ioremap(device->reg_phys +
		(A6XX_CX_DBGC_CFG_DBGBUS_SEL_A << 2),
		(A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2 -
			A6XX_CX_DBGC_CFG_DBGBUS_SEL_A + 1) << 2);

	if (a6xx_cx_dbgc) {
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_CNTLT,
			(0xf << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT_SHIFT) |
			(0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU_SHIFT) |
			(0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN_SHIFT));

		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_CNTLM,
			0xf << A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE_SHIFT);

		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3, 0);

		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0,
			(0 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL0_SHIFT) |
			(1 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL1_SHIFT) |
			(2 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL2_SHIFT) |
			(3 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL3_SHIFT) |
			(4 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL4_SHIFT) |
			(5 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL5_SHIFT) |
			(6 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL6_SHIFT) |
			(7 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL7_SHIFT));
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1,
			(8 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL8_SHIFT) |
			(9 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL9_SHIFT) |
			(10 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL10_SHIFT) |
			(11 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL11_SHIFT) |
			(12 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL12_SHIFT) |
			(13 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL13_SHIFT) |
			(14 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL14_SHIFT) |
			(15 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL15_SHIFT));

		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3, 0);
	} else
		KGSL_DRV_ERR(device, "Unable to ioremap CX_DBGC_CFG block\n");

	for (i = 0; i < ARRAY_SIZE(a6xx_dbgc_debugbus_blocks); i++) {
		kgsl_snapshot_add_section(device,
			KGSL_SNAPSHOT_SECTION_DEBUGBUS,
			snapshot, a6xx_snapshot_dbgc_debugbus_block,
			(void *) &a6xx_dbgc_debugbus_blocks[i]);
	}

	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUGBUS,
		snapshot, a6xx_snapshot_vbif_debugbus_block,
		(void *) &a6xx_vbif_debugbus_blocks);

	if (a6xx_cx_dbgc) {
		for (i = 0; i < ARRAY_SIZE(a6xx_cx_dbgc_debugbus_blocks); i++) {
			kgsl_snapshot_add_section(device,
				KGSL_SNAPSHOT_SECTION_DEBUGBUS,
				snapshot, a6xx_snapshot_cx_dbgc_debugbus_block,
				(void *) &a6xx_cx_dbgc_debugbus_blocks[i]);
		}
		iounmap(a6xx_cx_dbgc);
	}
}

/*
 * a6xx_snapshot_gmu() - A6XX GMU snapshot function
 * @adreno_dev: Device being snapshotted
 * @snapshot: Pointer to the snapshot instance
 *
 * This is where all of the A6XX GMU specific bits and pieces are grabbed
 * into the snapshot memory
 */
void a6xx_snapshot_gmu(struct adreno_device *adreno_dev,
		struct kgsl_snapshot *snapshot)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);

	if (!kgsl_gmu_isenabled(device))
		return;

	adreno_snapshot_registers(device, snapshot, a6xx_gmu_registers,
		ARRAY_SIZE(a6xx_gmu_registers) / 2);

	if (gpudev->gx_is_on(adreno_dev))
		adreno_snapshot_registers(device, snapshot,
			a6xx_gmu_gx_registers,
			ARRAY_SIZE(a6xx_gmu_gx_registers) / 2);
}

/* a6xx_snapshot_sqe() - Dump the SQE firmware version into the snapshot */
static size_t a6xx_snapshot_sqe(struct kgsl_device *device, u8 *buf,
		size_t remain, void *priv)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct kgsl_snapshot_debug *header = (struct kgsl_snapshot_debug *)buf;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	struct adreno_firmware *fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);

	if (remain < DEBUG_SECTION_SZ(1)) {
		SNAPSHOT_ERR_NOMEM(device, "SQE VERSION DEBUG");
		return 0;
	}

	/* Dump the SQE firmware version */
	header->type = SNAPSHOT_DEBUG_SQE_VERSION;
	header->size = 1;
	*data = fw->version;

	return DEBUG_SECTION_SZ(1);
}

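/*
 * _a6xx_do_crashdump() - Trigger the CP crash dumper
 * @device: Device being snapshotted
 *
 * Point the CP at the previously built capture script, kick off the dump
 * and poll A6XX_CP_CRASH_DUMP_STATUS until the dumper signals completion or
 * CP_CRASH_DUMPER_TIMEOUT expires. APRIV is enabled for the duration so the
 * CP can access the privileged script and data buffers. crash_dump_valid is
 * set only when the dumper finishes in time, so later snapshot code can tell
 * whether the contents of a6xx_crashdump_registers are usable.
 */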
static void _a6xx_do_crashdump(struct kgsl_device *device)
{
	unsigned long wait_time;
	unsigned int reg = 0;
	unsigned int val;

	crash_dump_valid = false;

	if (!device->snapshot_crashdumper)
		return;
	if (a6xx_capturescript.gpuaddr == 0 ||
			a6xx_crashdump_registers.gpuaddr == 0)
		return;

	/* If the SMMU is stalled we cannot do a crash dump */
	kgsl_regread(device, A6XX_RBBM_STATUS3, &val);
	if (val & BIT(24))
		return;

	/* Turn on APRIV so we can access the buffers */
	kgsl_regwrite(device, A6XX_CP_MISC_CNTL, 1);

	kgsl_regwrite(device, A6XX_CP_CRASH_SCRIPT_BASE_LO,
			lower_32_bits(a6xx_capturescript.gpuaddr));
	kgsl_regwrite(device, A6XX_CP_CRASH_SCRIPT_BASE_HI,
			upper_32_bits(a6xx_capturescript.gpuaddr));
	kgsl_regwrite(device, A6XX_CP_CRASH_DUMP_CNTL, 1);

	wait_time = jiffies + msecs_to_jiffies(CP_CRASH_DUMPER_TIMEOUT);
	while (!time_after(jiffies, wait_time)) {
		kgsl_regread(device, A6XX_CP_CRASH_DUMP_STATUS, &reg);
		if (reg & 0x2)
			break;
		cpu_relax();
	}

	kgsl_regwrite(device, A6XX_CP_MISC_CNTL, 0);

	if (!(reg & 0x2)) {
		KGSL_CORE_ERR("Crash dump timed out: 0x%X\n", reg);
		return;
	}

	crash_dump_valid = true;
}

/*
 * a6xx_snapshot() - A6XX GPU snapshot function
 * @adreno_dev: Device being snapshotted
 * @snapshot: Pointer to the snapshot instance
 *
 * This is where all of the A6XX specific bits and pieces are grabbed
 * into the snapshot memory
 */
void a6xx_snapshot(struct adreno_device *adreno_dev,
		struct kgsl_snapshot *snapshot)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	struct adreno_snapshot_data *snap_data = gpudev->snapshot_data;
	bool sptprac_on;
	unsigned int i;

	sptprac_on = gpudev->sptprac_is_on(adreno_dev);

	/* Return if the GX is off */
	if (!gpudev->gx_is_on(adreno_dev))
		return;

	/* Dump the registers which get affected by the crash dumper trigger */
	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
		snapshot, a6xx_snapshot_pre_crashdump_regs, NULL);

	/* Also dump the VBIF registers, which get affected by the crash dumper */
	adreno_snapshot_vbif_registers(device, snapshot,
		a6xx_vbif_snapshot_registers,
		ARRAY_SIZE(a6xx_vbif_snapshot_registers));

	/* Try to run the crash dumper */
	if (sptprac_on)
		_a6xx_do_crashdump(device);

	for (i = 0; i < ARRAY_SIZE(a6xx_reg_list); i++) {
		kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
			snapshot, a6xx_snapshot_registers, &a6xx_reg_list[i]);
	}

	/* CP_SQE indexed registers */
	kgsl_snapshot_indexed_registers(device, snapshot,
		A6XX_CP_SQE_STAT_ADDR, A6XX_CP_SQE_STAT_DATA,
		0, snap_data->sect_sizes->cp_pfp);

	/* CP_DRAW_STATE */
	kgsl_snapshot_indexed_registers(device, snapshot,
		A6XX_CP_DRAW_STATE_ADDR, A6XX_CP_DRAW_STATE_DATA,
		0, 0x100);

	/* SQE_UCODE Cache */
	kgsl_snapshot_indexed_registers(device, snapshot,
		A6XX_CP_SQE_UCODE_DBG_ADDR, A6XX_CP_SQE_UCODE_DBG_DATA,
		0, 0x6000);

	/* CP ROQ */
	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG,
		snapshot, adreno_snapshot_cp_roq,
		&snap_data->sect_sizes->roq);

	/* SQE Firmware */
	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG,
		snapshot, a6xx_snapshot_sqe, NULL);

	/* Mempool debug data */
	a6xx_snapshot_mempool(device, snapshot);

	if (sptprac_on) {
		/* Shader memory */
		a6xx_snapshot_shader(device, snapshot);

		/* MVC register section */
		a6xx_snapshot_mvc_regs(device, snapshot);

		/* Registers dumped through DBG AHB */
		a6xx_snapshot_dbgahb_regs(device, snapshot);
	}

	a6xx_snapshot_debugbus(device, snapshot);
}

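/*
 * The capture script built by the helpers below is a flat list of two-qword
 * entries. As used in this file, an entry of the form
 * { value, ((uint64_t)reg << 44) | (1 << 21) | 1 } programs a single register
 * (an aperture or sub-block select), while an entry of the form
 * { dump buffer GPU address, ((uint64_t)reg << 44) | count } reads 'count'
 * dwords starting at register offset 'reg' into the dump buffer. Each helper
 * returns the number of qwords it emitted so the caller can advance the
 * script pointer.
 */

/*
 * _a6xx_crashdump_init_mvc() - Emit capture script entries for the MVC
 * register clusters
 * @ptr: Current position in the capture script
 * @offset: Running offset into the crash dump data buffer
 *
 * For every cluster in a6xx_clusters, program the optional sub-block select
 * register, select each context through A6XX_CP_APERTURE_CNTL_CD and emit a
 * read for each register range. The per-context data offsets are recorded in
 * cluster->offset0/offset1 so the data can be located when the snapshot is
 * written out.
 */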
static int _a6xx_crashdump_init_mvc(uint64_t *ptr, uint64_t *offset)
{
	int qwords = 0;
	unsigned int i, j, k;
	unsigned int count;

	for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
		struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];

		if (cluster->sel) {
			ptr[qwords++] = cluster->sel->val;
			ptr[qwords++] = ((uint64_t)cluster->sel->cd_reg << 44) |
					(1 << 21) | 1;
		}

		cluster->offset0 = *offset;
		for (j = 0; j < A6XX_NUM_CTXTS; j++) {

			if (j == 1)
				cluster->offset1 = *offset;

			ptr[qwords++] = (cluster->id << 8) | (j << 4) | j;
			ptr[qwords++] =
				((uint64_t)A6XX_CP_APERTURE_CNTL_CD << 44) |
				(1 << 21) | 1;

			for (k = 0; k < cluster->num_sets; k++) {
				count = REG_PAIR_COUNT(cluster->regs, k);
				ptr[qwords++] =
					a6xx_crashdump_registers.gpuaddr + *offset;
				ptr[qwords++] =
					(((uint64_t)cluster->regs[2 * k]) << 44) |
					count;

				*offset += count * sizeof(unsigned int);
			}
		}
	}

	return qwords;
}

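/*
 * _a6xx_crashdump_init_shader() - Emit capture script entries for one shader
 * block
 * @block: Shader block to capture
 * @ptr: Current position in the capture script
 * @offset: Running offset into the crash dump data buffer
 *
 * Each of the A6XX_NUM_SHADER_BANKS banks is selected through
 * A6XX_HLSQ_DBG_READ_SEL and then read as a single chunk of block->sz dwords
 * through A6XX_HLSQ_DBG_AHB_READ_APERTURE. The data offset of the first bank
 * is remembered in block->offset.
 */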
static int _a6xx_crashdump_init_shader(struct a6xx_shader_block *block,
		uint64_t *ptr, uint64_t *offset)
{
	int qwords = 0;
	unsigned int j;

	/* Capture each bank in the block */
	for (j = 0; j < A6XX_NUM_SHADER_BANKS; j++) {
		/* Program the aperture */
		ptr[qwords++] =
			(block->statetype << A6XX_SHADER_STATETYPE_SHIFT) | j;
		ptr[qwords++] = (((uint64_t) A6XX_HLSQ_DBG_READ_SEL << 44)) |
			(1 << 21) | 1;

		/* Read all the data in one chunk */
		ptr[qwords++] = a6xx_crashdump_registers.gpuaddr + *offset;
		ptr[qwords++] =
			(((uint64_t) A6XX_HLSQ_DBG_AHB_READ_APERTURE << 44)) |
			block->sz;

		/* Remember the offset of the first bank for easy access */
		if (j == 0)
			block->offset = *offset;

		*offset += block->sz * sizeof(unsigned int);
	}

	return qwords;
}

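/*
 * _a6xx_crashdump_init_ctx_dbgahb() - Emit capture script entries for the
 * per-context DBG AHB register clusters
 * @ptr: Current position in the capture script
 * @offset: Running offset into the crash dump data buffer
 *
 * For each cluster in a6xx_dbgahb_ctx_clusters and each context, select the
 * HLSQ debug aperture based on the cluster statetype and then read every
 * register range, offset from the cluster's register base, through
 * A6XX_HLSQ_DBG_AHB_READ_APERTURE.
 */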
static int _a6xx_crashdump_init_ctx_dbgahb(uint64_t *ptr, uint64_t *offset)
{
	int qwords = 0;
	unsigned int i, j, k;
	unsigned int count;

	for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
		struct a6xx_cluster_dbgahb_registers *cluster =
				&a6xx_dbgahb_ctx_clusters[i];

		cluster->offset0 = *offset;

		for (j = 0; j < A6XX_NUM_CTXTS; j++) {
			if (j == 1)
				cluster->offset1 = *offset;

			/* Program the aperture */
			ptr[qwords++] =
				((cluster->statetype + j * 2) & 0xff) << 8;
			ptr[qwords++] =
				(((uint64_t)A6XX_HLSQ_DBG_READ_SEL << 44)) |
				(1 << 21) | 1;

			for (k = 0; k < cluster->num_sets; k++) {
				unsigned int start = cluster->regs[2 * k];

				count = REG_PAIR_COUNT(cluster->regs, k);
				ptr[qwords++] =
					a6xx_crashdump_registers.gpuaddr + *offset;
				ptr[qwords++] =
					(((uint64_t)(A6XX_HLSQ_DBG_AHB_READ_APERTURE +
					start - cluster->regbase / 4) << 44)) |
					count;

				*offset += count * sizeof(unsigned int);
			}
		}
	}
	return qwords;
}

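/*
 * _a6xx_crashdump_init_non_ctx_dbgahb() - Emit capture script entries for the
 * non-context DBG AHB registers
 * @ptr: Current position in the capture script
 * @offset: Running offset into the crash dump data buffer
 *
 * Same scheme as the per-context clusters, except that the aperture is
 * programmed once per block since these registers are not banked per context.
 */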
static int _a6xx_crashdump_init_non_ctx_dbgahb(uint64_t *ptr, uint64_t *offset)
{
	int qwords = 0;
	unsigned int i, k;
	unsigned int count;

	for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
		struct a6xx_non_ctx_dbgahb_registers *regs =
				&a6xx_non_ctx_dbgahb[i];

		regs->offset = *offset;

		/* Program the aperture */
		ptr[qwords++] = (regs->statetype & 0xff) << 8;
		ptr[qwords++] = (((uint64_t)A6XX_HLSQ_DBG_READ_SEL << 44)) |
				(1 << 21) | 1;

		for (k = 0; k < regs->num_sets; k++) {
			unsigned int start = regs->regs[2 * k];

			count = REG_PAIR_COUNT(regs->regs, k);
			ptr[qwords++] =
				a6xx_crashdump_registers.gpuaddr + *offset;
			ptr[qwords++] =
				(((uint64_t)(A6XX_HLSQ_DBG_AHB_READ_APERTURE +
				start - regs->regbase / 4) << 44)) |
				count;

			*offset += count * sizeof(unsigned int);
		}
	}
	return qwords;
}

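/*
 * a6xx_crashdump_init() - Allocate and build the crash dumper resources
 * @adreno_dev: Device the crash dumper will run on
 *
 * Walk the same register, shader and cluster tables that the script builders
 * use to size the capture script and data buffer, allocate both global
 * buffers and then fill in the capture script. Subsequent calls return early
 * once the buffers already exist.
 */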
void a6xx_crashdump_init(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned int script_size = 0;
	unsigned int data_size = 0;
	unsigned int i, j, k;
	uint64_t *ptr;
	uint64_t offset = 0;

	if (a6xx_capturescript.gpuaddr != 0 &&
		a6xx_crashdump_registers.gpuaddr != 0)
		return;

	/*
	 * We need to allocate two buffers:
	 * 1 - the buffer to hold the capture script
	 * 2 - the buffer to hold the data
	 */

	/*
	 * To save the registers, we need 16 bytes per register pair for the
	 * script and a dword for each register in the data
	 */
	for (i = 0; i < ARRAY_SIZE(a6xx_reg_list); i++) {
		struct reg_list *regs = &a6xx_reg_list[i];

		/* 16 bytes for programming the aperture */
		if (regs->sel)
			script_size += 16;

		/* Each pair needs 16 bytes (2 qwords) */
		script_size += regs->count * 16;

		/* Each register needs a dword in the data */
		for (j = 0; j < regs->count; j++)
			data_size += REG_PAIR_COUNT(regs->regs, j) *
				sizeof(unsigned int);
	}

	/*
	 * To save the shader blocks for each block in each type we need 32
	 * bytes for the script (16 bytes to program the aperture and 16 to
	 * read the data) and then a block specific number of bytes to hold
	 * the data
	 */
	for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
		script_size += 32 * A6XX_NUM_SHADER_BANKS;
		data_size += a6xx_shader_blocks[i].sz * sizeof(unsigned int) *
			A6XX_NUM_SHADER_BANKS;
	}

	/* Calculate the script and data size for MVC registers */
	for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
		struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];

		for (j = 0; j < A6XX_NUM_CTXTS; j++) {

			/* 16 bytes for programming the aperture */
			script_size += 16;

			/* Reading each pair of registers takes 16 bytes */
			script_size += 16 * cluster->num_sets;

			/* A dword per register read from the cluster list */
			for (k = 0; k < cluster->num_sets; k++)
				data_size += REG_PAIR_COUNT(cluster->regs, k) *
						sizeof(unsigned int);
		}
	}

	/* Calculate the script and data size for debug AHB registers */
	for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
		struct a6xx_cluster_dbgahb_registers *cluster =
				&a6xx_dbgahb_ctx_clusters[i];

		for (j = 0; j < A6XX_NUM_CTXTS; j++) {

			/* 16 bytes for programming the aperture */
			script_size += 16;

			/* Reading each pair of registers takes 16 bytes */
			script_size += 16 * cluster->num_sets;

			/* A dword per register read from the cluster list */
			for (k = 0; k < cluster->num_sets; k++)
				data_size += REG_PAIR_COUNT(cluster->regs, k) *
						sizeof(unsigned int);
		}
	}

	/*
	 * Calculate the script and data size for non context debug
	 * AHB registers
	 */
	for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
		struct a6xx_non_ctx_dbgahb_registers *regs =
				&a6xx_non_ctx_dbgahb[i];

		/* 16 bytes for programming the aperture */
		script_size += 16;

		/* Reading each pair of registers takes 16 bytes */
		script_size += 16 * regs->num_sets;

		/* A dword per register read from the cluster list */
		for (k = 0; k < regs->num_sets; k++)
			data_size += REG_PAIR_COUNT(regs->regs, k) *
				sizeof(unsigned int);
	}

	/* Now allocate the script and data buffers */

	/* The script buffer needs 2 extra qwords on the end */
	if (kgsl_allocate_global(device, &a6xx_capturescript,
		script_size + 16, KGSL_MEMFLAGS_GPUREADONLY,
		KGSL_MEMDESC_PRIVILEGED, "capturescript"))
		return;

	if (kgsl_allocate_global(device, &a6xx_crashdump_registers, data_size,
		0, KGSL_MEMDESC_PRIVILEGED, "capturescript_regs")) {
		kgsl_free_global(KGSL_DEVICE(adreno_dev), &a6xx_capturescript);
		return;
	}

	/* Build the capture script */

	ptr = (uint64_t *)a6xx_capturescript.hostptr;

	/* For the registers, program a read command for each pair */
	for (i = 0; i < ARRAY_SIZE(a6xx_reg_list); i++) {
		struct reg_list *regs = &a6xx_reg_list[i];

		regs->offset = offset;

		/* Program the SEL_CNTL_CD register appropriately */
		if (regs->sel) {
			*ptr++ = regs->sel->val;
			*ptr++ = (((uint64_t)regs->sel->cd_reg << 44)) |
				(1 << 21) | 1;
		}

		for (j = 0; j < regs->count; j++) {
			unsigned int r = REG_PAIR_COUNT(regs->regs, j);

			*ptr++ = a6xx_crashdump_registers.gpuaddr + offset;
			*ptr++ = (((uint64_t) regs->regs[2 * j]) << 44) | r;
			offset += r * sizeof(unsigned int);
		}
	}

	/* Program each shader block */
	for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
		ptr += _a6xx_crashdump_init_shader(&a6xx_shader_blocks[i], ptr,
							&offset);
	}

	/* Program the capture script for the MVC registers */
	ptr += _a6xx_crashdump_init_mvc(ptr, &offset);

	ptr += _a6xx_crashdump_init_ctx_dbgahb(ptr, &offset);

	ptr += _a6xx_crashdump_init_non_ctx_dbgahb(ptr, &offset);

	/* Zero the two extra qwords at the end of the script */
	*ptr++ = 0;
	*ptr++ = 0;
}