1/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#include <linux/io.h>
15#include "kgsl.h"
16#include "adreno.h"
17#include "kgsl_snapshot.h"
18#include "adreno_snapshot.h"
19#include "a6xx_reg.h"
20#include "adreno_a6xx.h"
21#include "kgsl_gmu.h"
22
23#define A6XX_NUM_CTXTS 2
24#define A6XX_NUM_AXI_ARB_BLOCKS 2
25#define A6XX_NUM_XIN_AXI_BLOCKS 5
26#define A6XX_NUM_XIN_CORE_BLOCKS 4
27
28static const unsigned int a6xx_gras_cluster[] = {
29 0x8000, 0x8006, 0x8010, 0x8092, 0x8094, 0x809D, 0x80A0, 0x80A6,
30 0x80AF, 0x80F1, 0x8100, 0x8107, 0x8109, 0x8109, 0x8110, 0x8110,
31 0x8400, 0x840B,
32};
33
34static const unsigned int a6xx_ps_cluster_rac[] = {
35 0x8800, 0x8806, 0x8809, 0x8811, 0x8818, 0x881E, 0x8820, 0x8865,
36 0x8870, 0x8879, 0x8880, 0x8889, 0x8890, 0x8891, 0x8898, 0x8898,
37 0x88C0, 0x88C1, 0x88D0, 0x88E3, 0x8900, 0x890C, 0x890F, 0x891A,
38 0x8C00, 0x8C01, 0x8C08, 0x8C10, 0x8C17, 0x8C1F, 0x8C26, 0x8C33,
39};
40
41static const unsigned int a6xx_ps_cluster_rbp[] = {
42 0x88F0, 0x88F3, 0x890D, 0x890E, 0x8927, 0x8928, 0x8BF0, 0x8BF1,
43 0x8C02, 0x8C07, 0x8C11, 0x8C16, 0x8C20, 0x8C25,
44};
45
46static const unsigned int a6xx_ps_cluster[] = {
47 0x9200, 0x9216, 0x9218, 0x9236, 0x9300, 0x9306,
48};
49
50static const unsigned int a6xx_fe_cluster[] = {
51 0x9300, 0x9306, 0x9800, 0x9806, 0x9B00, 0x9B07, 0xA000, 0xA009,
52 0xA00E, 0xA0EF, 0xA0F8, 0xA0F8,
53};
54
55static const unsigned int a6xx_pc_vs_cluster[] = {
56 0x9100, 0x9108, 0x9300, 0x9306, 0x9980, 0x9981, 0x9B00, 0x9B07,
57};
58
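/*
 * The two sel_reg instances below write different values (0x0 and 0x9) to
 * the same RB sub-block select register, presumably steering reads to the
 * RAC and RBP sub-blocks so their overlapping RB ranges can be dumped
 * separately.
 */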
59static const struct sel_reg {
60 unsigned int host_reg;
61 unsigned int cd_reg;
62 unsigned int val;
63} _a6xx_rb_rac_aperture = {
64 .host_reg = A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST,
65 .cd_reg = A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD,
66 .val = 0x0,
67},
68_a6xx_rb_rbp_aperture = {
69 .host_reg = A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST,
70 .cd_reg = A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD,
71 .val = 0x9,
72};
73
74static struct a6xx_cluster_registers {
75 unsigned int id;
76 const unsigned int *regs;
77 unsigned int num_sets;
78 const struct sel_reg *sel;
79 unsigned int offset0;
80 unsigned int offset1;
81} a6xx_clusters[] = {
82 { CP_CLUSTER_GRAS, a6xx_gras_cluster, ARRAY_SIZE(a6xx_gras_cluster)/2,
83 NULL },
84 { CP_CLUSTER_PS, a6xx_ps_cluster_rac, ARRAY_SIZE(a6xx_ps_cluster_rac)/2,
85 &_a6xx_rb_rac_aperture },
86 { CP_CLUSTER_PS, a6xx_ps_cluster_rbp, ARRAY_SIZE(a6xx_ps_cluster_rbp)/2,
87 &_a6xx_rb_rbp_aperture },
88 { CP_CLUSTER_PS, a6xx_ps_cluster, ARRAY_SIZE(a6xx_ps_cluster)/2,
89 NULL },
90 { CP_CLUSTER_FE, a6xx_fe_cluster, ARRAY_SIZE(a6xx_fe_cluster)/2,
91 NULL },
92 { CP_CLUSTER_PC_VS, a6xx_pc_vs_cluster,
93 ARRAY_SIZE(a6xx_pc_vs_cluster)/2, NULL },
94};
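/*
 * Each regs array above lists (start, end) pairs, hence the ARRAY_SIZE()/2
 * register sets per cluster. offset0/offset1 are presumably filled in later
 * with the per-context locations of the dumped values inside
 * a6xx_crashdump_registers.
 */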
95
96struct a6xx_cluster_regs_info {
97 struct a6xx_cluster_registers *cluster;
98 unsigned int ctxt_id;
99};
100
101static const unsigned int a6xx_sp_vs_hlsq_cluster[] = {
102 0xB800, 0xB803, 0xB820, 0xB822,
103};
104
105static const unsigned int a6xx_sp_vs_sp_cluster[] = {
106 0xA800, 0xA824, 0xA830, 0xA83C, 0xA840, 0xA864, 0xA870, 0xA895,
107 0xA8A0, 0xA8AF, 0xA8C0, 0xA8C3,
108};
109
110static const unsigned int a6xx_hlsq_duplicate_cluster[] = {
111 0xBB10, 0xBB11, 0xBB20, 0xBB29,
112};
113
114static const unsigned int a6xx_hlsq_2d_duplicate_cluster[] = {
115 0xBD80, 0xBD80,
116};
117
118static const unsigned int a6xx_sp_duplicate_cluster[] = {
119 0xAB00, 0xAB00, 0xAB04, 0xAB05, 0xAB10, 0xAB1B, 0xAB20, 0xAB20,
120};
121
122static const unsigned int a6xx_tp_duplicate_cluster[] = {
123 0xB300, 0xB307, 0xB309, 0xB309, 0xB380, 0xB382,
124};
125
126static const unsigned int a6xx_sp_ps_hlsq_cluster[] = {
127 0xB980, 0xB980, 0xB982, 0xB987, 0xB990, 0xB99B, 0xB9A0, 0xB9A2,
128 0xB9C0, 0xB9C9,
129};
130
131static const unsigned int a6xx_sp_ps_hlsq_2d_cluster[] = {
132 0xBD80, 0xBD80,
133};
134
135static const unsigned int a6xx_sp_ps_sp_cluster[] = {
136 0xA980, 0xA9A8, 0xA9B0, 0xA9BC, 0xA9D0, 0xA9D3, 0xA9E0, 0xA9F3,
137 0xAA00, 0xAA00, 0xAA30, 0xAA31,
138};
139
140static const unsigned int a6xx_sp_ps_sp_2d_cluster[] = {
141 0xACC0, 0xACC0,
142};
143
144static const unsigned int a6xx_sp_ps_tp_cluster[] = {
145 0xB180, 0xB183, 0xB190, 0xB191,
146};
147
148static const unsigned int a6xx_sp_ps_tp_2d_cluster[] = {
149 0xB4C0, 0xB4D1,
150};
151
152static struct a6xx_cluster_dbgahb_registers {
153 unsigned int id;
154 unsigned int regbase;
155 unsigned int statetype;
156 const unsigned int *regs;
157 unsigned int num_sets;
158 unsigned int offset0;
159 unsigned int offset1;
160} a6xx_dbgahb_ctx_clusters[] = {
161 { CP_CLUSTER_SP_VS, 0x0002E000, 0x41, a6xx_sp_vs_hlsq_cluster,
162 ARRAY_SIZE(a6xx_sp_vs_hlsq_cluster) / 2 },
163 { CP_CLUSTER_SP_VS, 0x0002A000, 0x21, a6xx_sp_vs_sp_cluster,
164 ARRAY_SIZE(a6xx_sp_vs_sp_cluster) / 2 },
165 { CP_CLUSTER_SP_VS, 0x0002E000, 0x41, a6xx_hlsq_duplicate_cluster,
166 ARRAY_SIZE(a6xx_hlsq_duplicate_cluster) / 2 },
167 { CP_CLUSTER_SP_VS, 0x0002F000, 0x45, a6xx_hlsq_2d_duplicate_cluster,
168 ARRAY_SIZE(a6xx_hlsq_2d_duplicate_cluster) / 2 },
169 { CP_CLUSTER_SP_VS, 0x0002A000, 0x21, a6xx_sp_duplicate_cluster,
170 ARRAY_SIZE(a6xx_sp_duplicate_cluster) / 2 },
171 { CP_CLUSTER_SP_VS, 0x0002C000, 0x1, a6xx_tp_duplicate_cluster,
172 ARRAY_SIZE(a6xx_tp_duplicate_cluster) / 2 },
173 { CP_CLUSTER_SP_PS, 0x0002E000, 0x42, a6xx_sp_ps_hlsq_cluster,
174 ARRAY_SIZE(a6xx_sp_ps_hlsq_cluster) / 2 },
175 { CP_CLUSTER_SP_PS, 0x0002F000, 0x46, a6xx_sp_ps_hlsq_2d_cluster,
176 ARRAY_SIZE(a6xx_sp_ps_hlsq_2d_cluster) / 2 },
177 { CP_CLUSTER_SP_PS, 0x0002A000, 0x22, a6xx_sp_ps_sp_cluster,
178 ARRAY_SIZE(a6xx_sp_ps_sp_cluster) / 2 },
179 { CP_CLUSTER_SP_PS, 0x0002B000, 0x26, a6xx_sp_ps_sp_2d_cluster,
180 ARRAY_SIZE(a6xx_sp_ps_sp_2d_cluster) / 2 },
181 { CP_CLUSTER_SP_PS, 0x0002C000, 0x2, a6xx_sp_ps_tp_cluster,
182 ARRAY_SIZE(a6xx_sp_ps_tp_cluster) / 2 },
183 { CP_CLUSTER_SP_PS, 0x0002D000, 0x6, a6xx_sp_ps_tp_2d_cluster,
184 ARRAY_SIZE(a6xx_sp_ps_tp_2d_cluster) / 2 },
185 { CP_CLUSTER_SP_PS, 0x0002E000, 0x42, a6xx_hlsq_duplicate_cluster,
186 ARRAY_SIZE(a6xx_hlsq_duplicate_cluster) / 2 },
187 { CP_CLUSTER_SP_VS, 0x0002A000, 0x22, a6xx_sp_duplicate_cluster,
188 ARRAY_SIZE(a6xx_sp_duplicate_cluster) / 2 },
189 { CP_CLUSTER_SP_VS, 0x0002C000, 0x2, a6xx_tp_duplicate_cluster,
190 ARRAY_SIZE(a6xx_tp_duplicate_cluster) / 2 },
191};
192
193struct a6xx_cluster_dbgahb_regs_info {
194 struct a6xx_cluster_dbgahb_registers *cluster;
195 unsigned int ctxt_id;
196};
197
198static const unsigned int a6xx_hlsq_non_ctx_registers[] = {
199 0xBE00, 0xBE01, 0xBE04, 0xBE05, 0xBE08, 0xBE09, 0xBE10, 0xBE15,
200 0xBE20, 0xBE23,
201};
202
203static const unsigned int a6xx_sp_non_ctx_registers[] = {
204 0xAE00, 0xAE04, 0xAE0C, 0xAE0C, 0xAE0F, 0xAE2B, 0xAE30, 0xAE32,
205 0xAE35, 0xAE35, 0xAE3A, 0xAE3F, 0xAE50, 0xAE52,
206};
207
208static const unsigned int a6xx_tp_non_ctx_registers[] = {
209 0xB600, 0xB601, 0xB604, 0xB605, 0xB610, 0xB61B, 0xB620, 0xB623,
210};
211
212static struct a6xx_non_ctx_dbgahb_registers {
213 unsigned int regbase;
214 unsigned int statetype;
215 const unsigned int *regs;
216 unsigned int num_sets;
217 unsigned int offset;
218} a6xx_non_ctx_dbgahb[] = {
219 { 0x0002F800, 0x40, a6xx_hlsq_non_ctx_registers,
220 ARRAY_SIZE(a6xx_hlsq_non_ctx_registers) / 2 },
221 { 0x0002B800, 0x20, a6xx_sp_non_ctx_registers,
222 ARRAY_SIZE(a6xx_sp_non_ctx_registers) / 2 },
223 { 0x0002D800, 0x0, a6xx_tp_non_ctx_registers,
224 ARRAY_SIZE(a6xx_tp_non_ctx_registers) / 2 },
225};
226
227static const unsigned int a6xx_vbif_ver_20xxxxxx_registers[] = {
228 /* VBIF */
229 0x3000, 0x3007, 0x300C, 0x3014, 0x3018, 0x302D, 0x3030, 0x3031,
230 0x3034, 0x3036, 0x303C, 0x303D, 0x3040, 0x3040, 0x3042, 0x3042,
231 0x3049, 0x3049, 0x3058, 0x3058, 0x305A, 0x3061, 0x3064, 0x3068,
232 0x306C, 0x306D, 0x3080, 0x3088, 0x308B, 0x308C, 0x3090, 0x3094,
233 0x3098, 0x3098, 0x309C, 0x309C, 0x30C0, 0x30C0, 0x30C8, 0x30C8,
234 0x30D0, 0x30D0, 0x30D8, 0x30D8, 0x30E0, 0x30E0, 0x3100, 0x3100,
235 0x3108, 0x3108, 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120,
236 0x3124, 0x3125, 0x3129, 0x3129, 0x3131, 0x3131, 0x3154, 0x3154,
237 0x3156, 0x3156, 0x3158, 0x3158, 0x315A, 0x315A, 0x315C, 0x315C,
238 0x315E, 0x315E, 0x3160, 0x3160, 0x3162, 0x3162, 0x340C, 0x340C,
239 0x3410, 0x3410, 0x3800, 0x3801,
240};
241
242static const unsigned int a6xx_gmu_gx_registers[] = {
243 /* GMU GX */
244 0x1A800, 0x1A800, 0x1A810, 0x1A813, 0x1A816, 0x1A816, 0x1A818, 0x1A81B,
245 0x1A81E, 0x1A81E, 0x1A820, 0x1A823, 0x1A826, 0x1A826, 0x1A828, 0x1A82B,
246 0x1A82E, 0x1A82E, 0x1A830, 0x1A833, 0x1A836, 0x1A836, 0x1A838, 0x1A83B,
247 0x1A83E, 0x1A83E, 0x1A840, 0x1A843, 0x1A846, 0x1A846, 0x1A880, 0x1A884,
248 0x1A900, 0x1A92B, 0x1A940, 0x1A940,
249};
250
251static const unsigned int a6xx_gmu_registers[] = {
252 /* GMU TCM */
253 0x1B400, 0x1C3FF, 0x1C400, 0x1D3FF,
254 /* GMU CX */
255 0x1F400, 0x1F407, 0x1F410, 0x1F412, 0x1F500, 0x1F500, 0x1F507, 0x1F50A,
256 0x1F800, 0x1F804, 0x1F807, 0x1F808, 0x1F80B, 0x1F80C, 0x1F80F, 0x1F81C,
257 0x1F824, 0x1F82A, 0x1F82D, 0x1F830, 0x1F840, 0x1F853, 0x1F887, 0x1F889,
258 0x1F8A0, 0x1F8A2, 0x1F8A4, 0x1F8AF, 0x1F8C0, 0x1F8C3, 0x1F8D0, 0x1F8D0,
259 0x1F8E4, 0x1F8E4, 0x1F8E8, 0x1F8EC, 0x1F900, 0x1F903, 0x1F940, 0x1F940,
260 0x1F942, 0x1F944, 0x1F94C, 0x1F94D, 0x1F94F, 0x1F951, 0x1F954, 0x1F954,
261 0x1F957, 0x1F958, 0x1F95D, 0x1F95D, 0x1F962, 0x1F962, 0x1F964, 0x1F965,
262 0x1F980, 0x1F986, 0x1F990, 0x1F99E, 0x1F9C0, 0x1F9C0, 0x1F9C5, 0x1F9CC,
263 0x1F9E0, 0x1F9E2, 0x1F9F0, 0x1F9F0, 0x1FA00, 0x1FA01,
264 /* GPU RSCC */
265 0x2348C, 0x2348C, 0x23501, 0x23502, 0x23740, 0x23742, 0x23744, 0x23747,
266 0x2374C, 0x23787, 0x237EC, 0x237EF, 0x237F4, 0x2382F, 0x23894, 0x23897,
267 0x2389C, 0x238D7, 0x2393C, 0x2393F, 0x23944, 0x2397F,
268 /* GMU AO */
269 0x23B00, 0x23B16, 0x23C00, 0x23C00,
270 /* GPU CC */
271 0x24000, 0x24012, 0x24040, 0x24052, 0x24400, 0x24404, 0x24407, 0x2440B,
272 0x24415, 0x2441C, 0x2441E, 0x2442D, 0x2443C, 0x2443D, 0x2443F, 0x24440,
273 0x24442, 0x24449, 0x24458, 0x2445A, 0x24540, 0x2455E, 0x24800, 0x24802,
274 0x24C00, 0x24C02, 0x25400, 0x25402, 0x25800, 0x25802, 0x25C00, 0x25C02,
275 0x26000, 0x26002,
276 /* GPU CC ACD */
277 0x26400, 0x26416, 0x26420, 0x26427,
278};
279
280static const unsigned int a6xx_rb_rac_registers[] = {
281 0x8E04, 0x8E05, 0x8E07, 0x8E08, 0x8E10, 0x8E1C, 0x8E20, 0x8E25,
282 0x8E28, 0x8E28, 0x8E2C, 0x8E2F, 0x8E50, 0x8E52,
283};
284
285static const unsigned int a6xx_rb_rbp_registers[] = {
286 0x8E01, 0x8E01, 0x8E0C, 0x8E0C, 0x8E3B, 0x8E3E, 0x8E40, 0x8E43,
287 0x8E53, 0x8E5F, 0x8E70, 0x8E77,
288};
289
290static const struct adreno_vbif_snapshot_registers
291a6xx_vbif_snapshot_registers[] = {
292 { 0x20040000, 0xFF000000, a6xx_vbif_ver_20xxxxxx_registers,
293 ARRAY_SIZE(a6xx_vbif_ver_20xxxxxx_registers)/2},
294};
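/* The (version, mask) pair above matches any VBIF version of the form 0x20xxxxxx. */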
295
296/*
297 * Set of registers to dump for A6XX on snapshot.
298 * Registers in pairs - first value is the start offset, second
299 * is the stop offset (inclusive)
300 */
301
302static const unsigned int a6xx_registers[] = {
303 /* RBBM */
304 0x0000, 0x0002, 0x0010, 0x0010, 0x0012, 0x0012, 0x0018, 0x001B,
305 0x001e, 0x0032, 0x0038, 0x003C, 0x0042, 0x0042, 0x0044, 0x0044,
306 0x0047, 0x0047, 0x0056, 0x0056, 0x00AD, 0x00AE, 0x00B0, 0x00FB,
307 0x0100, 0x011D, 0x0200, 0x020D, 0x0218, 0x023D, 0x0400, 0x04F9,
308 0x0500, 0x0500, 0x0505, 0x050B, 0x050E, 0x0511, 0x0533, 0x0533,
309 0x0540, 0x0555,
310 /* CP */
311 0x0800, 0x0808, 0x0810, 0x0813, 0x0820, 0x0821, 0x0823, 0x0824,
312 0x0826, 0x0827, 0x0830, 0x0833, 0x0840, 0x0843, 0x084F, 0x086F,
313 0x0880, 0x088A, 0x08A0, 0x08AB, 0x08C0, 0x08C4, 0x08D0, 0x08DD,
314 0x08F0, 0x08F3, 0x0900, 0x0903, 0x0908, 0x0911, 0x0928, 0x093E,
315 0x0942, 0x094D, 0x0980, 0x0984, 0x098D, 0x0996, 0x0998, 0x099E,
316 0x09A0, 0x09A6, 0x09A8, 0x09AE, 0x09B0, 0x09B1, 0x09C2, 0x09C8,
317 0x0A00, 0x0A03,
318 /* VSC */
319 0x0C00, 0x0C04, 0x0C06, 0x0C06, 0x0C10, 0x0CD9, 0x0E00, 0x0E0E,
320 /* UCHE */
321 0x0E10, 0x0E13, 0x0E17, 0x0E19, 0x0E1C, 0x0E2B, 0x0E30, 0x0E32,
322 0x0E38, 0x0E39,
323 /* GRAS */
324 0x8600, 0x8601, 0x8610, 0x861B, 0x8620, 0x8620, 0x8628, 0x862B,
325 0x8630, 0x8637,
326 /* VPC */
327 0x9600, 0x9604, 0x9624, 0x9637,
328 /* PC */
329 0x9E00, 0x9E01, 0x9E03, 0x9E0E, 0x9E11, 0x9E16, 0x9E19, 0x9E19,
330 0x9E1C, 0x9E1C, 0x9E20, 0x9E23, 0x9E30, 0x9E31, 0x9E34, 0x9E34,
331 0x9E70, 0x9E72, 0x9E78, 0x9E79, 0x9E80, 0x9FFF,
332 /* VFD */
333 0xA600, 0xA601, 0xA603, 0xA603, 0xA60A, 0xA60A, 0xA610, 0xA617,
334 0xA630, 0xA630,
335};
336
337/*
338 * Set of registers to dump for A6XX before actually triggering crash dumper.
339 * Registers in pairs - first value is the start offset, second
340 * is the stop offset (inclusive)
341 */
342static const unsigned int a6xx_pre_crashdumper_registers[] = {
343 /* RBBM: RBBM_STATUS - RBBM_STATUS3 */
344 0x210, 0x213,
345 /* CP: CP_STATUS_1 */
346 0x825, 0x825,
347};
348
349enum a6xx_debugbus_id {
350 A6XX_DBGBUS_CP = 0x1,
351 A6XX_DBGBUS_RBBM = 0x2,
352 A6XX_DBGBUS_VBIF = 0x3,
353 A6XX_DBGBUS_HLSQ = 0x4,
354 A6XX_DBGBUS_UCHE = 0x5,
355 A6XX_DBGBUS_DPM = 0x6,
356 A6XX_DBGBUS_TESS = 0x7,
357 A6XX_DBGBUS_PC = 0x8,
358 A6XX_DBGBUS_VFDP = 0x9,
359 A6XX_DBGBUS_VPC = 0xa,
360 A6XX_DBGBUS_TSE = 0xb,
361 A6XX_DBGBUS_RAS = 0xc,
362 A6XX_DBGBUS_VSC = 0xd,
363 A6XX_DBGBUS_COM = 0xe,
364 A6XX_DBGBUS_LRZ = 0x10,
365 A6XX_DBGBUS_A2D = 0x11,
366 A6XX_DBGBUS_CCUFCHE = 0x12,
367 A6XX_DBGBUS_GMU_CX = 0x13,
368 A6XX_DBGBUS_RBP = 0x14,
369 A6XX_DBGBUS_DCS = 0x15,
370 A6XX_DBGBUS_RBBM_CFG = 0x16,
371 A6XX_DBGBUS_CX = 0x17,
372 A6XX_DBGBUS_GMU_GX = 0x18,
373 A6XX_DBGBUS_TPFCHE = 0x19,
374 A6XX_DBGBUS_GPC = 0x1d,
375 A6XX_DBGBUS_LARC = 0x1e,
376 A6XX_DBGBUS_HLSQ_SPTP = 0x1f,
377 A6XX_DBGBUS_RB_0 = 0x20,
378 A6XX_DBGBUS_RB_1 = 0x21,
379 A6XX_DBGBUS_UCHE_WRAPPER = 0x24,
380 A6XX_DBGBUS_CCU_0 = 0x28,
381 A6XX_DBGBUS_CCU_1 = 0x29,
382 A6XX_DBGBUS_VFD_0 = 0x38,
383 A6XX_DBGBUS_VFD_1 = 0x39,
384 A6XX_DBGBUS_VFD_2 = 0x3a,
385 A6XX_DBGBUS_VFD_3 = 0x3b,
386 A6XX_DBGBUS_SP_0 = 0x40,
387 A6XX_DBGBUS_SP_1 = 0x41,
388 A6XX_DBGBUS_TPL1_0 = 0x48,
389 A6XX_DBGBUS_TPL1_1 = 0x49,
390 A6XX_DBGBUS_TPL1_2 = 0x4a,
391 A6XX_DBGBUS_TPL1_3 = 0x4b,
392};
393
394static const struct adreno_debugbus_block a6xx_dbgc_debugbus_blocks[] = {
395 { A6XX_DBGBUS_CP, 0x100, },
396 { A6XX_DBGBUS_RBBM, 0x100, },
397 { A6XX_DBGBUS_HLSQ, 0x100, },
398 { A6XX_DBGBUS_UCHE, 0x100, },
399 { A6XX_DBGBUS_DPM, 0x100, },
400 { A6XX_DBGBUS_TESS, 0x100, },
401 { A6XX_DBGBUS_PC, 0x100, },
402 { A6XX_DBGBUS_VFDP, 0x100, },
403 { A6XX_DBGBUS_VPC, 0x100, },
404 { A6XX_DBGBUS_TSE, 0x100, },
405 { A6XX_DBGBUS_RAS, 0x100, },
406 { A6XX_DBGBUS_VSC, 0x100, },
407 { A6XX_DBGBUS_COM, 0x100, },
408 { A6XX_DBGBUS_LRZ, 0x100, },
409 { A6XX_DBGBUS_A2D, 0x100, },
410 { A6XX_DBGBUS_CCUFCHE, 0x100, },
411 { A6XX_DBGBUS_RBP, 0x100, },
412 { A6XX_DBGBUS_DCS, 0x100, },
413 { A6XX_DBGBUS_RBBM_CFG, 0x100, },
414 { A6XX_DBGBUS_GMU_GX, 0x100, },
415 { A6XX_DBGBUS_TPFCHE, 0x100, },
416 { A6XX_DBGBUS_GPC, 0x100, },
417 { A6XX_DBGBUS_LARC, 0x100, },
418 { A6XX_DBGBUS_HLSQ_SPTP, 0x100, },
419 { A6XX_DBGBUS_RB_0, 0x100, },
420 { A6XX_DBGBUS_RB_1, 0x100, },
421 { A6XX_DBGBUS_UCHE_WRAPPER, 0x100, },
422 { A6XX_DBGBUS_CCU_0, 0x100, },
423 { A6XX_DBGBUS_CCU_1, 0x100, },
424 { A6XX_DBGBUS_VFD_0, 0x100, },
425 { A6XX_DBGBUS_VFD_1, 0x100, },
426 { A6XX_DBGBUS_VFD_2, 0x100, },
427 { A6XX_DBGBUS_VFD_3, 0x100, },
428 { A6XX_DBGBUS_SP_0, 0x100, },
429 { A6XX_DBGBUS_SP_1, 0x100, },
430 { A6XX_DBGBUS_TPL1_0, 0x100, },
431 { A6XX_DBGBUS_TPL1_1, 0x100, },
432 { A6XX_DBGBUS_TPL1_2, 0x100, },
433 { A6XX_DBGBUS_TPL1_3, 0x100, },
434};
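/* Each block above exposes 0x100 debug bus indices; two dwords are captured per index (TRACE_BUF2 and TRACE_BUF1). */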
435
436static const struct adreno_debugbus_block a6xx_vbif_debugbus_blocks = {
437 A6XX_DBGBUS_VBIF, 0x100,
438};
439
440static void __iomem *a6xx_cx_dbgc;
441static const struct adreno_debugbus_block a6xx_cx_dbgc_debugbus_blocks[] = {
442 { A6XX_DBGBUS_GMU_CX, 0x100, },
443 { A6XX_DBGBUS_CX, 0x100, },
444};
445
446#define A6XX_NUM_SHADER_BANKS 3
447#define A6XX_SHADER_STATETYPE_SHIFT 8
448
449enum a6xx_shader_obj {
450 A6XX_TP0_TMO_DATA = 0x9,
451 A6XX_TP0_SMO_DATA = 0xa,
452 A6XX_TP0_MIPMAP_BASE_DATA = 0xb,
453 A6XX_TP1_TMO_DATA = 0x19,
454 A6XX_TP1_SMO_DATA = 0x1a,
455 A6XX_TP1_MIPMAP_BASE_DATA = 0x1b,
456 A6XX_SP_INST_DATA = 0x29,
457 A6XX_SP_LB_0_DATA = 0x2a,
458 A6XX_SP_LB_1_DATA = 0x2b,
459 A6XX_SP_LB_2_DATA = 0x2c,
460 A6XX_SP_LB_3_DATA = 0x2d,
461 A6XX_SP_LB_4_DATA = 0x2e,
462 A6XX_SP_LB_5_DATA = 0x2f,
463 A6XX_SP_CB_BINDLESS_DATA = 0x30,
464 A6XX_SP_CB_LEGACY_DATA = 0x31,
465 A6XX_SP_UAV_DATA = 0x32,
466 A6XX_SP_INST_TAG = 0x33,
467 A6XX_SP_CB_BINDLESS_TAG = 0x34,
468 A6XX_SP_TMO_UMO_TAG = 0x35,
469 A6XX_SP_SMO_TAG = 0x36,
470 A6XX_SP_STATE_DATA = 0x37,
471 A6XX_HLSQ_CHUNK_CVS_RAM = 0x49,
472 A6XX_HLSQ_CHUNK_CPS_RAM = 0x4a,
473 A6XX_HLSQ_CHUNK_CVS_RAM_TAG = 0x4b,
474 A6XX_HLSQ_CHUNK_CPS_RAM_TAG = 0x4c,
475 A6XX_HLSQ_ICB_CVS_CB_BASE_TAG = 0x4d,
476 A6XX_HLSQ_ICB_CPS_CB_BASE_TAG = 0x4e,
477 A6XX_HLSQ_CVS_MISC_RAM = 0x50,
478 A6XX_HLSQ_CPS_MISC_RAM = 0x51,
479 A6XX_HLSQ_INST_RAM = 0x52,
480 A6XX_HLSQ_GFX_CVS_CONST_RAM = 0x53,
481 A6XX_HLSQ_GFX_CPS_CONST_RAM = 0x54,
482 A6XX_HLSQ_CVS_MISC_RAM_TAG = 0x55,
483 A6XX_HLSQ_CPS_MISC_RAM_TAG = 0x56,
484 A6XX_HLSQ_INST_RAM_TAG = 0x57,
485 A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG = 0x58,
486 A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG = 0x59,
487 A6XX_HLSQ_PWR_REST_RAM = 0x5a,
488 A6XX_HLSQ_PWR_REST_TAG = 0x5b,
489 A6XX_HLSQ_DATAPATH_META = 0x60,
490 A6XX_HLSQ_FRONTEND_META = 0x61,
491 A6XX_HLSQ_INDIRECT_META = 0x62,
492 A6XX_HLSQ_BACKEND_META = 0x63
493};
494
495struct a6xx_shader_block {
496 unsigned int statetype;
497 unsigned int sz;
498 uint64_t offset;
499};
500
501struct a6xx_shader_block_info {
502 struct a6xx_shader_block *block;
503 unsigned int bank;
504 uint64_t offset;
505};
506
507static struct a6xx_shader_block a6xx_shader_blocks[] = {
508 {A6XX_TP0_TMO_DATA, 0x200},
509 {A6XX_TP0_SMO_DATA, 0x80,},
510 {A6XX_TP0_MIPMAP_BASE_DATA, 0x3C0},
511 {A6XX_TP1_TMO_DATA, 0x200},
512 {A6XX_TP1_SMO_DATA, 0x80,},
513 {A6XX_TP1_MIPMAP_BASE_DATA, 0x3C0},
514 {A6XX_SP_INST_DATA, 0x800},
515 {A6XX_SP_LB_0_DATA, 0x800},
516 {A6XX_SP_LB_1_DATA, 0x800},
517 {A6XX_SP_LB_2_DATA, 0x800},
518 {A6XX_SP_LB_3_DATA, 0x800},
519 {A6XX_SP_LB_4_DATA, 0x800},
520 {A6XX_SP_LB_5_DATA, 0x200},
521 {A6XX_SP_CB_BINDLESS_DATA, 0x2000},
522 {A6XX_SP_CB_LEGACY_DATA, 0x280,},
523 {A6XX_SP_UAV_DATA, 0x80,},
524 {A6XX_SP_INST_TAG, 0x80,},
525 {A6XX_SP_CB_BINDLESS_TAG, 0x80,},
526 {A6XX_SP_TMO_UMO_TAG, 0x80,},
527 {A6XX_SP_SMO_TAG, 0x80},
528 {A6XX_SP_STATE_DATA, 0x3F},
529 {A6XX_HLSQ_CHUNK_CVS_RAM, 0x1C0},
530 {A6XX_HLSQ_CHUNK_CPS_RAM, 0x280},
531 {A6XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40,},
532 {A6XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40,},
533 {A6XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x4,},
534 {A6XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x4,},
535 {A6XX_HLSQ_CVS_MISC_RAM, 0x1C0},
536 {A6XX_HLSQ_CPS_MISC_RAM, 0x580},
537 {A6XX_HLSQ_INST_RAM, 0x800},
538 {A6XX_HLSQ_GFX_CVS_CONST_RAM, 0x800},
539 {A6XX_HLSQ_GFX_CPS_CONST_RAM, 0x800},
540 {A6XX_HLSQ_CVS_MISC_RAM_TAG, 0x8,},
541 {A6XX_HLSQ_CPS_MISC_RAM_TAG, 0x4,},
542 {A6XX_HLSQ_INST_RAM_TAG, 0x80,},
543 {A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0xC,},
544 {A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x10},
545 {A6XX_HLSQ_PWR_REST_RAM, 0x28},
546 {A6XX_HLSQ_PWR_REST_TAG, 0x14},
547 {A6XX_HLSQ_DATAPATH_META, 0x40,},
548 {A6XX_HLSQ_FRONTEND_META, 0x40},
549 {A6XX_HLSQ_INDIRECT_META, 0x40,}
550};
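/*
 * The offset field of each shader block is presumably assigned when the
 * crash dump capture script is built; bank j of a block is then read back
 * from a6xx_crashdump_registers at offset + (j * sz).
 */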
551
552static struct kgsl_memdesc a6xx_capturescript;
553static struct kgsl_memdesc a6xx_crashdump_registers;
554static bool crash_dump_valid;
555
556static struct reg_list {
557 const unsigned int *regs;
558 unsigned int count;
559 const struct sel_reg *sel;
560} a6xx_reg_list[] = {
561 { a6xx_registers, ARRAY_SIZE(a6xx_registers) / 2, NULL },
562 { a6xx_rb_rac_registers, ARRAY_SIZE(a6xx_rb_rac_registers) / 2,
563 &_a6xx_rb_rac_aperture },
564 { a6xx_rb_rbp_registers, ARRAY_SIZE(a6xx_rb_rbp_registers) / 2,
565 &_a6xx_rb_rbp_aperture },
566};
567
568#define REG_PAIR_COUNT(_a, _i) \
569 (((_a)[(2 * (_i)) + 1] - (_a)[2 * (_i)]) + 1)
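/* REG_PAIR_COUNT() yields the inclusive number of registers in the _i-th (start, end) pair of array _a. */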
570
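/*
 * Register dump path: a6xx_snapshot_registers() copies the values captured
 * by the CP crash dumper when crash_dump_valid is set; otherwise it falls
 * back to the legacy AHB reads below.
 */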
571static size_t a6xx_legacy_snapshot_registers(struct kgsl_device *device,
572 u8 *buf, size_t remain)
573{
574 unsigned int i;
575 size_t used = 0;
576
577 for (i = 0; i < ARRAY_SIZE(a6xx_reg_list); i++) {
578 struct reg_list *regs = &a6xx_reg_list[i];
579 struct kgsl_snapshot_registers snapshot_regs = {
580 .regs = regs->regs,
581 .count = regs->count,
582 };
583
584 if (regs->sel)
585 kgsl_regwrite(device, regs->sel->host_reg,
586 regs->sel->val);
587 used += kgsl_snapshot_dump_registers(device, buf + used,
588 remain - used, &snapshot_regs);
589 }
590 return used;
591}
592
593static size_t a6xx_snapshot_registers(struct kgsl_device *device, u8 *buf,
594 size_t remain, void *priv)
595{
596 struct kgsl_snapshot_regs *header = (struct kgsl_snapshot_regs *)buf;
597 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
598 unsigned int *src = (unsigned int *)a6xx_crashdump_registers.hostptr;
599 unsigned int i, j, k;
600 unsigned int count = 0;
601
602 if (crash_dump_valid == false)
603 return a6xx_legacy_snapshot_registers(device, buf, remain);
604
605 if (remain < sizeof(*header)) {
606 SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
607 return 0;
608 }
609
610 remain -= sizeof(*header);
611
612 for (i = 0; i < ARRAY_SIZE(a6xx_reg_list); i++) {
613 struct reg_list *regs = &a6xx_reg_list[i];
614
615 for (j = 0; j < regs->count; j++) {
616 unsigned int start = regs->regs[2 * j];
617 unsigned int end = regs->regs[(2 * j) + 1];
618
619 if (remain < ((end - start) + 1) * 8) {
620 SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
621 goto out;
622 }
623
624 remain -= ((end - start) + 1) * 8;
625
626 for (k = start; k <= end; k++, count++) {
627 *data++ = k;
628 *data++ = *src++;
629 }
630 }
631 }
632
633out:
634 header->count = count;
635
636 /* Return the size of the section */
637 return (count * 8) + sizeof(*header);
638}
639
640static size_t a6xx_snapshot_pre_crashdump_regs(struct kgsl_device *device,
641 u8 *buf, size_t remain, void *priv)
642{
643 struct kgsl_snapshot_registers pre_cdregs = {
644 .regs = a6xx_pre_crashdumper_registers,
645 .count = ARRAY_SIZE(a6xx_pre_crashdumper_registers)/2,
646 };
647
648 return kgsl_snapshot_dump_registers(device, buf, remain, &pre_cdregs);
649}
650
651static size_t a6xx_snapshot_shader_memory(struct kgsl_device *device,
652 u8 *buf, size_t remain, void *priv)
653{
654 struct kgsl_snapshot_shader *header =
655 (struct kgsl_snapshot_shader *) buf;
656 struct a6xx_shader_block_info *info =
657 (struct a6xx_shader_block_info *) priv;
658 struct a6xx_shader_block *block = info->block;
659 unsigned int *data = (unsigned int *) (buf + sizeof(*header));
660
661 if (remain < SHADER_SECTION_SZ(block->sz)) {
662 SNAPSHOT_ERR_NOMEM(device, "SHADER MEMORY");
663 return 0;
664 }
665
666 header->type = block->statetype;
667 header->index = info->bank;
668 header->size = block->sz;
669
670 memcpy(data, a6xx_crashdump_registers.hostptr + info->offset,
671 block->sz);
672
673 return SHADER_SECTION_SZ(block->sz);
674}
675
676static void a6xx_snapshot_shader(struct kgsl_device *device,
677 struct kgsl_snapshot *snapshot)
678{
679 unsigned int i, j;
680 struct a6xx_shader_block_info info;
681
682 /* Shader blocks can only be read by the crash dumper */
683 if (crash_dump_valid == false)
684 return;
685
686 for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
687 for (j = 0; j < A6XX_NUM_SHADER_BANKS; j++) {
688 info.block = &a6xx_shader_blocks[i];
689 info.bank = j;
690 info.offset = a6xx_shader_blocks[i].offset +
691 (j * a6xx_shader_blocks[i].sz);
692
693 /* Shader working/shadow memory */
694 kgsl_snapshot_add_section(device,
695 KGSL_SNAPSHOT_SECTION_SHADER,
696 snapshot, a6xx_snapshot_shader_memory, &info);
697 }
698 }
699}
700
701static void a6xx_snapshot_mempool(struct kgsl_device *device,
702 struct kgsl_snapshot *snapshot)
703{
704 unsigned int pool_size;
705 u8 *buf = snapshot->ptr;
706
707 /* Set the mempool size to 0 to stabilize it while dumping */
708 kgsl_regread(device, A6XX_CP_MEM_POOL_SIZE, &pool_size);
709 kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, 0);
710
711 kgsl_snapshot_indexed_registers(device, snapshot,
712 A6XX_CP_MEM_POOL_DBG_ADDR, A6XX_CP_MEM_POOL_DBG_DATA,
713 0, 0x2060);
714
715 /*
716 * Data at offset 0x2000 in the mempool section is the mempool size.
717 * Since we set it to 0, patch in the original size so that the data
718 * is consistent.
719 */
720 if (buf < snapshot->ptr) {
721 unsigned int *data;
722
723 /* Skip over the headers */
724 buf += sizeof(struct kgsl_snapshot_section_header) +
725 sizeof(struct kgsl_snapshot_indexed_regs);
726
727 data = (unsigned int *)buf + 0x2000;
728 *data = pool_size;
729 }
730
731 /* Restore the saved mempool size */
732 kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, pool_size);
733}
734
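/*
 * The dbgahb regbase values are byte offsets; dividing by four converts
 * them to dword offsets, so (reg - regbase / 4) appears to index into the
 * HLSQ AHB read aperture.
 */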
735static inline unsigned int a6xx_read_dbgahb(struct kgsl_device *device,
736 unsigned int regbase, unsigned int reg)
737{
738 unsigned int read_reg = A6XX_HLSQ_DBG_AHB_READ_APERTURE +
739 reg - regbase / 4;
740 unsigned int val;
741
742 kgsl_regread(device, read_reg, &val);
743 return val;
744}
745
746static size_t a6xx_legacy_snapshot_cluster_dbgahb(struct kgsl_device *device,
747 u8 *buf, size_t remain, void *priv)
748{
749 struct kgsl_snapshot_mvc_regs *header =
750 (struct kgsl_snapshot_mvc_regs *)buf;
751 struct a6xx_cluster_dbgahb_regs_info *info =
752 (struct a6xx_cluster_dbgahb_regs_info *)priv;
753 struct a6xx_cluster_dbgahb_registers *cur_cluster = info->cluster;
754 unsigned int read_sel;
755 unsigned int data_size = 0;
756 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
757 int i, j;
758
759 if (!device->snapshot_legacy)
760 return 0;
761
762 if (remain < sizeof(*header)) {
763 SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
764 return 0;
765 }
766
767 remain -= sizeof(*header);
768
769 header->ctxt_id = info->ctxt_id;
770 header->cluster_id = cur_cluster->id;
771
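	/* Bits 15:8 of HLSQ_DBG_READ_SEL select the state type; each context appears to occupy two consecutive statetype slots. */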
772 read_sel = ((cur_cluster->statetype + info->ctxt_id * 2) & 0xff) << 8;
773 kgsl_regwrite(device, A6XX_HLSQ_DBG_READ_SEL, read_sel);
774
775 for (i = 0; i < cur_cluster->num_sets; i++) {
776 unsigned int start = cur_cluster->regs[2 * i];
777 unsigned int end = cur_cluster->regs[2 * i + 1];
778
779 if (remain < (end - start + 3) * 4) {
780 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
781 goto out;
782 }
783
784 remain -= (end - start + 3) * 4;
785 data_size += (end - start + 3) * 4;
786
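		/* Each range is written as a two-dword header (start with bit 31 set, then end) followed by the register values, hence the (end - start + 3) sizing above. */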
787 *data++ = start | (1 << 31);
788 *data++ = end;
789
790 for (j = start; j <= end; j++) {
791 unsigned int val;
792
793 val = a6xx_read_dbgahb(device, cur_cluster->regbase, j);
794 *data++ = val;
795
796 }
797 }
798
799out:
800 return data_size + sizeof(*header);
801}
802
803static size_t a6xx_snapshot_cluster_dbgahb(struct kgsl_device *device, u8 *buf,
804 size_t remain, void *priv)
805{
806 struct kgsl_snapshot_mvc_regs *header =
807 (struct kgsl_snapshot_mvc_regs *)buf;
808 struct a6xx_cluster_dbgahb_regs_info *info =
809 (struct a6xx_cluster_dbgahb_regs_info *)priv;
810 struct a6xx_cluster_dbgahb_registers *cluster = info->cluster;
811 unsigned int data_size = 0;
812 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
813 int i, j;
814 unsigned int *src;
815
816
817 if (crash_dump_valid == false)
818 return a6xx_legacy_snapshot_cluster_dbgahb(device, buf, remain,
819 info);
820
821 if (remain < sizeof(*header)) {
822 SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
823 return 0;
824 }
825
826 remain -= sizeof(*header);
827
828 header->ctxt_id = info->ctxt_id;
829 header->cluster_id = cluster->id;
830
831 src = (unsigned int *)(a6xx_crashdump_registers.hostptr +
832 (header->ctxt_id ? cluster->offset1 : cluster->offset0));
833
834 for (i = 0; i < cluster->num_sets; i++) {
835 unsigned int start;
836 unsigned int end;
837
838 start = cluster->regs[2 * i];
839 end = cluster->regs[2 * i + 1];
840
841 if (remain < (end - start + 3) * 4) {
842 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
843 goto out;
844 }
845
846 remain -= (end - start + 3) * 4;
847 data_size += (end - start + 3) * 4;
848
849 *data++ = start | (1 << 31);
850 *data++ = end;
851 for (j = start; j <= end; j++)
852 *data++ = *src++;
853 }
854out:
855 return data_size + sizeof(*header);
856}
857
858static size_t a6xx_legacy_snapshot_non_ctx_dbgahb(struct kgsl_device *device,
859 u8 *buf, size_t remain, void *priv)
860{
861 struct kgsl_snapshot_regs *header =
862 (struct kgsl_snapshot_regs *)buf;
863 struct a6xx_non_ctx_dbgahb_registers *regs =
864 (struct a6xx_non_ctx_dbgahb_registers *)priv;
865 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
866 int count = 0;
867 unsigned int read_sel;
868 int i, j;
869
870 if (!device->snapshot_legacy)
871 return 0;
872
873 /* Figure out how many registers we are going to dump */
874 for (i = 0; i < regs->num_sets; i++) {
875 int start = regs->regs[i * 2];
876 int end = regs->regs[i * 2 + 1];
877
878 count += (end - start + 1);
879 }
880
881 if (remain < (count * 8) + sizeof(*header)) {
882 SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
883 return 0;
884 }
885
886 header->count = count;
887
888 read_sel = (regs->statetype & 0xff) << 8;
889 kgsl_regwrite(device, A6XX_HLSQ_DBG_READ_SEL, read_sel);
890
891 for (i = 0; i < regs->num_sets; i++) {
892 unsigned int start = regs->regs[2 * i];
893 unsigned int end = regs->regs[2 * i + 1];
894
895 for (j = start; j <= end; j++) {
896 unsigned int val;
897
898 val = a6xx_read_dbgahb(device, regs->regbase, j);
899 *data++ = j;
900 *data++ = val;
901
902 }
903 }
904 return (count * 8) + sizeof(*header);
905}
906
907static size_t a6xx_snapshot_non_ctx_dbgahb(struct kgsl_device *device, u8 *buf,
908 size_t remain, void *priv)
909{
910 struct kgsl_snapshot_regs *header =
911 (struct kgsl_snapshot_regs *)buf;
912 struct a6xx_non_ctx_dbgahb_registers *regs =
913 (struct a6xx_non_ctx_dbgahb_registers *)priv;
914 unsigned int count = 0;
915 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
916 unsigned int i, k;
917 unsigned int *src;
918
919 if (crash_dump_valid == false)
920 return a6xx_legacy_snapshot_non_ctx_dbgahb(device, buf, remain,
921 regs);
922
923 if (remain < sizeof(*header)) {
924 SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
925 return 0;
926 }
927
928 remain -= sizeof(*header);
929
930 src = (unsigned int *)(a6xx_crashdump_registers.hostptr + regs->offset);
931
932 for (i = 0; i < regs->num_sets; i++) {
933 unsigned int start;
934 unsigned int end;
935
936 start = regs->regs[2 * i];
937 end = regs->regs[(2 * i) + 1];
938
939 if (remain < (end - start + 1) * 8) {
940 SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
941 goto out;
942 }
943
944 remain -= ((end - start) + 1) * 8;
945
946 for (k = start; k <= end; k++, count++) {
947 *data++ = k;
948 *data++ = *src++;
949 }
950 }
951out:
952 header->count = count;
953
954 /* Return the size of the section */
955 return (count * 8) + sizeof(*header);
956}
957
958static void a6xx_snapshot_dbgahb_regs(struct kgsl_device *device,
959 struct kgsl_snapshot *snapshot)
960{
961 int i, j;
962
963 for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
964 struct a6xx_cluster_dbgahb_registers *cluster =
965 &a6xx_dbgahb_ctx_clusters[i];
966 struct a6xx_cluster_dbgahb_regs_info info;
967
968 info.cluster = cluster;
969 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
970 info.ctxt_id = j;
971
972 kgsl_snapshot_add_section(device,
973 KGSL_SNAPSHOT_SECTION_MVC, snapshot,
974 a6xx_snapshot_cluster_dbgahb, &info);
975 }
976 }
977
978 for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
979 kgsl_snapshot_add_section(device,
980 KGSL_SNAPSHOT_SECTION_REGS, snapshot,
981 a6xx_snapshot_non_ctx_dbgahb, &a6xx_non_ctx_dbgahb[i]);
982 }
983}
984
985static size_t a6xx_legacy_snapshot_mvc(struct kgsl_device *device, u8 *buf,
986 size_t remain, void *priv)
987{
988 struct kgsl_snapshot_mvc_regs *header =
989 (struct kgsl_snapshot_mvc_regs *)buf;
990 struct a6xx_cluster_regs_info *info =
991 (struct a6xx_cluster_regs_info *)priv;
992 struct a6xx_cluster_registers *cur_cluster = info->cluster;
993 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
994 unsigned int ctxt = info->ctxt_id;
995 unsigned int start, end, i, j, aperture_cntl = 0;
996 unsigned int data_size = 0;
997
998 if (remain < sizeof(*header)) {
999 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
1000 return 0;
1001 }
1002
1003 remain -= sizeof(*header);
1004
1005 header->ctxt_id = info->ctxt_id;
1006 header->cluster_id = cur_cluster->id;
1007
1008 /*
1009 * Set the AHB control for the Host to read from the
1010 * cluster/context for this iteration.
1011 */
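	/* aperture_cntl appears to pack the cluster id into bits 10:8 and the context bank into bits 5:4 and 1:0. */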
1012 aperture_cntl = ((cur_cluster->id & 0x7) << 8) | (ctxt << 4) | ctxt;
1013 kgsl_regwrite(device, A6XX_CP_APERTURE_CNTL_HOST, aperture_cntl);
1014
1015 if (cur_cluster->sel)
1016 kgsl_regwrite(device, cur_cluster->sel->host_reg,
1017 cur_cluster->sel->val);
1018
1019 for (i = 0; i < cur_cluster->num_sets; i++) {
1020 start = cur_cluster->regs[2 * i];
1021 end = cur_cluster->regs[2 * i + 1];
1022
1023 if (remain < (end - start + 3) * 4) {
1024 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
1025 goto out;
1026 }
1027
1028 remain -= (end - start + 3) * 4;
1029 data_size += (end - start + 3) * 4;
1030
1031 *data++ = start | (1 << 31);
1032 *data++ = end;
1033 for (j = start; j <= end; j++) {
1034 unsigned int val;
1035
1036 kgsl_regread(device, j, &val);
1037 *data++ = val;
1038 }
1039 }
1040out:
1041 return data_size + sizeof(*header);
1042}
1043
1044static size_t a6xx_snapshot_mvc(struct kgsl_device *device, u8 *buf,
1045 size_t remain, void *priv)
1046{
1047 struct kgsl_snapshot_mvc_regs *header =
1048 (struct kgsl_snapshot_mvc_regs *)buf;
1049 struct a6xx_cluster_regs_info *info =
1050 (struct a6xx_cluster_regs_info *)priv;
1051 struct a6xx_cluster_registers *cluster = info->cluster;
1052 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
1053 unsigned int *src;
1054 int i, j;
1055 unsigned int start, end;
1056 size_t data_size = 0;
1057
1058 if (crash_dump_valid == false)
1059 return a6xx_legacy_snapshot_mvc(device, buf, remain, info);
1060
1061 if (remain < sizeof(*header)) {
1062 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
1063 return 0;
1064 }
1065
1066 remain -= sizeof(*header);
1067
1068 header->ctxt_id = info->ctxt_id;
1069 header->cluster_id = cluster->id;
1070
1071 src = (unsigned int *)(a6xx_crashdump_registers.hostptr +
1072 (header->ctxt_id ? cluster->offset1 : cluster->offset0));
1073
1074 for (i = 0; i < cluster->num_sets; i++) {
1075 start = cluster->regs[2 * i];
1076 end = cluster->regs[2 * i + 1];
1077
1078 if (remain < (end - start + 3) * 4) {
1079 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
1080 goto out;
1081 }
1082
1083 remain -= (end - start + 3) * 4;
1084 data_size += (end - start + 3) * 4;
1085
1086 *data++ = start | (1 << 31);
1087 *data++ = end;
1088 for (j = start; j <= end; j++)
1089 *data++ = *src++;
1090 }
1091
1092out:
1093 return data_size + sizeof(*header);
1094
1095}
1096
1097static void a6xx_snapshot_mvc_regs(struct kgsl_device *device,
1098 struct kgsl_snapshot *snapshot)
1099{
1100 int i, j;
1101 struct a6xx_cluster_regs_info info;
1102
1103 for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
1104 struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];
1105
1106 info.cluster = cluster;
1107 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
1108 info.ctxt_id = j;
1109
1110 kgsl_snapshot_add_section(device,
1111 KGSL_SNAPSHOT_SECTION_MVC, snapshot,
1112 a6xx_snapshot_mvc, &info);
1113 }
1114 }
1115}
1116
1117/* a6xx_dbgc_debug_bus_read() - Read data from trace bus */
1118static void a6xx_dbgc_debug_bus_read(struct kgsl_device *device,
1119 unsigned int block_id, unsigned int index, unsigned int *val)
1120{
1121 unsigned int reg;
1122
1123 reg = (block_id << A6XX_DBGC_CFG_DBGBUS_SEL_PING_BLK_SEL_SHIFT) |
1124 (index << A6XX_DBGC_CFG_DBGBUS_SEL_PING_INDEX_SHIFT);
1125
1126 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_A, reg);
1127 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_B, reg);
1128 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_C, reg);
1129 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_D, reg);
1130
1131 /*
1132 * There needs to be a delay of 1 us to ensure enough time for the
1133 * correct data to be funneled into the trace buffer
1134 */
1135 udelay(1);
1136
1137 kgsl_regread(device, A6XX_DBGC_CFG_DBGBUS_TRACE_BUF2, val);
1138 val++;
1139 kgsl_regread(device, A6XX_DBGC_CFG_DBGBUS_TRACE_BUF1, val);
1140}
1141
1142/* a6xx_snapshot_dbgc_debugbus_block() - Capture debug data for a gpu block */
1143static size_t a6xx_snapshot_dbgc_debugbus_block(struct kgsl_device *device,
1144 u8 *buf, size_t remain, void *priv)
1145{
1146 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
1147 struct kgsl_snapshot_debugbus *header =
1148 (struct kgsl_snapshot_debugbus *)buf;
1149 struct adreno_debugbus_block *block = priv;
1150 int i;
1151 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
1152 unsigned int dwords;
1153 unsigned int block_id;
1154 size_t size;
1155
1156 dwords = block->dwords;
1157
1158 /* For a6xx each debug bus data unit is 2 DWORDS */
1159 size = (dwords * sizeof(unsigned int) * 2) + sizeof(*header);
1160
1161 if (remain < size) {
1162 SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
1163 return 0;
1164 }
1165
1166 header->id = block->block_id;
1167 header->count = dwords * 2;
1168
1169 block_id = block->block_id;
1170 /* GMU_GX data is read using the GMU_CX block id on A630 */
1171 if (adreno_is_a630(adreno_dev) &&
1172 (block_id == A6XX_DBGBUS_GMU_GX))
1173 block_id = A6XX_DBGBUS_GMU_CX;
1174
1175 for (i = 0; i < dwords; i++)
1176 a6xx_dbgc_debug_bus_read(device, block_id, i, &data[i*2]);
1177
1178 return size;
1179}
1180
1181/* a6xx_snapshot_vbif_debugbus_block() - Capture debug data for VBIF block */
1182static size_t a6xx_snapshot_vbif_debugbus_block(struct kgsl_device *device,
1183 u8 *buf, size_t remain, void *priv)
1184{
1185 struct kgsl_snapshot_debugbus *header =
1186 (struct kgsl_snapshot_debugbus *)buf;
1187 struct adreno_debugbus_block *block = priv;
1188 int i, j;
1189 /*
1190 * Total number of VBIF data words considering 3 sections:
1191 * 2 arbiter blocks of 16 words
1192 * 5 AXI XIN blocks of 18 dwords each
1193 * 4 core clock side XIN blocks of 12 dwords each
1194 */
1195 unsigned int dwords = (16 * A6XX_NUM_AXI_ARB_BLOCKS) +
1196 (18 * A6XX_NUM_XIN_AXI_BLOCKS) +
1197 (12 * A6XX_NUM_XIN_CORE_BLOCKS);
1198 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
1199 size_t size;
1200 unsigned int reg_clk;
1201
1202 size = (dwords * sizeof(unsigned int)) + sizeof(*header);
1203
1204 if (remain < size) {
1205 SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
1206 return 0;
1207 }
1208 header->id = block->block_id;
1209 header->count = dwords;
1210
1211 kgsl_regread(device, A6XX_VBIF_CLKON, &reg_clk);
1212 kgsl_regwrite(device, A6XX_VBIF_CLKON, reg_clk |
1213 (A6XX_VBIF_CLKON_FORCE_ON_TESTBUS_MASK <<
1214 A6XX_VBIF_CLKON_FORCE_ON_TESTBUS_SHIFT));
1215 kgsl_regwrite(device, A6XX_VBIF_TEST_BUS1_CTRL0, 0);
1216 kgsl_regwrite(device, A6XX_VBIF_TEST_BUS_OUT_CTRL,
1217 (A6XX_VBIF_TEST_BUS_OUT_CTRL_EN_MASK <<
1218 A6XX_VBIF_TEST_BUS_OUT_CTRL_EN_SHIFT));
1219
1220 for (i = 0; i < A6XX_NUM_AXI_ARB_BLOCKS; i++) {
1221 kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL0,
1222 (1 << (i + 16)));
1223 for (j = 0; j < 16; j++) {
1224 kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL1,
1225 ((j & A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL_MASK)
1226 << A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL_SHIFT));
1227 kgsl_regread(device, A6XX_VBIF_TEST_BUS_OUT,
1228 data);
1229 data++;
1230 }
1231 }
1232
1233 /* XIN blocks AXI side */
1234 for (i = 0; i < A6XX_NUM_XIN_AXI_BLOCKS; i++) {
1235 kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL0, 1 << i);
1236 for (j = 0; j < 18; j++) {
1237 kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL1,
1238 ((j & A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL_MASK)
1239 << A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL_SHIFT));
1240 kgsl_regread(device, A6XX_VBIF_TEST_BUS_OUT,
1241 data);
1242 data++;
1243 }
1244 }
1245 kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL0, 0);
1246
1247 /* XIN blocks core clock side */
1248 for (i = 0; i < A6XX_NUM_XIN_CORE_BLOCKS; i++) {
1249 kgsl_regwrite(device, A6XX_VBIF_TEST_BUS1_CTRL0, 1 << i);
1250 for (j = 0; j < 12; j++) {
1251 kgsl_regwrite(device, A6XX_VBIF_TEST_BUS1_CTRL1,
1252 ((j & A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL_MASK)
1253 << A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL_SHIFT));
1254 kgsl_regread(device, A6XX_VBIF_TEST_BUS_OUT,
1255 data);
1256 data++;
1257 }
1258 }
1259 /* restore the clock of VBIF */
1260 kgsl_regwrite(device, A6XX_VBIF_CLKON, reg_clk);
1261 return size;
1262}
1263
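/* The CX DBGC registers are accessed through the dedicated mapping (a6xx_cx_dbgc) set up in a6xx_snapshot_debugbus() rather than through kgsl_regread()/kgsl_regwrite(). */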
1264static void _cx_dbgc_regread(unsigned int offsetwords, unsigned int *value)
1265{
1266 void __iomem *reg;
1267
1268 if (WARN((offsetwords < A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) ||
1269 (offsetwords > A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2),
1270 "Read beyond CX_DBGC block: 0x%x\n", offsetwords))
1271 return;
1272
1273 reg = a6xx_cx_dbgc +
1274 ((offsetwords - A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) << 2);
1275 *value = __raw_readl(reg);
1276
1277 /*
1278 * ensure this read finishes before the next one.
1279 * i.e. act like normal readl()
1280 */
1281 rmb();
1282}
1283
1284static void _cx_dbgc_regwrite(unsigned int offsetwords, unsigned int value)
1285{
1286 void __iomem *reg;
1287
1288 if (WARN((offsetwords < A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) ||
1289 (offsetwords > A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2),
1290 "Write beyond CX_DBGC block: 0x%x\n", offsetwords))
1291 return;
1292
1293 reg = a6xx_cx_dbgc +
1294 ((offsetwords - A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) << 2);
1295
1296 /*
1297 * ensure previous writes post before this one,
1298 * i.e. act like normal writel()
1299 */
1300 wmb();
1301 __raw_writel(value, reg);
1302}
1303
1304/* a6xx_cx_dbgc_debug_bus_read() - Read data from trace bus */
1305static void a6xx_cx_debug_bus_read(struct kgsl_device *device,
1306 unsigned int block_id, unsigned int index, unsigned int *val)
1307{
1308 unsigned int reg;
1309
1310 reg = (block_id << A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_BLK_SEL_SHIFT) |
1311 (index << A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_INDEX_SHIFT);
1312
1313 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_A, reg);
1314 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_B, reg);
1315 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_C, reg);
1316 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_D, reg);
1317
1318 /*
1319 * There needs to be a delay of 1 us to ensure enough time for the
1320 * correct data to be funneled into the trace buffer
1321 */
1322 udelay(1);
1323
1324 _cx_dbgc_regread(A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2, val);
1325 val++;
1326 _cx_dbgc_regread(A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1, val);
1327}
1328
1329/*
1330 * a6xx_snapshot_cx_dbgc_debugbus_block() - Capture debug data for a gpu
1331 * block from the CX DBGC block
1332 */
1333static size_t a6xx_snapshot_cx_dbgc_debugbus_block(struct kgsl_device *device,
1334 u8 *buf, size_t remain, void *priv)
1335{
1336 struct kgsl_snapshot_debugbus *header =
1337 (struct kgsl_snapshot_debugbus *)buf;
1338 struct adreno_debugbus_block *block = priv;
1339 int i;
1340 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
1341 unsigned int dwords;
1342 size_t size;
1343
1344 dwords = block->dwords;
1345
1346 /* For a6xx each debug bus data unit is 2 DWORDS */
1347 size = (dwords * sizeof(unsigned int) * 2) + sizeof(*header);
1348
1349 if (remain < size) {
1350 SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
1351 return 0;
1352 }
1353
1354 header->id = block->block_id;
1355 header->count = dwords * 2;
1356
1357 for (i = 0; i < dwords; i++)
1358 a6xx_cx_debug_bus_read(device, block->block_id, i,
1359 &data[i*2]);
1360
1361 return size;
1362}
1363
1364/* a6xx_snapshot_debugbus() - Capture debug bus data */
1365static void a6xx_snapshot_debugbus(struct kgsl_device *device,
1366 struct kgsl_snapshot *snapshot)
1367{
1368 int i;
1369
1370 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_CNTLT,
1371 (0xf << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT_SHIFT) |
1372 (0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU_SHIFT) |
1373 (0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN_SHIFT));
1374
1375 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_CNTLM,
1376 0xf << A6XX_DBGC_CFG_DBGBUS_CTLTM_ENABLE_SHIFT);
1377
1378 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_0, 0);
1379 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_1, 0);
1380 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_2, 0);
1381 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_3, 0);
1382
1383 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_BYTEL_0,
1384 (0 << A6XX_DBGC_CFG_DBGBUS_BYTEL0_SHIFT) |
1385 (1 << A6XX_DBGC_CFG_DBGBUS_BYTEL1_SHIFT) |
1386 (2 << A6XX_DBGC_CFG_DBGBUS_BYTEL2_SHIFT) |
1387 (3 << A6XX_DBGC_CFG_DBGBUS_BYTEL3_SHIFT) |
1388 (4 << A6XX_DBGC_CFG_DBGBUS_BYTEL4_SHIFT) |
1389 (5 << A6XX_DBGC_CFG_DBGBUS_BYTEL5_SHIFT) |
1390 (6 << A6XX_DBGC_CFG_DBGBUS_BYTEL6_SHIFT) |
1391 (7 << A6XX_DBGC_CFG_DBGBUS_BYTEL7_SHIFT));
1392 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_BYTEL_1,
1393 (8 << A6XX_DBGC_CFG_DBGBUS_BYTEL8_SHIFT) |
1394 (9 << A6XX_DBGC_CFG_DBGBUS_BYTEL9_SHIFT) |
1395 (10 << A6XX_DBGC_CFG_DBGBUS_BYTEL10_SHIFT) |
1396 (11 << A6XX_DBGC_CFG_DBGBUS_BYTEL11_SHIFT) |
1397 (12 << A6XX_DBGC_CFG_DBGBUS_BYTEL12_SHIFT) |
1398 (13 << A6XX_DBGC_CFG_DBGBUS_BYTEL13_SHIFT) |
1399 (14 << A6XX_DBGC_CFG_DBGBUS_BYTEL14_SHIFT) |
1400 (15 << A6XX_DBGC_CFG_DBGBUS_BYTEL15_SHIFT));
1401
1402 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_0, 0);
1403 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_1, 0);
1404 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_2, 0);
1405 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_3, 0);
1406
1407 a6xx_cx_dbgc = ioremap(device->reg_phys +
1408 (A6XX_CX_DBGC_CFG_DBGBUS_SEL_A << 2),
1409 (A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2 -
1410 A6XX_CX_DBGC_CFG_DBGBUS_SEL_A + 1) << 2);
1411
1412 if (a6xx_cx_dbgc) {
1413 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_CNTLT,
1414 (0xf << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT_SHIFT) |
1415 (0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU_SHIFT) |
1416 (0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN_SHIFT));
1417
1418 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_CNTLM,
1419 0xf << A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE_SHIFT);
1420
1421 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0, 0);
1422 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1, 0);
1423 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2, 0);
1424 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3, 0);
1425
1426 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0,
1427 (0 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL0_SHIFT) |
1428 (1 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL1_SHIFT) |
1429 (2 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL2_SHIFT) |
1430 (3 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL3_SHIFT) |
1431 (4 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL4_SHIFT) |
1432 (5 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL5_SHIFT) |
1433 (6 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL6_SHIFT) |
1434 (7 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL7_SHIFT));
1435 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1,
1436 (8 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL8_SHIFT) |
1437 (9 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL9_SHIFT) |
1438 (10 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL10_SHIFT) |
1439 (11 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL11_SHIFT) |
1440 (12 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL12_SHIFT) |
1441 (13 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL13_SHIFT) |
1442 (14 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL14_SHIFT) |
1443 (15 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL15_SHIFT));
1444
1445 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0, 0);
1446 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1, 0);
1447 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2, 0);
1448 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3, 0);
1449 } else
1450 KGSL_DRV_ERR(device, "Unable to ioremap CX_DBGC_CFG block\n");
1451
1452 for (i = 0; i < ARRAY_SIZE(a6xx_dbgc_debugbus_blocks); i++) {
1453 kgsl_snapshot_add_section(device,
1454 KGSL_SNAPSHOT_SECTION_DEBUGBUS,
1455 snapshot, a6xx_snapshot_dbgc_debugbus_block,
1456 (void *) &a6xx_dbgc_debugbus_blocks[i]);
1457 }
1458
1459 kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUGBUS,
1460 snapshot, a6xx_snapshot_vbif_debugbus_block,
1461 (void *) &a6xx_vbif_debugbus_blocks);
1462
1463 if (a6xx_cx_dbgc) {
1464 for (i = 0; i < ARRAY_SIZE(a6xx_cx_dbgc_debugbus_blocks); i++) {
1465 kgsl_snapshot_add_section(device,
1466 KGSL_SNAPSHOT_SECTION_DEBUGBUS,
1467 snapshot, a6xx_snapshot_cx_dbgc_debugbus_block,
1468 (void *) &a6xx_cx_dbgc_debugbus_blocks[i]);
1469 }
1470 iounmap(a6xx_cx_dbgc);
1471 }
1472}
1473
1474static void a6xx_snapshot_gmu(struct kgsl_device *device,
1475 struct kgsl_snapshot *snapshot)
1476{
1477 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
1478 struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
1479
1480 if (!kgsl_gmu_isenabled(device))
1481 return;
1482
1483 adreno_snapshot_registers(device, snapshot, a6xx_gmu_registers,
1484 ARRAY_SIZE(a6xx_gmu_registers) / 2);
1485
1486 if (gpudev->gx_is_on(adreno_dev))
1487 adreno_snapshot_registers(device, snapshot,
1488 a6xx_gmu_gx_registers,
1489 ARRAY_SIZE(a6xx_gmu_gx_registers) / 2);
1490}
1491
1492/* a6xx_snapshot_sqe() - Dump SQE data in snapshot */
1493static size_t a6xx_snapshot_sqe(struct kgsl_device *device, u8 *buf,
1494 size_t remain, void *priv)
1495{
1496 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
1497 struct kgsl_snapshot_debug *header = (struct kgsl_snapshot_debug *)buf;
1498 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
1499 struct adreno_firmware *fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);
1500
1501 if (remain < DEBUG_SECTION_SZ(1)) {
1502 SNAPSHOT_ERR_NOMEM(device, "SQE VERSION DEBUG");
1503 return 0;
1504 }
1505
1506 /* Dump the SQE firmware version */
1507 header->type = SNAPSHOT_DEBUG_SQE_VERSION;
1508 header->size = 1;
1509 *data = fw->version;
1510
1511 return DEBUG_SECTION_SZ(1);
1512}
1513
1514static void _a6xx_do_crashdump(struct kgsl_device *device)
1515{
1516 unsigned long wait_time;
1517 unsigned int reg = 0;
1518 unsigned int val;
1519
1520 crash_dump_valid = false;
1521
1522 if (a6xx_capturescript.gpuaddr == 0 ||
1523 a6xx_crashdump_registers.gpuaddr == 0)
1524 return;
1525
 1526	/* If the SMMU is stalled we cannot do a crash dump */
1527 kgsl_regread(device, A6XX_RBBM_STATUS3, &val);
1528 if (val & BIT(24))
1529 return;
1530
1531 /* Turn on APRIV so we can access the buffers */
1532 kgsl_regwrite(device, A6XX_CP_MISC_CNTL, 1);
1533
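	/* Point the crash dumper at the capture script and kick it off */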
1534 kgsl_regwrite(device, A6XX_CP_CRASH_SCRIPT_BASE_LO,
1535 lower_32_bits(a6xx_capturescript.gpuaddr));
1536 kgsl_regwrite(device, A6XX_CP_CRASH_SCRIPT_BASE_HI,
1537 upper_32_bits(a6xx_capturescript.gpuaddr));
1538 kgsl_regwrite(device, A6XX_CP_CRASH_DUMP_CNTL, 1);
1539
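	/* Wait for the dumper to report completion (bit 1 of the status register) or time out */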
1540 wait_time = jiffies + msecs_to_jiffies(CP_CRASH_DUMPER_TIMEOUT);
1541 while (!time_after(jiffies, wait_time)) {
1542 kgsl_regread(device, A6XX_CP_CRASH_DUMP_STATUS, &reg);
1543 if (reg & 0x2)
1544 break;
1545 cpu_relax();
1546 }
1547
1548 kgsl_regwrite(device, A6XX_CP_MISC_CNTL, 0);
1549
1550 if (!(reg & 0x2)) {
1551 KGSL_CORE_ERR("Crash dump timed out: 0x%X\n", reg);
1552 return;
1553 }
1554
1555 crash_dump_valid = true;
1556}
1557
1558/*
1559 * a6xx_snapshot() - A6XX GPU snapshot function
1560 * @adreno_dev: Device being snapshotted
1561 * @snapshot: Pointer to the snapshot instance
1562 *
1563 * This is where all of the A6XX specific bits and pieces are grabbed
1564 * into the snapshot memory
1565 */
1566void a6xx_snapshot(struct adreno_device *adreno_dev,
1567 struct kgsl_snapshot *snapshot)
1568{
1569 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
1570 struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
1571 struct adreno_snapshot_data *snap_data = gpudev->snapshot_data;
Shrenuj Bansald197bf62017-04-07 11:00:09 -07001572 bool sptprac_on;
1573
1574 /* GMU TCM data dumped through AHB */
1575 a6xx_snapshot_gmu(device, snapshot);
1576
1577 sptprac_on = gpudev->sptprac_is_on(adreno_dev);
1578
1579 /* Return if the GX is off */
1580 if (!gpudev->gx_is_on(adreno_dev)) {
1581 pr_err("GX is off. Only dumping GMU data in snapshot\n");
1582 return;
1583 }
Shrenuj Bansal41665402016-12-16 15:25:54 -08001584
Lynus Vaz030473e2017-06-22 17:33:06 +05301585	/* Dump the registers that get affected by the crash dumper trigger */
1586 kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
1587 snapshot, a6xx_snapshot_pre_crashdump_regs, NULL);
1588
 1589	/* Also dump the VBIF registers, which get affected by the crash dumper */
1590 adreno_snapshot_vbif_registers(device, snapshot,
1591 a6xx_vbif_snapshot_registers,
1592 ARRAY_SIZE(a6xx_vbif_snapshot_registers));
1593
Shrenuj Bansal41665402016-12-16 15:25:54 -08001594 /* Try to run the crash dumper */
Shrenuj Bansald197bf62017-04-07 11:00:09 -07001595 if (sptprac_on)
1596 _a6xx_do_crashdump(device);
Shrenuj Bansal41665402016-12-16 15:25:54 -08001597
1598 kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
1599 snapshot, a6xx_snapshot_registers, NULL);
1600
Shrenuj Bansal41665402016-12-16 15:25:54 -08001601 /* CP_SQE indexed registers */
1602 kgsl_snapshot_indexed_registers(device, snapshot,
1603 A6XX_CP_SQE_STAT_ADDR, A6XX_CP_SQE_STAT_DATA,
1604 0, snap_data->sect_sizes->cp_pfp);
1605
1606 /* CP_DRAW_STATE */
1607 kgsl_snapshot_indexed_registers(device, snapshot,
1608 A6XX_CP_DRAW_STATE_ADDR, A6XX_CP_DRAW_STATE_DATA,
1609 0, 0x100);
1610
1611 /* SQE_UCODE Cache */
1612 kgsl_snapshot_indexed_registers(device, snapshot,
1613 A6XX_CP_SQE_UCODE_DBG_ADDR, A6XX_CP_SQE_UCODE_DBG_DATA,
1614 0, 0x6000);
1615
1616 /* CP ROQ */
1617 kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG,
1618 snapshot, adreno_snapshot_cp_roq,
1619 &snap_data->sect_sizes->roq);
1620
Lynus Vaz85150052017-02-21 17:57:48 +05301621 /* SQE Firmware */
1622 kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG,
1623 snapshot, a6xx_snapshot_sqe, NULL);
1624
Lynus Vaza5922742017-03-14 18:50:54 +05301625 /* Mempool debug data */
1626 a6xx_snapshot_mempool(device, snapshot);
1627
Shrenuj Bansald197bf62017-04-07 11:00:09 -07001628 if (sptprac_on) {
1629 /* Shader memory */
1630 a6xx_snapshot_shader(device, snapshot);
Lynus Vaz9ad67a32017-03-10 14:55:02 +05301631
Shrenuj Bansald197bf62017-04-07 11:00:09 -07001632 /* MVC register section */
1633 a6xx_snapshot_mvc_regs(device, snapshot);
Shrenuj Bansal41665402016-12-16 15:25:54 -08001634
Shrenuj Bansald197bf62017-04-07 11:00:09 -07001635 /* registers dumped through DBG AHB */
1636 a6xx_snapshot_dbgahb_regs(device, snapshot);
1637 }
Lynus Vaz461e2382017-01-16 19:35:41 +05301638
Lynus Vaz20c81272017-02-10 16:22:12 +05301639 a6xx_snapshot_debugbus(device, snapshot);
Kyle Piefer60733aa2017-03-21 11:24:01 -07001640
Shrenuj Bansal41665402016-12-16 15:25:54 -08001641}
1642
1643static int _a6xx_crashdump_init_mvc(uint64_t *ptr, uint64_t *offset)
1644{
1645 int qwords = 0;
1646 unsigned int i, j, k;
1647 unsigned int count;
1648
1649 for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
1650 struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];
1651
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -06001652 if (cluster->sel) {
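			/* Program the RB sub-block select aperture before reading this cluster */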
1653 ptr[qwords++] = cluster->sel->val;
1654 ptr[qwords++] = ((uint64_t)cluster->sel->cd_reg << 44) |
1655 (1 << 21) | 1;
1656 }
1657
Shrenuj Bansal41665402016-12-16 15:25:54 -08001658 cluster->offset0 = *offset;
1659 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
1660
1661 if (j == 1)
1662 cluster->offset1 = *offset;
1663
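			/* Select this cluster and context through CP_APERTURE_CNTL_CD */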
1664 ptr[qwords++] = (cluster->id << 8) | (j << 4) | j;
1665 ptr[qwords++] =
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -06001666 ((uint64_t)A6XX_CP_APERTURE_CNTL_CD << 44) |
Shrenuj Bansal41665402016-12-16 15:25:54 -08001667 (1 << 21) | 1;
1668
1669 for (k = 0; k < cluster->num_sets; k++) {
1670 count = REG_PAIR_COUNT(cluster->regs, k);
1671 ptr[qwords++] =
1672 a6xx_crashdump_registers.gpuaddr + *offset;
1673 ptr[qwords++] =
1674 (((uint64_t)cluster->regs[2 * k]) << 44) |
1675 count;
1676
1677 *offset += count * sizeof(unsigned int);
1678 }
1679 }
1680 }
1681
1682 return qwords;
1683}
1684
Lynus Vaz9ad67a32017-03-10 14:55:02 +05301685static int _a6xx_crashdump_init_shader(struct a6xx_shader_block *block,
1686 uint64_t *ptr, uint64_t *offset)
1687{
1688 int qwords = 0;
1689 unsigned int j;
1690
1691 /* Capture each bank in the block */
1692 for (j = 0; j < A6XX_NUM_SHADER_BANKS; j++) {
1693 /* Program the aperture */
1694 ptr[qwords++] =
1695 (block->statetype << A6XX_SHADER_STATETYPE_SHIFT) | j;
1696 ptr[qwords++] = (((uint64_t) A6XX_HLSQ_DBG_READ_SEL << 44)) |
1697 (1 << 21) | 1;
1698
1699 /* Read all the data in one chunk */
1700 ptr[qwords++] = a6xx_crashdump_registers.gpuaddr + *offset;
1701 ptr[qwords++] =
1702 (((uint64_t) A6XX_HLSQ_DBG_AHB_READ_APERTURE << 44)) |
1703 block->sz;
1704
1705 /* Remember the offset of the first bank for easy access */
1706 if (j == 0)
1707 block->offset = *offset;
1708
1709 *offset += block->sz * sizeof(unsigned int);
1710 }
1711
1712 return qwords;
1713}
1714
Lynus Vaz1e258612017-04-27 21:35:22 +05301715static int _a6xx_crashdump_init_ctx_dbgahb(uint64_t *ptr, uint64_t *offset)
1716{
1717 int qwords = 0;
1718 unsigned int i, j, k;
1719 unsigned int count;
1720
1721 for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
1722 struct a6xx_cluster_dbgahb_registers *cluster =
1723 &a6xx_dbgahb_ctx_clusters[i];
1724
1725 cluster->offset0 = *offset;
1726
1727 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
1728 if (j == 1)
1729 cluster->offset1 = *offset;
1730
1731 /* Program the aperture */
1732 ptr[qwords++] =
1733 ((cluster->statetype + j * 2) & 0xff) << 8;
1734 ptr[qwords++] =
1735 (((uint64_t)A6XX_HLSQ_DBG_READ_SEL << 44)) |
1736 (1 << 21) | 1;
1737
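			/*
			 * Each range is read back through the HLSQ AHB read
			 * aperture; the aperture offset is the register's
			 * dword distance from the cluster's register base.
			 */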
1738 for (k = 0; k < cluster->num_sets; k++) {
1739 unsigned int start = cluster->regs[2 * k];
1740
1741 count = REG_PAIR_COUNT(cluster->regs, k);
1742 ptr[qwords++] =
1743 a6xx_crashdump_registers.gpuaddr + *offset;
1744 ptr[qwords++] =
1745 (((uint64_t)(A6XX_HLSQ_DBG_AHB_READ_APERTURE +
1746 start - cluster->regbase / 4) << 44)) |
1747 count;
1748
1749 *offset += count * sizeof(unsigned int);
1750 }
1751 }
1752 }
1753 return qwords;
1754}
1755
Harshdeep Dhatt52ccc942017-05-10 12:35:30 -06001756static int _a6xx_crashdump_init_non_ctx_dbgahb(uint64_t *ptr, uint64_t *offset)
1757{
1758 int qwords = 0;
1759 unsigned int i, k;
1760 unsigned int count;
1761
1762 for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
1763 struct a6xx_non_ctx_dbgahb_registers *regs =
1764 &a6xx_non_ctx_dbgahb[i];
1765
1766 regs->offset = *offset;
1767
1768 /* Program the aperture */
1769 ptr[qwords++] = (regs->statetype & 0xff) << 8;
1770 ptr[qwords++] = (((uint64_t)A6XX_HLSQ_DBG_READ_SEL << 44)) |
1771 (1 << 21) | 1;
1772
1773 for (k = 0; k < regs->num_sets; k++) {
1774 unsigned int start = regs->regs[2 * k];
1775
1776 count = REG_PAIR_COUNT(regs->regs, k);
1777 ptr[qwords++] =
1778 a6xx_crashdump_registers.gpuaddr + *offset;
1779 ptr[qwords++] =
1780 (((uint64_t)(A6XX_HLSQ_DBG_AHB_READ_APERTURE +
1781 start - regs->regbase / 4) << 44)) |
1782 count;
1783
1784 *offset += count * sizeof(unsigned int);
1785 }
1786 }
1787 return qwords;
1788}
1789
Shrenuj Bansal41665402016-12-16 15:25:54 -08001790void a6xx_crashdump_init(struct adreno_device *adreno_dev)
1791{
1792 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
1793 unsigned int script_size = 0;
1794 unsigned int data_size = 0;
1795 unsigned int i, j, k;
1796 uint64_t *ptr;
1797 uint64_t offset = 0;
1798
1799 if (a6xx_capturescript.gpuaddr != 0 &&
1800 a6xx_crashdump_registers.gpuaddr != 0)
1801 return;
1802
1803 /*
1804 * We need to allocate two buffers:
 1805	 * 1 - the buffer to hold the capture script
1806 * 2 - the buffer to hold the data
1807 */
1808
1809 /*
1810 * To save the registers, we need 16 bytes per register pair for the
1811 * script and a dword for each register in the data
1812 */
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -06001813 for (i = 0; i < ARRAY_SIZE(a6xx_reg_list); i++) {
1814 struct reg_list *regs = &a6xx_reg_list[i];
1815
1816 /* 16 bytes for programming the aperture */
1817 if (regs->sel)
1818 script_size += 16;
Shrenuj Bansal41665402016-12-16 15:25:54 -08001819
1820 /* Each pair needs 16 bytes (2 qwords) */
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -06001821 script_size += regs->count * 16;
Shrenuj Bansal41665402016-12-16 15:25:54 -08001822
1823 /* Each register needs a dword in the data */
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -06001824 for (j = 0; j < regs->count; j++)
Shrenuj Bansal41665402016-12-16 15:25:54 -08001825 data_size += REG_PAIR_COUNT(regs->regs, j) *
1826 sizeof(unsigned int);
1827
1828 }
1829
Lynus Vaz9ad67a32017-03-10 14:55:02 +05301830 /*
1831 * To save the shader blocks for each block in each type we need 32
1832 * bytes for the script (16 bytes to program the aperture and 16 to
1833 * read the data) and then a block specific number of bytes to hold
1834 * the data
1835 */
1836 for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
1837 script_size += 32 * A6XX_NUM_SHADER_BANKS;
1838 data_size += a6xx_shader_blocks[i].sz * sizeof(unsigned int) *
1839 A6XX_NUM_SHADER_BANKS;
1840 }
1841
Shrenuj Bansal41665402016-12-16 15:25:54 -08001842 /* Calculate the script and data size for MVC registers */
1843 for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
1844 struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];
1845
1846 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
1847
1848 /* 16 bytes for programming the aperture */
1849 script_size += 16;
1850
1851 /* Reading each pair of registers takes 16 bytes */
1852 script_size += 16 * cluster->num_sets;
1853
1854 /* A dword per register read from the cluster list */
1855 for (k = 0; k < cluster->num_sets; k++)
1856 data_size += REG_PAIR_COUNT(cluster->regs, k) *
1857 sizeof(unsigned int);
1858 }
1859 }
1860
Lynus Vaz1e258612017-04-27 21:35:22 +05301861 /* Calculate the script and data size for debug AHB registers */
1862 for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
1863 struct a6xx_cluster_dbgahb_registers *cluster =
1864 &a6xx_dbgahb_ctx_clusters[i];
1865
1866 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
1867
1868 /* 16 bytes for programming the aperture */
1869 script_size += 16;
1870
1871 /* Reading each pair of registers takes 16 bytes */
1872 script_size += 16 * cluster->num_sets;
1873
1874 /* A dword per register read from the cluster list */
1875 for (k = 0; k < cluster->num_sets; k++)
1876 data_size += REG_PAIR_COUNT(cluster->regs, k) *
1877 sizeof(unsigned int);
1878 }
1879 }
1880
Harshdeep Dhatt52ccc942017-05-10 12:35:30 -06001881 /*
1882 * Calculate the script and data size for non context debug
1883 * AHB registers
1884 */
1885 for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
1886 struct a6xx_non_ctx_dbgahb_registers *regs =
1887 &a6xx_non_ctx_dbgahb[i];
1888
1889 /* 16 bytes for programming the aperture */
1890 script_size += 16;
1891
1892 /* Reading each pair of registers takes 16 bytes */
1893 script_size += 16 * regs->num_sets;
1894
1895 /* A dword per register read from the cluster list */
1896 for (k = 0; k < regs->num_sets; k++)
1897 data_size += REG_PAIR_COUNT(regs->regs, k) *
1898 sizeof(unsigned int);
1899 }
1900
Shrenuj Bansal41665402016-12-16 15:25:54 -08001901 /* Now allocate the script and data buffers */
1902
 1903	/* The script buffer needs 2 extra qwords on the end */
1904 if (kgsl_allocate_global(device, &a6xx_capturescript,
1905 script_size + 16, KGSL_MEMFLAGS_GPUREADONLY,
1906 KGSL_MEMDESC_PRIVILEGED, "capturescript"))
1907 return;
1908
1909 if (kgsl_allocate_global(device, &a6xx_crashdump_registers, data_size,
1910 0, KGSL_MEMDESC_PRIVILEGED, "capturescript_regs")) {
1911 kgsl_free_global(KGSL_DEVICE(adreno_dev), &a6xx_capturescript);
1912 return;
1913 }
1914
1915 /* Build the crash script */
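	/*
	 * Script entry format (inferred from the encodings used below): each
	 * entry is a pair of qwords. For a block read, the first qword is the
	 * GPU address where the data should land and the second packs the
	 * starting register offset into bits [63:44] with the dword count in
	 * the low bits. Entries that set bit 21 with a count of 1 appear to be
	 * register writes (value in the first qword, destination offset in
	 * bits [63:44]); these program the aperture/select registers before
	 * the reads.
	 */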
1916
1917 ptr = (uint64_t *)a6xx_capturescript.hostptr;
1918
1919 /* For the registers, program a read command for each pair */
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -06001920 for (i = 0; i < ARRAY_SIZE(a6xx_reg_list); i++) {
1921 struct reg_list *regs = &a6xx_reg_list[i];
Shrenuj Bansal41665402016-12-16 15:25:54 -08001922
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -06001923 /* Program the SEL_CNTL_CD register appropriately */
1924 if (regs->sel) {
1925 *ptr++ = regs->sel->val;
1926 *ptr++ = (((uint64_t)regs->sel->cd_reg << 44)) |
1927 (1 << 21) | 1;
1928 }
1929
1930 for (j = 0; j < regs->count; j++) {
Shrenuj Bansal41665402016-12-16 15:25:54 -08001931 unsigned int r = REG_PAIR_COUNT(regs->regs, j);
1932 *ptr++ = a6xx_crashdump_registers.gpuaddr + offset;
1933 *ptr++ = (((uint64_t) regs->regs[2 * j]) << 44) | r;
1934 offset += r * sizeof(unsigned int);
1935 }
1936 }
1937
Lynus Vaz9ad67a32017-03-10 14:55:02 +05301938 /* Program each shader block */
1939 for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
1940 ptr += _a6xx_crashdump_init_shader(&a6xx_shader_blocks[i], ptr,
1941 &offset);
1942 }
1943
Shrenuj Bansal41665402016-12-16 15:25:54 -08001944	/* Program the capturescript for the MVC registers */
1945 ptr += _a6xx_crashdump_init_mvc(ptr, &offset);
1946
Lynus Vaz1e258612017-04-27 21:35:22 +05301947 ptr += _a6xx_crashdump_init_ctx_dbgahb(ptr, &offset);
1948
Harshdeep Dhatt52ccc942017-05-10 12:35:30 -06001949 ptr += _a6xx_crashdump_init_non_ctx_dbgahb(ptr, &offset);
1950
Shrenuj Bansal41665402016-12-16 15:25:54 -08001951 *ptr++ = 0;
1952 *ptr++ = 0;
1953}