blob: 880ee136c9e79068998b13993c3857f9374f96fd [file] [log] [blame]
/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
13
14#include <linux/io.h>
15#include "kgsl.h"
16#include "adreno.h"
17#include "kgsl_snapshot.h"
18#include "adreno_snapshot.h"
19#include "a6xx_reg.h"
20#include "adreno_a6xx.h"
Kyle Piefer60733aa2017-03-21 11:24:01 -070021#include "kgsl_gmu.h"
Shrenuj Bansal41665402016-12-16 15:25:54 -080022
23#define A6XX_NUM_CTXTS 2
Lynus Vazdaac540732017-07-27 14:23:35 +053024#define A6XX_NUM_AXI_ARB_BLOCKS 2
25#define A6XX_NUM_XIN_AXI_BLOCKS 5
26#define A6XX_NUM_XIN_CORE_BLOCKS 4
Shrenuj Bansal41665402016-12-16 15:25:54 -080027
/* GRAS cluster registers: start/end offset pairs (inclusive) */
static const unsigned int a6xx_gras_cluster[] = {
	0x8000, 0x8006, 0x8010, 0x8092, 0x8094, 0x809D, 0x80A0, 0x80A6,
	0x80AF, 0x80F1, 0x8100, 0x8107, 0x8109, 0x8109, 0x8110, 0x8110,
	0x8400, 0x840B,
};
33
/* PS cluster registers behind the RB RAC aperture (see _a6xx_rb_rac_aperture) */
static const unsigned int a6xx_ps_cluster_rac[] = {
	0x8800, 0x8806, 0x8809, 0x8811, 0x8818, 0x881E, 0x8820, 0x8865,
	0x8870, 0x8879, 0x8880, 0x8889, 0x8890, 0x8891, 0x8898, 0x8898,
	0x88C0, 0x88C1, 0x88D0, 0x88E3, 0x8900, 0x890C, 0x890F, 0x891A,
	0x8C00, 0x8C01, 0x8C08, 0x8C10, 0x8C17, 0x8C1F, 0x8C26, 0x8C33,
};
40
/* PS cluster registers behind the RB RBP aperture (see _a6xx_rb_rbp_aperture) */
static const unsigned int a6xx_ps_cluster_rbp[] = {
	0x88F0, 0x88F3, 0x890D, 0x890E, 0x8927, 0x8928, 0x8BF0, 0x8BF1,
	0x8C02, 0x8C07, 0x8C11, 0x8C16, 0x8C20, 0x8C25,
};
45
/* PS cluster registers that need no aperture select */
static const unsigned int a6xx_ps_cluster[] = {
	0x9200, 0x9216, 0x9218, 0x9236, 0x9300, 0x9306,
};
49
/* FE (front end) cluster registers: start/end pairs (inclusive) */
static const unsigned int a6xx_fe_cluster[] = {
	0x9300, 0x9306, 0x9800, 0x9806, 0x9B00, 0x9B07, 0xA000, 0xA009,
	0xA00E, 0xA0EF, 0xA0F8, 0xA0F8,
};
54
/* PC_VS cluster registers: start/end pairs (inclusive) */
static const unsigned int a6xx_pc_vs_cluster[] = {
	0x9100, 0x9108, 0x9300, 0x9306, 0x9980, 0x9981, 0x9B00, 0x9B07,
};
58
/*
 * Aperture select register descriptor: 'val' is written to 'host_reg' for
 * CPU (AHB) reads and to 'cd_reg' for crash-dumper reads before dumping
 * the registers the aperture exposes.
 */
static const struct sel_reg {
	unsigned int host_reg;	/* select register used on the host path */
	unsigned int cd_reg;	/* select register used by the crash dumper */
	unsigned int val;	/* sub-block select value */
} _a6xx_rb_rac_aperture = {
	.host_reg = A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST,
	.cd_reg = A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD,
	.val = 0x0,
},
_a6xx_rb_rbp_aperture = {
	.host_reg = A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST,
	.cd_reg = A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD,
	.val = 0x9,
};
73
/*
 * MVC register clusters dumped per context. 'num_sets' counts start/end
 * pairs; 'offset0'/'offset1' are per-context offsets into the crashdump
 * buffer, filled in when the capture script is built.
 */
static struct a6xx_cluster_registers {
	unsigned int id;		/* CP cluster id */
	const unsigned int *regs;	/* start/end pairs (inclusive) */
	unsigned int num_sets;		/* number of pairs in 'regs' */
	const struct sel_reg *sel;	/* optional aperture select, or NULL */
	unsigned int offset0;		/* crashdump buffer offset, ctxt 0 */
	unsigned int offset1;		/* crashdump buffer offset, ctxt 1 */
} a6xx_clusters[] = {
	{ CP_CLUSTER_GRAS, a6xx_gras_cluster, ARRAY_SIZE(a6xx_gras_cluster)/2,
		NULL },
	{ CP_CLUSTER_PS, a6xx_ps_cluster_rac, ARRAY_SIZE(a6xx_ps_cluster_rac)/2,
		&_a6xx_rb_rac_aperture },
	{ CP_CLUSTER_PS, a6xx_ps_cluster_rbp, ARRAY_SIZE(a6xx_ps_cluster_rbp)/2,
		&_a6xx_rb_rbp_aperture },
	{ CP_CLUSTER_PS, a6xx_ps_cluster, ARRAY_SIZE(a6xx_ps_cluster)/2,
		NULL },
	{ CP_CLUSTER_FE, a6xx_fe_cluster, ARRAY_SIZE(a6xx_fe_cluster)/2,
		NULL },
	{ CP_CLUSTER_PC_VS, a6xx_pc_vs_cluster,
		ARRAY_SIZE(a6xx_pc_vs_cluster)/2, NULL },
};
95
/* Per-section context passed to the cluster snapshot callbacks */
struct a6xx_cluster_regs_info {
	struct a6xx_cluster_registers *cluster;
	unsigned int ctxt_id;	/* context bank (0..A6XX_NUM_CTXTS-1) */
};
100
/*
 * SP/TP/HLSQ context-cluster register ranges, read through the DBGAHB
 * aperture. All tables are start/end dword-offset pairs (inclusive).
 */
static const unsigned int a6xx_sp_vs_hlsq_cluster[] = {
	0xB800, 0xB803, 0xB820, 0xB822,
};

static const unsigned int a6xx_sp_vs_sp_cluster[] = {
	0xA800, 0xA824, 0xA830, 0xA83C, 0xA840, 0xA864, 0xA870, 0xA895,
	0xA8A0, 0xA8AF, 0xA8C0, 0xA8C3,
};

/* "duplicate" tables are dumped for both the VS and PS contexts */
static const unsigned int a6xx_hlsq_duplicate_cluster[] = {
	0xBB10, 0xBB11, 0xBB20, 0xBB29,
};

static const unsigned int a6xx_hlsq_2d_duplicate_cluster[] = {
	0xBD80, 0xBD80,
};

static const unsigned int a6xx_sp_duplicate_cluster[] = {
	0xAB00, 0xAB00, 0xAB04, 0xAB05, 0xAB10, 0xAB1B, 0xAB20, 0xAB20,
};

static const unsigned int a6xx_tp_duplicate_cluster[] = {
	0xB300, 0xB307, 0xB309, 0xB309, 0xB380, 0xB382,
};

static const unsigned int a6xx_sp_ps_hlsq_cluster[] = {
	0xB980, 0xB980, 0xB982, 0xB987, 0xB990, 0xB99B, 0xB9A0, 0xB9A2,
	0xB9C0, 0xB9C9,
};

static const unsigned int a6xx_sp_ps_hlsq_2d_cluster[] = {
	0xBD80, 0xBD80,
};

static const unsigned int a6xx_sp_ps_sp_cluster[] = {
	0xA980, 0xA9A8, 0xA9B0, 0xA9BC, 0xA9D0, 0xA9D3, 0xA9E0, 0xA9F3,
	0xAA00, 0xAA00, 0xAA30, 0xAA31,
};

static const unsigned int a6xx_sp_ps_sp_2d_cluster[] = {
	0xACC0, 0xACC0,
};

static const unsigned int a6xx_sp_ps_tp_cluster[] = {
	0xB180, 0xB183, 0xB190, 0xB191,
};

static const unsigned int a6xx_sp_ps_tp_2d_cluster[] = {
	0xB4C0, 0xB4D1,
};
151
/*
 * Context clusters read through the HLSQ DBGAHB aperture. 'regbase' is the
 * byte base of the aperture window, 'statetype' selects the state machine
 * (context 1 adds 1 to it — see a6xx_legacy_snapshot_cluster_dbgahb), and
 * 'offset0'/'offset1' are filled in when the capture script is built.
 */
static struct a6xx_cluster_dbgahb_registers {
	unsigned int id;		/* CP cluster id */
	unsigned int regbase;		/* DBGAHB aperture base (bytes) */
	unsigned int statetype;		/* debug state type select */
	const unsigned int *regs;	/* start/end pairs (inclusive) */
	unsigned int num_sets;		/* number of pairs in 'regs' */
	unsigned int offset0;		/* crashdump buffer offset, ctxt 0 */
	unsigned int offset1;		/* crashdump buffer offset, ctxt 1 */
} a6xx_dbgahb_ctx_clusters[] = {
	{ CP_CLUSTER_SP_VS, 0x0002E000, 0x41, a6xx_sp_vs_hlsq_cluster,
		ARRAY_SIZE(a6xx_sp_vs_hlsq_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002A000, 0x21, a6xx_sp_vs_sp_cluster,
		ARRAY_SIZE(a6xx_sp_vs_sp_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002E000, 0x41, a6xx_hlsq_duplicate_cluster,
		ARRAY_SIZE(a6xx_hlsq_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002F000, 0x45, a6xx_hlsq_2d_duplicate_cluster,
		ARRAY_SIZE(a6xx_hlsq_2d_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002A000, 0x21, a6xx_sp_duplicate_cluster,
		ARRAY_SIZE(a6xx_sp_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002C000, 0x1, a6xx_tp_duplicate_cluster,
		ARRAY_SIZE(a6xx_tp_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002E000, 0x42, a6xx_sp_ps_hlsq_cluster,
		ARRAY_SIZE(a6xx_sp_ps_hlsq_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002F000, 0x46, a6xx_sp_ps_hlsq_2d_cluster,
		ARRAY_SIZE(a6xx_sp_ps_hlsq_2d_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002A000, 0x22, a6xx_sp_ps_sp_cluster,
		ARRAY_SIZE(a6xx_sp_ps_sp_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002B000, 0x26, a6xx_sp_ps_sp_2d_cluster,
		ARRAY_SIZE(a6xx_sp_ps_sp_2d_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002C000, 0x2, a6xx_sp_ps_tp_cluster,
		ARRAY_SIZE(a6xx_sp_ps_tp_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002D000, 0x6, a6xx_sp_ps_tp_2d_cluster,
		ARRAY_SIZE(a6xx_sp_ps_tp_2d_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002E000, 0x42, a6xx_hlsq_duplicate_cluster,
		ARRAY_SIZE(a6xx_hlsq_duplicate_cluster) / 2 },
	/*
	 * NOTE(review): the last two entries carry PS-context statetypes
	 * (0x22/0x2) but are tagged CP_CLUSTER_SP_VS — confirm this is
	 * intentional (duplicate blocks dumped under the VS cluster id).
	 */
	{ CP_CLUSTER_SP_VS, 0x0002A000, 0x22, a6xx_sp_duplicate_cluster,
		ARRAY_SIZE(a6xx_sp_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002C000, 0x2, a6xx_tp_duplicate_cluster,
		ARRAY_SIZE(a6xx_tp_duplicate_cluster) / 2 },
};
192
/* Per-section context passed to the DBGAHB cluster snapshot callbacks */
struct a6xx_cluster_dbgahb_regs_info {
	struct a6xx_cluster_dbgahb_registers *cluster;
	unsigned int ctxt_id;	/* context bank (0..A6XX_NUM_CTXTS-1) */
};
197
/* Non-context (global) HLSQ/SP/TP registers read through the DBGAHB
 * aperture: start/end dword-offset pairs (inclusive).
 */
static const unsigned int a6xx_hlsq_non_ctx_registers[] = {
	0xBE00, 0xBE01, 0xBE04, 0xBE05, 0xBE08, 0xBE09, 0xBE10, 0xBE15,
	0xBE20, 0xBE23,
};

static const unsigned int a6xx_sp_non_ctx_registers[] = {
	0xAE00, 0xAE04, 0xAE0C, 0xAE0C, 0xAE0F, 0xAE2B, 0xAE30, 0xAE32,
	0xAE35, 0xAE35, 0xAE3A, 0xAE3F, 0xAE50, 0xAE52,
};

static const unsigned int a6xx_tp_non_ctx_registers[] = {
	0xB600, 0xB601, 0xB604, 0xB605, 0xB610, 0xB61B, 0xB620, 0xB623,
};
211
/*
 * Non-context DBGAHB register groups. 'offset' is the crashdump buffer
 * offset, filled in when the capture script is built.
 */
static struct a6xx_non_ctx_dbgahb_registers {
	unsigned int regbase;		/* DBGAHB aperture base (bytes) */
	unsigned int statetype;		/* debug state type select */
	const unsigned int *regs;	/* start/end pairs (inclusive) */
	unsigned int num_sets;		/* number of pairs in 'regs' */
	unsigned int offset;		/* crashdump buffer offset */
} a6xx_non_ctx_dbgahb[] = {
	{ 0x0002F800, 0x40, a6xx_hlsq_non_ctx_registers,
		ARRAY_SIZE(a6xx_hlsq_non_ctx_registers) / 2 },
	{ 0x0002B800, 0x20, a6xx_sp_non_ctx_registers,
		ARRAY_SIZE(a6xx_sp_non_ctx_registers) / 2 },
	{ 0x0002D800, 0x0, a6xx_tp_non_ctx_registers,
		ARRAY_SIZE(a6xx_tp_non_ctx_registers) / 2 },
};
226
/* VBIF registers for VBIF version 0x20xxxxxx: start/end pairs (inclusive) */
static const unsigned int a6xx_vbif_ver_20xxxxxx_registers[] = {
	/* VBIF */
	0x3000, 0x3007, 0x300C, 0x3014, 0x3018, 0x302D, 0x3030, 0x3031,
	0x3034, 0x3036, 0x303C, 0x303D, 0x3040, 0x3040, 0x3042, 0x3042,
	0x3049, 0x3049, 0x3058, 0x3058, 0x305A, 0x3061, 0x3064, 0x3068,
	0x306C, 0x306D, 0x3080, 0x3088, 0x308B, 0x308C, 0x3090, 0x3094,
	0x3098, 0x3098, 0x309C, 0x309C, 0x30C0, 0x30C0, 0x30C8, 0x30C8,
	0x30D0, 0x30D0, 0x30D8, 0x30D8, 0x30E0, 0x30E0, 0x3100, 0x3100,
	0x3108, 0x3108, 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120,
	0x3124, 0x3125, 0x3129, 0x3129, 0x3131, 0x3131, 0x3154, 0x3154,
	0x3156, 0x3156, 0x3158, 0x3158, 0x315A, 0x315A, 0x315C, 0x315C,
	0x315E, 0x315E, 0x3160, 0x3160, 0x3162, 0x3162, 0x340C, 0x340C,
	0x3410, 0x3410, 0x3800, 0x3801,
};
241
/* GMU registers in the GX power domain: start/end pairs (inclusive) */
static const unsigned int a6xx_gmu_gx_registers[] = {
	/* GMU GX */
	0x1A800, 0x1A800, 0x1A810, 0x1A813, 0x1A816, 0x1A816, 0x1A818, 0x1A81B,
	0x1A81E, 0x1A81E, 0x1A820, 0x1A823, 0x1A826, 0x1A826, 0x1A828, 0x1A82B,
	0x1A82E, 0x1A82E, 0x1A830, 0x1A833, 0x1A836, 0x1A836, 0x1A838, 0x1A83B,
	0x1A83E, 0x1A83E, 0x1A840, 0x1A843, 0x1A846, 0x1A846, 0x1A880, 0x1A884,
	0x1A900, 0x1A92B, 0x1A940, 0x1A940,
};
250
/* GMU CX-domain registers (TCM, RSCC, AO, GPU CC): start/end pairs */
static const unsigned int a6xx_gmu_registers[] = {
	/* GMU TCM */
	0x1B400, 0x1C3FF, 0x1C400, 0x1D3FF,
	/* GMU CX */
	0x1F400, 0x1F407, 0x1F410, 0x1F412, 0x1F500, 0x1F500, 0x1F507, 0x1F50A,
	0x1F800, 0x1F804, 0x1F807, 0x1F808, 0x1F80B, 0x1F80C, 0x1F80F, 0x1F81C,
	0x1F824, 0x1F82A, 0x1F82D, 0x1F830, 0x1F840, 0x1F853, 0x1F887, 0x1F889,
	0x1F8A0, 0x1F8A2, 0x1F8A4, 0x1F8AF, 0x1F8C0, 0x1F8C3, 0x1F8D0, 0x1F8D0,
	0x1F8E4, 0x1F8E4, 0x1F8E8, 0x1F8EC, 0x1F900, 0x1F903, 0x1F940, 0x1F940,
	0x1F942, 0x1F944, 0x1F94C, 0x1F94D, 0x1F94F, 0x1F951, 0x1F954, 0x1F954,
	0x1F957, 0x1F958, 0x1F95D, 0x1F95D, 0x1F962, 0x1F962, 0x1F964, 0x1F965,
	0x1F980, 0x1F986, 0x1F990, 0x1F99E, 0x1F9C0, 0x1F9C0, 0x1F9C5, 0x1F9CC,
	0x1F9E0, 0x1F9E2, 0x1F9F0, 0x1F9F0, 0x1FA00, 0x1FA01,
	/* GPU RSCC */
	0x2348C, 0x2348C, 0x23501, 0x23502, 0x23740, 0x23742, 0x23744, 0x23747,
	0x2374C, 0x23787, 0x237EC, 0x237EF, 0x237F4, 0x2382F, 0x23894, 0x23897,
	0x2389C, 0x238D7, 0x2393C, 0x2393F, 0x23944, 0x2397F,
	/* GMU AO */
	0x23B00, 0x23B16, 0x23C00, 0x23C00,
	/* GPU CC */
	0x24000, 0x24012, 0x24040, 0x24052, 0x24400, 0x24404, 0x24407, 0x2440B,
	0x24415, 0x2441C, 0x2441E, 0x2442D, 0x2443C, 0x2443D, 0x2443F, 0x24440,
	0x24442, 0x24449, 0x24458, 0x2445A, 0x24540, 0x2455E, 0x24800, 0x24802,
	0x24C00, 0x24C02, 0x25400, 0x25402, 0x25800, 0x25802, 0x25C00, 0x25C02,
	0x26000, 0x26002,
	/* GPU CC ACD */
	0x26400, 0x26416, 0x26420, 0x26427,
};
279
/* RB registers behind the RAC aperture: start/end pairs (inclusive) */
static const unsigned int a6xx_rb_rac_registers[] = {
	0x8E04, 0x8E05, 0x8E07, 0x8E08, 0x8E10, 0x8E1C, 0x8E20, 0x8E25,
	0x8E28, 0x8E28, 0x8E2C, 0x8E2F, 0x8E50, 0x8E52,
};

/* RB registers behind the RBP aperture: start/end pairs (inclusive) */
static const unsigned int a6xx_rb_rbp_registers[] = {
	0x8E01, 0x8E01, 0x8E0C, 0x8E0C, 0x8E3B, 0x8E3E, 0x8E40, 0x8E43,
	0x8E53, 0x8E5F, 0x8E70, 0x8E77,
};
289
/* Map VBIF hardware version (value & mask) to the register list to dump */
static const struct adreno_vbif_snapshot_registers
a6xx_vbif_snapshot_registers[] = {
	{ 0x20040000, 0xFF000000, a6xx_vbif_ver_20xxxxxx_registers,
				ARRAY_SIZE(a6xx_vbif_ver_20xxxxxx_registers)/2},
};
295
/*
 * Set of registers to dump for A6XX on snapshot.
 * Registers in pairs - first value is the start offset, second
 * is the stop offset (inclusive)
 */

static const unsigned int a6xx_registers[] = {
	/* RBBM */
	0x0000, 0x0002, 0x0010, 0x0010, 0x0012, 0x0012, 0x0018, 0x001B,
	0x001e, 0x0032, 0x0038, 0x003C, 0x0042, 0x0042, 0x0044, 0x0044,
	0x0047, 0x0047, 0x0056, 0x0056, 0x00AD, 0x00AE, 0x00B0, 0x00FB,
	0x0100, 0x011D, 0x0200, 0x020D, 0x0218, 0x023D, 0x0400, 0x04F9,
	0x0500, 0x0500, 0x0505, 0x050B, 0x050E, 0x0511, 0x0533, 0x0533,
	0x0540, 0x0555,
	/* CP */
	0x0800, 0x0808, 0x0810, 0x0813, 0x0820, 0x0821, 0x0823, 0x0824,
	0x0826, 0x0827, 0x0830, 0x0833, 0x0840, 0x0843, 0x084F, 0x086F,
	0x0880, 0x088A, 0x08A0, 0x08AB, 0x08C0, 0x08C4, 0x08D0, 0x08DD,
	0x08F0, 0x08F3, 0x0900, 0x0903, 0x0908, 0x0911, 0x0928, 0x093E,
	0x0942, 0x094D, 0x0980, 0x0984, 0x098D, 0x0996, 0x0998, 0x099E,
	0x09A0, 0x09A6, 0x09A8, 0x09AE, 0x09B0, 0x09B1, 0x09C2, 0x09C8,
	0x0A00, 0x0A03,
	/* VSC */
	0x0C00, 0x0C04, 0x0C06, 0x0C06, 0x0C10, 0x0CD9, 0x0E00, 0x0E0E,
	/* UCHE */
	0x0E10, 0x0E13, 0x0E17, 0x0E19, 0x0E1C, 0x0E2B, 0x0E30, 0x0E32,
	0x0E38, 0x0E39,
	/* GRAS */
	0x8600, 0x8601, 0x8610, 0x861B, 0x8620, 0x8620, 0x8628, 0x862B,
	0x8630, 0x8637,
	/* VPC */
	0x9600, 0x9604, 0x9624, 0x9637,
	/* PC */
	0x9E00, 0x9E01, 0x9E03, 0x9E0E, 0x9E11, 0x9E16, 0x9E19, 0x9E19,
	0x9E1C, 0x9E1C, 0x9E20, 0x9E23, 0x9E30, 0x9E31, 0x9E34, 0x9E34,
	0x9E70, 0x9E72, 0x9E78, 0x9E79, 0x9E80, 0x9FFF,
	/* VFD */
	0xA600, 0xA601, 0xA603, 0xA603, 0xA60A, 0xA60A, 0xA610, 0xA617,
	0xA630, 0xA630,
};
336
/*
 * Set of registers to dump for A6XX before actually triggering crash dumper.
 * Registers in pairs - first value is the start offset, second
 * is the stop offset (inclusive)
 */
static const unsigned int a6xx_pre_crashdumper_registers[] = {
	/* RBBM: RBBM_STATUS - RBBM_STATUS3 */
	0x210, 0x213,
	/* CP: CP_STATUS_1 */
	0x825, 0x825,
};
348
/* Hardware block selectors for the A6XX debug bus */
enum a6xx_debugbus_id {
	A6XX_DBGBUS_CP           = 0x1,
	A6XX_DBGBUS_RBBM         = 0x2,
	A6XX_DBGBUS_VBIF         = 0x3,
	A6XX_DBGBUS_HLSQ         = 0x4,
	A6XX_DBGBUS_UCHE         = 0x5,
	A6XX_DBGBUS_DPM          = 0x6,
	A6XX_DBGBUS_TESS         = 0x7,
	A6XX_DBGBUS_PC           = 0x8,
	A6XX_DBGBUS_VFDP         = 0x9,
	A6XX_DBGBUS_VPC          = 0xa,
	A6XX_DBGBUS_TSE          = 0xb,
	A6XX_DBGBUS_RAS          = 0xc,
	A6XX_DBGBUS_VSC          = 0xd,
	A6XX_DBGBUS_COM          = 0xe,
	A6XX_DBGBUS_LRZ          = 0x10,
	A6XX_DBGBUS_A2D          = 0x11,
	A6XX_DBGBUS_CCUFCHE      = 0x12,
	A6XX_DBGBUS_GMU_CX       = 0x13,
	A6XX_DBGBUS_RBP          = 0x14,
	A6XX_DBGBUS_DCS          = 0x15,
	A6XX_DBGBUS_RBBM_CFG     = 0x16,
	A6XX_DBGBUS_CX           = 0x17,
	A6XX_DBGBUS_GMU_GX       = 0x18,
	A6XX_DBGBUS_TPFCHE       = 0x19,
	A6XX_DBGBUS_GPC          = 0x1d,
	A6XX_DBGBUS_LARC         = 0x1e,
	A6XX_DBGBUS_HLSQ_SPTP    = 0x1f,
	A6XX_DBGBUS_RB_0         = 0x20,
	A6XX_DBGBUS_RB_1         = 0x21,
	A6XX_DBGBUS_UCHE_WRAPPER = 0x24,
	A6XX_DBGBUS_CCU_0        = 0x28,
	A6XX_DBGBUS_CCU_1        = 0x29,
	A6XX_DBGBUS_VFD_0        = 0x38,
	A6XX_DBGBUS_VFD_1        = 0x39,
	A6XX_DBGBUS_VFD_2        = 0x3a,
	A6XX_DBGBUS_VFD_3        = 0x3b,
	A6XX_DBGBUS_SP_0         = 0x40,
	A6XX_DBGBUS_SP_1         = 0x41,
	A6XX_DBGBUS_TPL1_0       = 0x48,
	A6XX_DBGBUS_TPL1_1       = 0x49,
	A6XX_DBGBUS_TPL1_2       = 0x4a,
	A6XX_DBGBUS_TPL1_3       = 0x4b,
};
393
/*
 * Debug bus blocks read through the DBGC. Second field is the dword count
 * per block (0x100 for every block here) — confirm against
 * struct adreno_debugbus_block's field meaning.
 */
static const struct adreno_debugbus_block a6xx_dbgc_debugbus_blocks[] = {
	{ A6XX_DBGBUS_CP, 0x100, },
	{ A6XX_DBGBUS_RBBM, 0x100, },
	{ A6XX_DBGBUS_HLSQ, 0x100, },
	{ A6XX_DBGBUS_UCHE, 0x100, },
	{ A6XX_DBGBUS_DPM, 0x100, },
	{ A6XX_DBGBUS_TESS, 0x100, },
	{ A6XX_DBGBUS_PC, 0x100, },
	{ A6XX_DBGBUS_VFDP, 0x100, },
	{ A6XX_DBGBUS_VPC, 0x100, },
	{ A6XX_DBGBUS_TSE, 0x100, },
	{ A6XX_DBGBUS_RAS, 0x100, },
	{ A6XX_DBGBUS_VSC, 0x100, },
	{ A6XX_DBGBUS_COM, 0x100, },
	{ A6XX_DBGBUS_LRZ, 0x100, },
	{ A6XX_DBGBUS_A2D, 0x100, },
	{ A6XX_DBGBUS_CCUFCHE, 0x100, },
	{ A6XX_DBGBUS_RBP, 0x100, },
	{ A6XX_DBGBUS_DCS, 0x100, },
	{ A6XX_DBGBUS_RBBM_CFG, 0x100, },
	{ A6XX_DBGBUS_GMU_GX, 0x100, },
	{ A6XX_DBGBUS_TPFCHE, 0x100, },
	{ A6XX_DBGBUS_GPC, 0x100, },
	{ A6XX_DBGBUS_LARC, 0x100, },
	{ A6XX_DBGBUS_HLSQ_SPTP, 0x100, },
	{ A6XX_DBGBUS_RB_0, 0x100, },
	{ A6XX_DBGBUS_RB_1, 0x100, },
	{ A6XX_DBGBUS_UCHE_WRAPPER, 0x100, },
	{ A6XX_DBGBUS_CCU_0, 0x100, },
	{ A6XX_DBGBUS_CCU_1, 0x100, },
	{ A6XX_DBGBUS_VFD_0, 0x100, },
	{ A6XX_DBGBUS_VFD_1, 0x100, },
	{ A6XX_DBGBUS_VFD_2, 0x100, },
	{ A6XX_DBGBUS_VFD_3, 0x100, },
	{ A6XX_DBGBUS_SP_0, 0x100, },
	{ A6XX_DBGBUS_SP_1, 0x100, },
	{ A6XX_DBGBUS_TPL1_0, 0x100, },
	{ A6XX_DBGBUS_TPL1_1, 0x100, },
	{ A6XX_DBGBUS_TPL1_2, 0x100, },
	{ A6XX_DBGBUS_TPL1_3, 0x100, },
};
Shrenuj Bansal41665402016-12-16 15:25:54 -0800435
/* VBIF debug bus block (read via the VBIF-specific path, hence not in
 * the DBGC block table above)
 */
static const struct adreno_debugbus_block a6xx_vbif_debugbus_blocks = {
	A6XX_DBGBUS_VBIF, 0x100,
};
439
/* Mapped CX DBGC region; set up before reading the CX debug bus blocks */
static void __iomem *a6xx_cx_dbgc;
static const struct adreno_debugbus_block a6xx_cx_dbgc_debugbus_blocks[] = {
	{ A6XX_DBGBUS_GMU_CX, 0x100, },
	{ A6XX_DBGBUS_CX, 0x100, },
};
445
Lynus Vaz9ad67a32017-03-10 14:55:02 +0530446#define A6XX_NUM_SHADER_BANKS 3
447#define A6XX_SHADER_STATETYPE_SHIFT 8
448
/* Statetype selectors for the shader-block memories readable by the
 * crash dumper (shifted by A6XX_SHADER_STATETYPE_SHIFT when programmed)
 */
enum a6xx_shader_obj {
	A6XX_TP0_TMO_DATA               = 0x9,
	A6XX_TP0_SMO_DATA               = 0xa,
	A6XX_TP0_MIPMAP_BASE_DATA       = 0xb,
	A6XX_TP1_TMO_DATA               = 0x19,
	A6XX_TP1_SMO_DATA               = 0x1a,
	A6XX_TP1_MIPMAP_BASE_DATA       = 0x1b,
	A6XX_SP_INST_DATA               = 0x29,
	A6XX_SP_LB_0_DATA               = 0x2a,
	A6XX_SP_LB_1_DATA               = 0x2b,
	A6XX_SP_LB_2_DATA               = 0x2c,
	A6XX_SP_LB_3_DATA               = 0x2d,
	A6XX_SP_LB_4_DATA               = 0x2e,
	A6XX_SP_LB_5_DATA               = 0x2f,
	A6XX_SP_CB_BINDLESS_DATA        = 0x30,
	A6XX_SP_CB_LEGACY_DATA          = 0x31,
	A6XX_SP_UAV_DATA                = 0x32,
	A6XX_SP_INST_TAG                = 0x33,
	A6XX_SP_CB_BINDLESS_TAG         = 0x34,
	A6XX_SP_TMO_UMO_TAG             = 0x35,
	A6XX_SP_SMO_TAG                 = 0x36,
	A6XX_SP_STATE_DATA              = 0x37,
	A6XX_HLSQ_CHUNK_CVS_RAM         = 0x49,
	A6XX_HLSQ_CHUNK_CPS_RAM         = 0x4a,
	A6XX_HLSQ_CHUNK_CVS_RAM_TAG     = 0x4b,
	A6XX_HLSQ_CHUNK_CPS_RAM_TAG     = 0x4c,
	A6XX_HLSQ_ICB_CVS_CB_BASE_TAG   = 0x4d,
	A6XX_HLSQ_ICB_CPS_CB_BASE_TAG   = 0x4e,
	A6XX_HLSQ_CVS_MISC_RAM          = 0x50,
	A6XX_HLSQ_CPS_MISC_RAM          = 0x51,
	A6XX_HLSQ_INST_RAM              = 0x52,
	A6XX_HLSQ_GFX_CVS_CONST_RAM     = 0x53,
	A6XX_HLSQ_GFX_CPS_CONST_RAM     = 0x54,
	A6XX_HLSQ_CVS_MISC_RAM_TAG      = 0x55,
	A6XX_HLSQ_CPS_MISC_RAM_TAG      = 0x56,
	A6XX_HLSQ_INST_RAM_TAG          = 0x57,
	A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG = 0x58,
	A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG = 0x59,
	A6XX_HLSQ_PWR_REST_RAM          = 0x5a,
	A6XX_HLSQ_PWR_REST_TAG          = 0x5b,
	A6XX_HLSQ_DATAPATH_META         = 0x60,
	A6XX_HLSQ_FRONTEND_META         = 0x61,
	A6XX_HLSQ_INDIRECT_META         = 0x62,
	A6XX_HLSQ_BACKEND_META          = 0x63
};
494
/* One shader memory block: 'sz' is its size in dwords, 'offset' is the
 * crashdump buffer offset for bank 0 (filled when the script is built)
 */
struct a6xx_shader_block {
	unsigned int statetype;	/* enum a6xx_shader_obj value */
	unsigned int sz;	/* block size per bank, in dwords */
	uint64_t offset;	/* offset into the crashdump buffer */
};

/* Per-section context for a6xx_snapshot_shader_memory() */
struct a6xx_shader_block_info {
	struct a6xx_shader_block *block;
	unsigned int bank;	/* 0..A6XX_NUM_SHADER_BANKS-1 */
	uint64_t offset;	/* this bank's offset in the crashdump buffer */
};
506
/*
 * Shader memories to capture; offsets are assigned when the crashdump
 * script is built. NOTE(review): A6XX_HLSQ_FRONTEND/BACKEND_META are
 * declared in the enum but A6XX_HLSQ_BACKEND_META has no entry here —
 * confirm that is intentional.
 */
static struct a6xx_shader_block a6xx_shader_blocks[] = {
	{A6XX_TP0_TMO_DATA,               0x200},
	{A6XX_TP0_SMO_DATA,               0x80,},
	{A6XX_TP0_MIPMAP_BASE_DATA,       0x3C0},
	{A6XX_TP1_TMO_DATA,               0x200},
	{A6XX_TP1_SMO_DATA,               0x80,},
	{A6XX_TP1_MIPMAP_BASE_DATA,       0x3C0},
	{A6XX_SP_INST_DATA,               0x800},
	{A6XX_SP_LB_0_DATA,               0x800},
	{A6XX_SP_LB_1_DATA,               0x800},
	{A6XX_SP_LB_2_DATA,               0x800},
	{A6XX_SP_LB_3_DATA,               0x800},
	{A6XX_SP_LB_4_DATA,               0x800},
	{A6XX_SP_LB_5_DATA,               0x200},
	{A6XX_SP_CB_BINDLESS_DATA,        0x2000},
	{A6XX_SP_CB_LEGACY_DATA,          0x280,},
	{A6XX_SP_UAV_DATA,                0x80,},
	{A6XX_SP_INST_TAG,                0x80,},
	{A6XX_SP_CB_BINDLESS_TAG,         0x80,},
	{A6XX_SP_TMO_UMO_TAG,             0x80,},
	{A6XX_SP_SMO_TAG,                 0x80},
	{A6XX_SP_STATE_DATA,              0x3F},
	{A6XX_HLSQ_CHUNK_CVS_RAM,         0x1C0},
	{A6XX_HLSQ_CHUNK_CPS_RAM,         0x280},
	{A6XX_HLSQ_CHUNK_CVS_RAM_TAG,     0x40,},
	{A6XX_HLSQ_CHUNK_CPS_RAM_TAG,     0x40,},
	{A6XX_HLSQ_ICB_CVS_CB_BASE_TAG,   0x4,},
	{A6XX_HLSQ_ICB_CPS_CB_BASE_TAG,   0x4,},
	{A6XX_HLSQ_CVS_MISC_RAM,          0x1C0},
	{A6XX_HLSQ_CPS_MISC_RAM,          0x580},
	{A6XX_HLSQ_INST_RAM,              0x800},
	{A6XX_HLSQ_GFX_CVS_CONST_RAM,     0x800},
	{A6XX_HLSQ_GFX_CPS_CONST_RAM,     0x800},
	{A6XX_HLSQ_CVS_MISC_RAM_TAG,      0x8,},
	{A6XX_HLSQ_CPS_MISC_RAM_TAG,      0x4,},
	{A6XX_HLSQ_INST_RAM_TAG,          0x80,},
	{A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0xC,},
	{A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x10},
	{A6XX_HLSQ_PWR_REST_RAM,          0x28},
	{A6XX_HLSQ_PWR_REST_TAG,          0x14},
	{A6XX_HLSQ_DATAPATH_META,         0x40,},
	{A6XX_HLSQ_FRONTEND_META,         0x40},
	{A6XX_HLSQ_INDIRECT_META,         0x40,}
};
551
/* Crash dumper state: the capture script, the buffer the dumper writes
 * register values into, and whether the last dump completed successfully
 */
static struct kgsl_memdesc a6xx_capturescript;
static struct kgsl_memdesc a6xx_crashdump_registers;
static bool crash_dump_valid;
555
/* Top-level register lists dumped by a6xx_snapshot_registers(); 'sel'
 * selects an RB sub-block aperture before the list is read, when set
 */
static struct reg_list {
	const unsigned int *regs;	/* start/end pairs (inclusive) */
	unsigned int count;		/* number of pairs in 'regs' */
	const struct sel_reg *sel;	/* optional aperture select, or NULL */
} a6xx_reg_list[] = {
	{ a6xx_registers, ARRAY_SIZE(a6xx_registers) / 2, NULL },
	{ a6xx_rb_rac_registers, ARRAY_SIZE(a6xx_rb_rac_registers) / 2,
		&_a6xx_rb_rac_aperture },
	{ a6xx_rb_rbp_registers, ARRAY_SIZE(a6xx_rb_rbp_registers) / 2,
		&_a6xx_rb_rbp_aperture },
};
567
568#define REG_PAIR_COUNT(_a, _i) \
569 (((_a)[(2 * (_i)) + 1] - (_a)[2 * (_i)]) + 1)
570
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -0600571static size_t a6xx_legacy_snapshot_registers(struct kgsl_device *device,
Lynus Vaz96de8522017-09-13 20:17:03 +0530572 u8 *buf, size_t remain, struct reg_list *regs)
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -0600573{
Lynus Vaz96de8522017-09-13 20:17:03 +0530574 struct kgsl_snapshot_registers snapshot_regs = {
575 .regs = regs->regs,
576 .count = regs->count,
577 };
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -0600578
Lynus Vaz96de8522017-09-13 20:17:03 +0530579 if (regs->sel)
580 kgsl_regwrite(device, regs->sel->host_reg, regs->sel->val);
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -0600581
Lynus Vaz96de8522017-09-13 20:17:03 +0530582 return kgsl_snapshot_dump_registers(device, buf, remain,
583 &snapshot_regs);
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -0600584}
585
/*
 * Copy registers captured by the crash dumper into the snapshot as
 * <address, value> dword pairs after a kgsl_snapshot_regs header.
 * Falls back to live AHB reads when the crash dump is not valid.
 * Returns the number of bytes written to 'buf'.
 */
static size_t a6xx_snapshot_registers(struct kgsl_device *device, u8 *buf,
	size_t remain, void *priv)
{
	struct kgsl_snapshot_regs *header = (struct kgsl_snapshot_regs *)buf;
	struct reg_list *regs = (struct reg_list *)priv;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	/*
	 * NOTE(review): 'src' starts at the base of the crashdump buffer for
	 * every reg_list passed in; verify the capture script writes each
	 * list's data starting at offset 0, otherwise a per-list offset is
	 * needed here.
	 */
	unsigned int *src = (unsigned int *)a6xx_crashdump_registers.hostptr;
	unsigned int j, k;
	unsigned int count = 0;

	/* No valid crash dump - read the registers directly instead */
	if (crash_dump_valid == false)
		return a6xx_legacy_snapshot_registers(device, buf, remain,
			regs);

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	remain -= sizeof(*header);

	for (j = 0; j < regs->count; j++) {
		unsigned int start = regs->regs[2 * j];
		unsigned int end = regs->regs[(2 * j) + 1];

		/* Each register costs 8 bytes: address dword + value dword */
		if (remain < ((end - start) + 1) * 8) {
			SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
			goto out;
		}

		remain -= ((end - start) + 1) * 8;

		for (k = start; k <= end; k++, count++) {
			*data++ = k;
			*data++ = *src++;
		}
	}

out:
	header->count = count;

	/* Return the size of the section */
	return (count * 8) + sizeof(*header);
}
630
Lynus Vaz030473e2017-06-22 17:33:06 +0530631static size_t a6xx_snapshot_pre_crashdump_regs(struct kgsl_device *device,
632 u8 *buf, size_t remain, void *priv)
633{
634 struct kgsl_snapshot_registers pre_cdregs = {
635 .regs = a6xx_pre_crashdumper_registers,
636 .count = ARRAY_SIZE(a6xx_pre_crashdumper_registers)/2,
637 };
638
639 return kgsl_snapshot_dump_registers(device, buf, remain, &pre_cdregs);
640}
641
/*
 * Emit one shader memory bank as a snapshot SHADER section, copying the
 * data the crash dumper already captured at info->offset. Returns the
 * section size in bytes, or 0 if 'remain' is too small.
 */
static size_t a6xx_snapshot_shader_memory(struct kgsl_device *device,
		u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_shader *header =
		(struct kgsl_snapshot_shader *) buf;
	struct a6xx_shader_block_info *info =
		(struct a6xx_shader_block_info *) priv;
	struct a6xx_shader_block *block = info->block;
	unsigned int *data = (unsigned int *) (buf + sizeof(*header));

	if (remain < SHADER_SECTION_SZ(block->sz)) {
		SNAPSHOT_ERR_NOMEM(device, "SHADER MEMORY");
		return 0;
	}

	header->type = block->statetype;
	header->index = info->bank;
	header->size = block->sz;

	/* The dumper stored this bank at info->offset in the crashdump buf */
	memcpy(data, a6xx_crashdump_registers.hostptr + info->offset,
		block->sz);

	return SHADER_SECTION_SZ(block->sz);
}
666
667static void a6xx_snapshot_shader(struct kgsl_device *device,
668 struct kgsl_snapshot *snapshot)
669{
670 unsigned int i, j;
671 struct a6xx_shader_block_info info;
672
673 /* Shader blocks can only be read by the crash dumper */
674 if (crash_dump_valid == false)
675 return;
676
677 for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
678 for (j = 0; j < A6XX_NUM_SHADER_BANKS; j++) {
679 info.block = &a6xx_shader_blocks[i];
680 info.bank = j;
681 info.offset = a6xx_shader_blocks[i].offset +
682 (j * a6xx_shader_blocks[i].sz);
683
684 /* Shader working/shadow memory */
685 kgsl_snapshot_add_section(device,
686 KGSL_SNAPSHOT_SECTION_SHADER,
687 snapshot, a6xx_snapshot_shader_memory, &info);
688 }
689 }
690}
691
/*
 * Dump the CP mempool via its indexed debug port. The pool size register
 * is zeroed during the dump to keep the pool stable, then restored; the
 * size value inside the dumped data is patched back to the real value.
 */
static void a6xx_snapshot_mempool(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot)
{
	unsigned int pool_size;
	u8 *buf = snapshot->ptr;	/* remember where this section starts */

	/* Set the mempool size to 0 to stabilize it while dumping */
	kgsl_regread(device, A6XX_CP_MEM_POOL_SIZE, &pool_size);
	kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, 0);

	kgsl_snapshot_indexed_registers(device, snapshot,
		A6XX_CP_MEM_POOL_DBG_ADDR, A6XX_CP_MEM_POOL_DBG_DATA,
		0, 0x2060);

	/*
	 * Data at offset 0x2000 in the mempool section is the mempool size.
	 * Since we set it to 0, patch in the original size so that the data
	 * is consistent.
	 */
	if (buf < snapshot->ptr) {
		unsigned int *data;

		/* Skip over the headers */
		buf += sizeof(struct kgsl_snapshot_section_header) +
			sizeof(struct kgsl_snapshot_indexed_regs);

		/* Dword index 0x2000 of the dumped data (one dword/entry) */
		data = (unsigned int *)buf + 0x2000;
		*data = pool_size;
	}

	/* Restore the saved mempool size */
	kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, pool_size);
}
725
Lynus Vaz461e2382017-01-16 19:35:41 +0530726static inline unsigned int a6xx_read_dbgahb(struct kgsl_device *device,
727 unsigned int regbase, unsigned int reg)
728{
729 unsigned int read_reg = A6XX_HLSQ_DBG_AHB_READ_APERTURE +
730 reg - regbase / 4;
731 unsigned int val;
732
733 kgsl_regread(device, read_reg, &val);
734 return val;
735}
736
/*
 * Legacy (no crash dumper) path for dumping a DBGAHB context cluster as
 * an MVC section: for each register range write a "start|marker, end"
 * pair followed by the values read live through the aperture. Only runs
 * when legacy snapshots are enabled. Returns bytes written to 'buf'.
 */
static size_t a6xx_legacy_snapshot_cluster_dbgahb(struct kgsl_device *device,
				u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_mvc_regs *header =
				(struct kgsl_snapshot_mvc_regs *)buf;
	struct a6xx_cluster_dbgahb_regs_info *info =
				(struct a6xx_cluster_dbgahb_regs_info *)priv;
	struct a6xx_cluster_dbgahb_registers *cur_cluster = info->cluster;
	unsigned int read_sel;
	unsigned int data_size = 0;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	int i, j;

	/* Live AHB reads are only allowed in legacy snapshot mode */
	if (!device->snapshot_legacy)
		return 0;

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	remain -= sizeof(*header);

	header->ctxt_id = info->ctxt_id;
	header->cluster_id = cur_cluster->id;

	/*
	 * Select the state machine: context N uses statetype + N * 2
	 * (low byte), programmed into bits 15:8 of the read select.
	 */
	read_sel = ((cur_cluster->statetype + info->ctxt_id * 2) & 0xff) << 8;
	kgsl_regwrite(device, A6XX_HLSQ_DBG_READ_SEL, read_sel);

	for (i = 0; i < cur_cluster->num_sets; i++) {
		unsigned int start = cur_cluster->regs[2 * i];
		unsigned int end = cur_cluster->regs[2 * i + 1];

		/* Each range costs: start dword, end dword, one value each */
		if (remain < (end - start + 3) * 4) {
			SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
			goto out;
		}

		remain -= (end - start + 3) * 4;
		data_size += (end - start + 3) * 4;

		/* Bit 31 marks this dword as a range start in the section */
		*data++ = start | (1 << 31);
		*data++ = end;

		for (j = start; j <= end; j++) {
			unsigned int val;

			val = a6xx_read_dbgahb(device, cur_cluster->regbase, j);
			*data++ = val;

		}
	}

out:
	return data_size + sizeof(*header);
}
793
Lynus Vaz1e258612017-04-27 21:35:22 +0530794static size_t a6xx_snapshot_cluster_dbgahb(struct kgsl_device *device, u8 *buf,
795 size_t remain, void *priv)
796{
797 struct kgsl_snapshot_mvc_regs *header =
798 (struct kgsl_snapshot_mvc_regs *)buf;
799 struct a6xx_cluster_dbgahb_regs_info *info =
800 (struct a6xx_cluster_dbgahb_regs_info *)priv;
801 struct a6xx_cluster_dbgahb_registers *cluster = info->cluster;
802 unsigned int data_size = 0;
803 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
804 int i, j;
805 unsigned int *src;
806
807
808 if (crash_dump_valid == false)
809 return a6xx_legacy_snapshot_cluster_dbgahb(device, buf, remain,
810 info);
811
812 if (remain < sizeof(*header)) {
813 SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
814 return 0;
815 }
816
817 remain -= sizeof(*header);
818
819 header->ctxt_id = info->ctxt_id;
820 header->cluster_id = cluster->id;
821
822 src = (unsigned int *)(a6xx_crashdump_registers.hostptr +
823 (header->ctxt_id ? cluster->offset1 : cluster->offset0));
824
825 for (i = 0; i < cluster->num_sets; i++) {
826 unsigned int start;
827 unsigned int end;
828
829 start = cluster->regs[2 * i];
830 end = cluster->regs[2 * i + 1];
831
832 if (remain < (end - start + 3) * 4) {
833 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
834 goto out;
835 }
836
837 remain -= (end - start + 3) * 4;
838 data_size += (end - start + 3) * 4;
839
840 *data++ = start | (1 << 31);
841 *data++ = end;
842 for (j = start; j <= end; j++)
843 *data++ = *src++;
844 }
845out:
846 return data_size + sizeof(*header);
847}
848
/*
 * a6xx_legacy_snapshot_non_ctx_dbgahb() - Dump the non-context DBG AHB
 * registers by reading the hardware directly
 * @device: Device being snapshotted
 * @buf: Destination snapshot buffer
 * @remain: Bytes remaining in the snapshot buffer
 * @priv: Pointer to a struct a6xx_non_ctx_dbgahb_registers
 *
 * Emits address/value pairs (8 bytes per register). Only runs when legacy
 * snapshotting is enabled.
 *
 * Return: number of bytes written to @buf (0 on error or when disabled).
 */
static size_t a6xx_legacy_snapshot_non_ctx_dbgahb(struct kgsl_device *device,
				u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_regs *header =
		(struct kgsl_snapshot_regs *)buf;
	struct a6xx_non_ctx_dbgahb_registers *regs =
		(struct a6xx_non_ctx_dbgahb_registers *)priv;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	int count = 0;
	unsigned int read_sel;
	int i, j;

	/* Direct hardware reads are only allowed in legacy snapshot mode */
	if (!device->snapshot_legacy)
		return 0;

	/* Figure out how many registers we are going to dump */
	for (i = 0; i < regs->num_sets; i++) {
		int start = regs->regs[i * 2];
		int end = regs->regs[i * 2 + 1];

		count += (end - start + 1);
	}

	/* Each register is dumped as an address/value pair (8 bytes) */
	if (remain < (count * 8) + sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	header->count = count;

	/* Select the statetype for this block in bits [15:8] */
	read_sel = (regs->statetype & 0xff) << 8;
	kgsl_regwrite(device, A6XX_HLSQ_DBG_READ_SEL, read_sel);

	for (i = 0; i < regs->num_sets; i++) {
		unsigned int start = regs->regs[2 * i];
		unsigned int end = regs->regs[2 * i + 1];

		for (j = start; j <= end; j++) {
			unsigned int val;

			val = a6xx_read_dbgahb(device, regs->regbase, j);
			*data++ = j;
			*data++ = val;

		}
	}
	return (count * 8) + sizeof(*header);
}
897
Harshdeep Dhatt52ccc942017-05-10 12:35:30 -0600898static size_t a6xx_snapshot_non_ctx_dbgahb(struct kgsl_device *device, u8 *buf,
899 size_t remain, void *priv)
900{
901 struct kgsl_snapshot_regs *header =
902 (struct kgsl_snapshot_regs *)buf;
903 struct a6xx_non_ctx_dbgahb_registers *regs =
904 (struct a6xx_non_ctx_dbgahb_registers *)priv;
905 unsigned int count = 0;
906 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
907 unsigned int i, k;
908 unsigned int *src;
909
910 if (crash_dump_valid == false)
911 return a6xx_legacy_snapshot_non_ctx_dbgahb(device, buf, remain,
912 regs);
913
914 if (remain < sizeof(*header)) {
915 SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
916 return 0;
917 }
918
919 remain -= sizeof(*header);
920
921 src = (unsigned int *)(a6xx_crashdump_registers.hostptr + regs->offset);
922
923 for (i = 0; i < regs->num_sets; i++) {
924 unsigned int start;
925 unsigned int end;
926
927 start = regs->regs[2 * i];
928 end = regs->regs[(2 * i) + 1];
929
930 if (remain < (end - start + 1) * 8) {
931 SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
932 goto out;
933 }
934
935 remain -= ((end - start) + 1) * 8;
936
937 for (k = start; k <= end; k++, count++) {
938 *data++ = k;
939 *data++ = *src++;
940 }
941 }
942out:
943 header->count = count;
944
945 /* Return the size of the section */
946 return (count * 8) + sizeof(*header);
947}
948
Lynus Vaz461e2382017-01-16 19:35:41 +0530949static void a6xx_snapshot_dbgahb_regs(struct kgsl_device *device,
950 struct kgsl_snapshot *snapshot)
951{
952 int i, j;
953
954 for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
955 struct a6xx_cluster_dbgahb_registers *cluster =
956 &a6xx_dbgahb_ctx_clusters[i];
957 struct a6xx_cluster_dbgahb_regs_info info;
958
959 info.cluster = cluster;
960 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
961 info.ctxt_id = j;
962
963 kgsl_snapshot_add_section(device,
964 KGSL_SNAPSHOT_SECTION_MVC, snapshot,
965 a6xx_snapshot_cluster_dbgahb, &info);
966 }
967 }
968
969 for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
970 kgsl_snapshot_add_section(device,
971 KGSL_SNAPSHOT_SECTION_REGS, snapshot,
972 a6xx_snapshot_non_ctx_dbgahb, &a6xx_non_ctx_dbgahb[i]);
973 }
974}
975
/*
 * a6xx_legacy_snapshot_mvc() - Dump one MVC register cluster/context by
 * reading the hardware directly
 * @device: Device being snapshotted
 * @buf: Destination snapshot buffer
 * @remain: Bytes remaining in the snapshot buffer
 * @priv: Pointer to a struct a6xx_cluster_regs_info
 *
 * Fallback path used when the crash dumper output is not valid.
 *
 * Return: number of bytes written to @buf.
 */
static size_t a6xx_legacy_snapshot_mvc(struct kgsl_device *device, u8 *buf,
				size_t remain, void *priv)
{
	struct kgsl_snapshot_mvc_regs *header =
		(struct kgsl_snapshot_mvc_regs *)buf;
	struct a6xx_cluster_regs_info *info =
		(struct a6xx_cluster_regs_info *)priv;
	struct a6xx_cluster_registers *cur_cluster = info->cluster;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int ctxt = info->ctxt_id;
	unsigned int start, end, i, j, aperture_cntl = 0;
	unsigned int data_size = 0;

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
		return 0;
	}

	remain -= sizeof(*header);

	header->ctxt_id = info->ctxt_id;
	header->cluster_id = cur_cluster->id;

	/*
	 * Set the AHB control for the Host to read from the
	 * cluster/context for this iteration.
	 */
	aperture_cntl = ((cur_cluster->id & 0x7) << 8) | (ctxt << 4) | ctxt;
	kgsl_regwrite(device, A6XX_CP_APERTURE_CNTL_HOST, aperture_cntl);

	/* Some clusters need an additional select register programmed */
	if (cur_cluster->sel)
		kgsl_regwrite(device, cur_cluster->sel->host_reg,
			cur_cluster->sel->val);

	for (i = 0; i < cur_cluster->num_sets; i++) {
		start = cur_cluster->regs[2 * i];
		end = cur_cluster->regs[2 * i + 1];

		/* 2 marker dwords plus one dword per register in the range */
		if (remain < (end - start + 3) * 4) {
			SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
			goto out;
		}

		remain -= (end - start + 3) * 4;
		data_size += (end - start + 3) * 4;

		/* Bit 31 marks this entry as a start-of-range marker */
		*data++ = start | (1 << 31);
		*data++ = end;
		for (j = start; j <= end; j++) {
			unsigned int val;

			kgsl_regread(device, j, &val);
			*data++ = val;
		}
	}
out:
	return data_size + sizeof(*header);
}
1034
1035static size_t a6xx_snapshot_mvc(struct kgsl_device *device, u8 *buf,
1036 size_t remain, void *priv)
1037{
1038 struct kgsl_snapshot_mvc_regs *header =
1039 (struct kgsl_snapshot_mvc_regs *)buf;
1040 struct a6xx_cluster_regs_info *info =
1041 (struct a6xx_cluster_regs_info *)priv;
1042 struct a6xx_cluster_registers *cluster = info->cluster;
1043 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
1044 unsigned int *src;
1045 int i, j;
1046 unsigned int start, end;
1047 size_t data_size = 0;
1048
1049 if (crash_dump_valid == false)
1050 return a6xx_legacy_snapshot_mvc(device, buf, remain, info);
1051
1052 if (remain < sizeof(*header)) {
1053 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
1054 return 0;
1055 }
1056
1057 remain -= sizeof(*header);
1058
1059 header->ctxt_id = info->ctxt_id;
1060 header->cluster_id = cluster->id;
1061
1062 src = (unsigned int *)(a6xx_crashdump_registers.hostptr +
1063 (header->ctxt_id ? cluster->offset1 : cluster->offset0));
1064
1065 for (i = 0; i < cluster->num_sets; i++) {
1066 start = cluster->regs[2 * i];
1067 end = cluster->regs[2 * i + 1];
1068
1069 if (remain < (end - start + 3) * 4) {
1070 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
1071 goto out;
1072 }
1073
1074 remain -= (end - start + 3) * 4;
1075 data_size += (end - start + 3) * 4;
1076
1077 *data++ = start | (1 << 31);
1078 *data++ = end;
1079 for (j = start; j <= end; j++)
1080 *data++ = *src++;
1081 }
1082
1083out:
1084 return data_size + sizeof(*header);
1085
1086}
1087
1088static void a6xx_snapshot_mvc_regs(struct kgsl_device *device,
1089 struct kgsl_snapshot *snapshot)
1090{
1091 int i, j;
1092 struct a6xx_cluster_regs_info info;
1093
1094 for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
1095 struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];
1096
1097 info.cluster = cluster;
1098 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
1099 info.ctxt_id = j;
1100
1101 kgsl_snapshot_add_section(device,
1102 KGSL_SNAPSHOT_SECTION_MVC, snapshot,
1103 a6xx_snapshot_mvc, &info);
1104 }
1105 }
1106}
1107
/*
 * a6xx_dbgc_debug_bus_read() - Read data from trace bus
 * @device: Device being snapshotted
 * @block_id: Debug bus block to select
 * @index: Data index within the selected block
 * @val: Output pointer; receives two dwords (TRACE_BUF2 then TRACE_BUF1)
 *
 * Programs the same block/index selector into all four select registers,
 * waits for the trace buffer to fill, then reads the two data dwords.
 */
static void a6xx_dbgc_debug_bus_read(struct kgsl_device *device,
	unsigned int block_id, unsigned int index, unsigned int *val)
{
	unsigned int reg;

	reg = (block_id << A6XX_DBGC_CFG_DBGBUS_SEL_PING_BLK_SEL_SHIFT) |
			(index << A6XX_DBGC_CFG_DBGBUS_SEL_PING_INDEX_SHIFT);

	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_A, reg);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_B, reg);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_C, reg);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_D, reg);

	/*
	 * There needs to be a delay of 1 us to ensure enough time for correct
	 * data is funneled into the trace buffer
	 */
	udelay(1);

	kgsl_regread(device, A6XX_DBGC_CFG_DBGBUS_TRACE_BUF2, val);
	val++;
	kgsl_regread(device, A6XX_DBGC_CFG_DBGBUS_TRACE_BUF1, val);
}
1132
/*
 * a6xx_snapshot_dbgc_debugbus_block() - Capture debug data for a gpu block
 * @device: Device being snapshotted
 * @buf: Destination snapshot buffer
 * @remain: Bytes remaining in the snapshot buffer
 * @priv: Pointer to a struct adreno_debugbus_block describing the block
 *
 * Return: number of bytes written to @buf (0 on insufficient space).
 */
static size_t a6xx_snapshot_dbgc_debugbus_block(struct kgsl_device *device,
			u8 *buf, size_t remain, void *priv)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct kgsl_snapshot_debugbus *header =
		(struct kgsl_snapshot_debugbus *)buf;
	struct adreno_debugbus_block *block = priv;
	int i;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int dwords;
	unsigned int block_id;
	size_t size;

	dwords = block->dwords;

	/* For a6xx each debug bus data unit is 2 DWORDS */
	size = (dwords * sizeof(unsigned int) * 2) + sizeof(*header);

	if (remain < size) {
		SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
		return 0;
	}

	header->id = block->block_id;
	header->count = dwords * 2;

	block_id = block->block_id;
	/* GMU_GX data is read using the GMU_CX block id on A630 */
	if (adreno_is_a630(adreno_dev) &&
		(block_id == A6XX_DBGBUS_GMU_GX))
		block_id = A6XX_DBGBUS_GMU_CX;

	/* Each read fills two consecutive dwords of the output buffer */
	for (i = 0; i < dwords; i++)
		a6xx_dbgc_debug_bus_read(device, block_id, i, &data[i*2]);

	return size;
}
1171
/*
 * a6xx_snapshot_vbif_debugbus_block() - Capture debug data for VBIF block
 * @device: Device being snapshotted
 * @buf: Destination snapshot buffer
 * @remain: Bytes remaining in the snapshot buffer
 * @priv: Pointer to the VBIF struct adreno_debugbus_block
 *
 * Forces the VBIF test bus clock on, walks the arbiter, AXI-side XIN and
 * core-clock-side XIN test buses, then restores the original clock state.
 *
 * Return: number of bytes written to @buf (0 on insufficient space).
 */
static size_t a6xx_snapshot_vbif_debugbus_block(struct kgsl_device *device,
			u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_debugbus *header =
		(struct kgsl_snapshot_debugbus *)buf;
	struct adreno_debugbus_block *block = priv;
	int i, j;
	/*
	 * Total number of VBIF data words considering 3 sections:
	 * 2 arbiter blocks of 16 words
	 * 5 AXI XIN blocks of 18 dwords each
	 * 4 core clock side XIN blocks of 12 dwords each
	 */
	unsigned int dwords = (16 * A6XX_NUM_AXI_ARB_BLOCKS) +
			(18 * A6XX_NUM_XIN_AXI_BLOCKS) +
			(12 * A6XX_NUM_XIN_CORE_BLOCKS);
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	size_t size;
	unsigned int reg_clk;

	size = (dwords * sizeof(unsigned int)) + sizeof(*header);

	if (remain < size) {
		SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
		return 0;
	}
	header->id = block->block_id;
	header->count = dwords;

	/* Save the clock state, then force the test bus clock on */
	kgsl_regread(device, A6XX_VBIF_CLKON, &reg_clk);
	kgsl_regwrite(device, A6XX_VBIF_CLKON, reg_clk |
			(A6XX_VBIF_CLKON_FORCE_ON_TESTBUS_MASK <<
			A6XX_VBIF_CLKON_FORCE_ON_TESTBUS_SHIFT));
	kgsl_regwrite(device, A6XX_VBIF_TEST_BUS1_CTRL0, 0);
	kgsl_regwrite(device, A6XX_VBIF_TEST_BUS_OUT_CTRL,
			(A6XX_VBIF_TEST_BUS_OUT_CTRL_EN_MASK <<
			A6XX_VBIF_TEST_BUS_OUT_CTRL_EN_SHIFT));

	/* Arbiter blocks (selected via the upper half of BUS2_CTRL0) */
	for (i = 0; i < A6XX_NUM_AXI_ARB_BLOCKS; i++) {
		kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL0,
			(1 << (i + 16)));
		for (j = 0; j < 16; j++) {
			kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL1,
				((j & A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL_MASK)
				<< A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL_SHIFT));
			kgsl_regread(device, A6XX_VBIF_TEST_BUS_OUT,
				data);
			data++;
		}
	}

	/* XIN blocks AXI side */
	for (i = 0; i < A6XX_NUM_XIN_AXI_BLOCKS; i++) {
		kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL0, 1 << i);
		for (j = 0; j < 18; j++) {
			kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL1,
				((j & A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL_MASK)
				<< A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL_SHIFT));
			kgsl_regread(device, A6XX_VBIF_TEST_BUS_OUT,
				data);
			data++;
		}
	}
	kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL0, 0);

	/* XIN blocks core clock side */
	for (i = 0; i < A6XX_NUM_XIN_CORE_BLOCKS; i++) {
		kgsl_regwrite(device, A6XX_VBIF_TEST_BUS1_CTRL0, 1 << i);
		for (j = 0; j < 12; j++) {
			kgsl_regwrite(device, A6XX_VBIF_TEST_BUS1_CTRL1,
				((j & A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL_MASK)
				<< A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL_SHIFT));
			kgsl_regread(device, A6XX_VBIF_TEST_BUS_OUT,
				data);
			data++;
		}
	}
	/* restore the clock of VBIF */
	kgsl_regwrite(device, A6XX_VBIF_CLKON, reg_clk);
	return size;
}
1254
/*
 * _cx_dbgc_regread() - Read a CX DBGC register through the ioremapped
 * window set up by a6xx_snapshot_debugbus()
 * @offsetwords: Dword offset of the register; must lie within the
 * SEL_A..TRACE_BUF2 range covered by the mapping
 * @value: Output pointer for the value read
 */
static void _cx_dbgc_regread(unsigned int offsetwords, unsigned int *value)
{
	void __iomem *reg;

	/* Guard against reads outside the mapped CX_DBGC window */
	if (WARN((offsetwords < A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) ||
		(offsetwords > A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2),
		"Read beyond CX_DBGC block: 0x%x\n", offsetwords))
		return;

	reg = a6xx_cx_dbgc +
		((offsetwords - A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) << 2);
	*value = __raw_readl(reg);

	/*
	 * ensure this read finishes before the next one.
	 * i.e. act like normal readl()
	 */
	rmb();
}
1274
/*
 * _cx_dbgc_regwrite() - Write a CX DBGC register through the ioremapped
 * window set up by a6xx_snapshot_debugbus()
 * @offsetwords: Dword offset of the register; must lie within the
 * SEL_A..TRACE_BUF2 range covered by the mapping
 * @value: Value to write
 */
static void _cx_dbgc_regwrite(unsigned int offsetwords, unsigned int value)
{
	void __iomem *reg;

	/* Guard against writes outside the mapped CX_DBGC window */
	if (WARN((offsetwords < A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) ||
		(offsetwords > A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2),
		"Write beyond CX_DBGC block: 0x%x\n", offsetwords))
		return;

	reg = a6xx_cx_dbgc +
		((offsetwords - A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) << 2);

	/*
	 * ensure previous writes post before this one,
	 * i.e. act like normal writel()
	 */
	wmb();
	__raw_writel(value, reg);
}
1294
/*
 * a6xx_cx_debug_bus_read() - Read data from the CX DBGC trace bus
 * @device: Device being snapshotted (unused here; kept for symmetry with
 * a6xx_dbgc_debug_bus_read)
 * @block_id: Debug bus block to select
 * @index: Data index within the selected block
 * @val: Output pointer; receives two dwords (TRACE_BUF2 then TRACE_BUF1)
 */
static void a6xx_cx_debug_bus_read(struct kgsl_device *device,
	unsigned int block_id, unsigned int index, unsigned int *val)
{
	unsigned int reg;

	reg = (block_id << A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_BLK_SEL_SHIFT) |
			(index << A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_INDEX_SHIFT);

	_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_A, reg);
	_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_B, reg);
	_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_C, reg);
	_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_D, reg);

	/*
	 * There needs to be a delay of 1 us to ensure enough time for correct
	 * data is funneled into the trace buffer
	 */
	udelay(1);

	_cx_dbgc_regread(A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2, val);
	val++;
	_cx_dbgc_regread(A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1, val);
}
1319
/*
 * a6xx_snapshot_cx_dbgc_debugbus_block() - Capture debug data for a gpu
 * block from the CX DBGC block
 * @device: Device being snapshotted
 * @buf: Destination snapshot buffer
 * @remain: Bytes remaining in the snapshot buffer
 * @priv: Pointer to a struct adreno_debugbus_block describing the block
 *
 * Return: number of bytes written to @buf (0 on insufficient space).
 */
static size_t a6xx_snapshot_cx_dbgc_debugbus_block(struct kgsl_device *device,
			u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_debugbus *header =
		(struct kgsl_snapshot_debugbus *)buf;
	struct adreno_debugbus_block *block = priv;
	int i;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int dwords;
	size_t size;

	dwords = block->dwords;

	/* For a6xx each debug bus data unit is 2 DWORDS */
	size = (dwords * sizeof(unsigned int) * 2) + sizeof(*header);

	if (remain < size) {
		SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
		return 0;
	}

	header->id = block->block_id;
	header->count = dwords * 2;

	/* Each read fills two consecutive dwords of the output buffer */
	for (i = 0; i < dwords; i++)
		a6xx_cx_debug_bus_read(device, block->block_id, i,
			&data[i*2]);

	return size;
}
1354
/*
 * a6xx_snapshot_debugbus() - Capture debug bus data
 * @device: Device being snapshotted
 * @snapshot: Snapshot instance to append sections to
 *
 * Programs the GX-side DBGC (and, if the CX_DBGC window can be mapped,
 * the CX-side DBGC) trace bus configuration, then adds one DEBUGBUS
 * section per debug bus block plus one for the VBIF test bus. The CX
 * mapping is unmapped again before returning.
 */
static void a6xx_snapshot_debugbus(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot)
{
	int i;

	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_CNTLT,
		(0xf << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT_SHIFT) |
		(0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU_SHIFT) |
		(0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN_SHIFT));

	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_CNTLM,
		0xf << A6XX_DBGC_CFG_DBGBUS_CTLTM_ENABLE_SHIFT);

	/* Clear the interval tables */
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_0, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_1, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_2, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_3, 0);

	/* Identity byte-lane mapping: lane N carries byte N */
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_BYTEL_0,
		(0 << A6XX_DBGC_CFG_DBGBUS_BYTEL0_SHIFT) |
		(1 << A6XX_DBGC_CFG_DBGBUS_BYTEL1_SHIFT) |
		(2 << A6XX_DBGC_CFG_DBGBUS_BYTEL2_SHIFT) |
		(3 << A6XX_DBGC_CFG_DBGBUS_BYTEL3_SHIFT) |
		(4 << A6XX_DBGC_CFG_DBGBUS_BYTEL4_SHIFT) |
		(5 << A6XX_DBGC_CFG_DBGBUS_BYTEL5_SHIFT) |
		(6 << A6XX_DBGC_CFG_DBGBUS_BYTEL6_SHIFT) |
		(7 << A6XX_DBGC_CFG_DBGBUS_BYTEL7_SHIFT));
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_BYTEL_1,
		(8 << A6XX_DBGC_CFG_DBGBUS_BYTEL8_SHIFT) |
		(9 << A6XX_DBGC_CFG_DBGBUS_BYTEL9_SHIFT) |
		(10 << A6XX_DBGC_CFG_DBGBUS_BYTEL10_SHIFT) |
		(11 << A6XX_DBGC_CFG_DBGBUS_BYTEL11_SHIFT) |
		(12 << A6XX_DBGC_CFG_DBGBUS_BYTEL12_SHIFT) |
		(13 << A6XX_DBGC_CFG_DBGBUS_BYTEL13_SHIFT) |
		(14 << A6XX_DBGC_CFG_DBGBUS_BYTEL14_SHIFT) |
		(15 << A6XX_DBGC_CFG_DBGBUS_BYTEL15_SHIFT));

	/* No masking */
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_0, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_1, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_2, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_3, 0);

	/* Map just the CX_DBGC SEL_A..TRACE_BUF2 register window */
	a6xx_cx_dbgc = ioremap(device->reg_phys +
			(A6XX_CX_DBGC_CFG_DBGBUS_SEL_A << 2),
			(A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2 -
				A6XX_CX_DBGC_CFG_DBGBUS_SEL_A + 1) << 2);

	if (a6xx_cx_dbgc) {
		/* Mirror the GX-side DBGC configuration on the CX side */
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_CNTLT,
			(0xf << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT_SHIFT) |
			(0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU_SHIFT) |
			(0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN_SHIFT));

		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_CNTLM,
			0xf << A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE_SHIFT);

		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3, 0);

		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0,
			(0 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL0_SHIFT) |
			(1 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL1_SHIFT) |
			(2 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL2_SHIFT) |
			(3 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL3_SHIFT) |
			(4 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL4_SHIFT) |
			(5 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL5_SHIFT) |
			(6 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL6_SHIFT) |
			(7 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL7_SHIFT));
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1,
			(8 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL8_SHIFT) |
			(9 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL9_SHIFT) |
			(10 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL10_SHIFT) |
			(11 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL11_SHIFT) |
			(12 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL12_SHIFT) |
			(13 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL13_SHIFT) |
			(14 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL14_SHIFT) |
			(15 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL15_SHIFT));

		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3, 0);
	} else
		KGSL_DRV_ERR(device, "Unable to ioremap CX_DBGC_CFG block\n");

	for (i = 0; i < ARRAY_SIZE(a6xx_dbgc_debugbus_blocks); i++) {
		kgsl_snapshot_add_section(device,
			KGSL_SNAPSHOT_SECTION_DEBUGBUS,
			snapshot, a6xx_snapshot_dbgc_debugbus_block,
			(void *) &a6xx_dbgc_debugbus_blocks[i]);
	}

	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUGBUS,
			snapshot, a6xx_snapshot_vbif_debugbus_block,
			(void *) &a6xx_vbif_debugbus_blocks);

	/* CX sections are only captured if the mapping above succeeded */
	if (a6xx_cx_dbgc) {
		for (i = 0; i < ARRAY_SIZE(a6xx_cx_dbgc_debugbus_blocks); i++) {
			kgsl_snapshot_add_section(device,
				KGSL_SNAPSHOT_SECTION_DEBUGBUS,
				snapshot, a6xx_snapshot_cx_dbgc_debugbus_block,
				(void *) &a6xx_cx_dbgc_debugbus_blocks[i]);
		}
		iounmap(a6xx_cx_dbgc);
	}
}
1464
/*
 * a6xx_snapshot_gmu() - A6XX GMU snapshot function
 * @adreno_dev: Device being snapshotted
 * @snapshot: Pointer to the snapshot instance
 *
 * This is where all of the A6XX GMU specific bits and pieces are grabbed
 * into the snapshot memory
 */
void a6xx_snapshot_gmu(struct adreno_device *adreno_dev,
		struct kgsl_snapshot *snapshot)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);

	/* Nothing to dump if there is no GMU on this target */
	if (!kgsl_gmu_isenabled(device))
		return;

	adreno_snapshot_registers(device, snapshot, a6xx_gmu_registers,
					ARRAY_SIZE(a6xx_gmu_registers) / 2);

	/* The GX-side GMU registers are only readable while GX is powered */
	if (gpudev->gx_is_on(adreno_dev))
		adreno_snapshot_registers(device, snapshot,
				a6xx_gmu_gx_registers,
				ARRAY_SIZE(a6xx_gmu_gx_registers) / 2);
}
1490
Lynus Vaz85150052017-02-21 17:57:48 +05301491/* a6xx_snapshot_sqe() - Dump SQE data in snapshot */
1492static size_t a6xx_snapshot_sqe(struct kgsl_device *device, u8 *buf,
1493 size_t remain, void *priv)
1494{
1495 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
1496 struct kgsl_snapshot_debug *header = (struct kgsl_snapshot_debug *)buf;
1497 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
1498 struct adreno_firmware *fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);
1499
1500 if (remain < DEBUG_SECTION_SZ(1)) {
1501 SNAPSHOT_ERR_NOMEM(device, "SQE VERSION DEBUG");
1502 return 0;
1503 }
1504
1505 /* Dump the SQE firmware version */
1506 header->type = SNAPSHOT_DEBUG_SQE_VERSION;
1507 header->size = 1;
1508 *data = fw->version;
1509
1510 return DEBUG_SECTION_SZ(1);
1511}
1512
/*
 * _a6xx_do_crashdump() - Trigger the CP crash dumper and wait for it
 * @device: Device being snapshotted
 *
 * Kicks off the pre-built capture script and polls for completion.
 * Sets the global crash_dump_valid to true only if the dumper finished
 * within CP_CRASH_DUMPER_TIMEOUT; readers fall back to the legacy path
 * otherwise.
 */
static void _a6xx_do_crashdump(struct kgsl_device *device)
{
	unsigned long wait_time;
	unsigned int reg = 0;
	unsigned int val;

	crash_dump_valid = false;

	if (!device->snapshot_crashdumper)
		return;
	/* Can't run without both the script and the capture buffer */
	if (a6xx_capturescript.gpuaddr == 0 ||
		a6xx_crashdump_registers.gpuaddr == 0)
		return;

	/* IF the SMMU is stalled we cannot do a crash dump */
	kgsl_regread(device, A6XX_RBBM_STATUS3, &val);
	if (val & BIT(24))
		return;

	/* Turn on APRIV so we can access the buffers */
	kgsl_regwrite(device, A6XX_CP_MISC_CNTL, 1);

	kgsl_regwrite(device, A6XX_CP_CRASH_SCRIPT_BASE_LO,
			lower_32_bits(a6xx_capturescript.gpuaddr));
	kgsl_regwrite(device, A6XX_CP_CRASH_SCRIPT_BASE_HI,
			upper_32_bits(a6xx_capturescript.gpuaddr));
	kgsl_regwrite(device, A6XX_CP_CRASH_DUMP_CNTL, 1);

	/* Poll for the dumper's completion bit until the timeout expires */
	wait_time = jiffies + msecs_to_jiffies(CP_CRASH_DUMPER_TIMEOUT);
	while (!time_after(jiffies, wait_time)) {
		kgsl_regread(device, A6XX_CP_CRASH_DUMP_STATUS, &reg);
		if (reg & 0x2)
			break;
		cpu_relax();
	}

	/* Drop APRIV again regardless of the outcome */
	kgsl_regwrite(device, A6XX_CP_MISC_CNTL, 0);

	if (!(reg & 0x2)) {
		KGSL_CORE_ERR("Crash dump timed out: 0x%X\n", reg);
		return;
	}

	crash_dump_valid = true;
}
1558
/*
 * a6xx_snapshot() - A6XX GPU snapshot function
 * @adreno_dev: Device being snapshotted
 * @snapshot: Pointer to the snapshot instance
 *
 * This is where all of the A6XX specific bits and pieces are grabbed
 * into the snapshot memory
 */
void a6xx_snapshot(struct adreno_device *adreno_dev,
		struct kgsl_snapshot *snapshot)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	struct adreno_snapshot_data *snap_data = gpudev->snapshot_data;
	bool sptprac_on;
	unsigned int i;

	/* SP/TP power state gates the crash dumper and shader/MVC dumps */
	sptprac_on = gpudev->sptprac_is_on(adreno_dev);

	/* Return if the GX is off */
	if (!gpudev->gx_is_on(adreno_dev))
		return;

	/* Dump the registers which get affected by crash dumper trigger */
	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
		snapshot, a6xx_snapshot_pre_crashdump_regs, NULL);

	/* Dump vbif registers as well which get affected by crash dumper */
	adreno_snapshot_vbif_registers(device, snapshot,
		a6xx_vbif_snapshot_registers,
		ARRAY_SIZE(a6xx_vbif_snapshot_registers));

	/* Try to run the crash dumper */
	if (sptprac_on)
		_a6xx_do_crashdump(device);

	for (i = 0; i < ARRAY_SIZE(a6xx_reg_list); i++) {
		kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
			snapshot, a6xx_snapshot_registers, &a6xx_reg_list[i]);
	}

	/* CP_SQE indexed registers */
	kgsl_snapshot_indexed_registers(device, snapshot,
		A6XX_CP_SQE_STAT_ADDR, A6XX_CP_SQE_STAT_DATA,
		0, snap_data->sect_sizes->cp_pfp);

	/* CP_DRAW_STATE */
	kgsl_snapshot_indexed_registers(device, snapshot,
		A6XX_CP_DRAW_STATE_ADDR, A6XX_CP_DRAW_STATE_DATA,
		0, 0x100);

	/* SQE_UCODE Cache */
	kgsl_snapshot_indexed_registers(device, snapshot,
		A6XX_CP_SQE_UCODE_DBG_ADDR, A6XX_CP_SQE_UCODE_DBG_DATA,
		0, 0x6000);

	/* CP ROQ */
	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG,
		snapshot, adreno_snapshot_cp_roq,
		&snap_data->sect_sizes->roq);

	/* SQE Firmware */
	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG,
		snapshot, a6xx_snapshot_sqe, NULL);

	/* Mempool debug data */
	a6xx_snapshot_mempool(device, snapshot);

	if (sptprac_on) {
		/* Shader memory */
		a6xx_snapshot_shader(device, snapshot);

		/* MVC register section */
		a6xx_snapshot_mvc_regs(device, snapshot);

		/* registers dumped through DBG AHB */
		a6xx_snapshot_dbgahb_regs(device, snapshot);
	}

	a6xx_snapshot_debugbus(device, snapshot);

}
1641
/*
 * _a6xx_crashdump_init_mvc() - Build the crash dumper script entries for
 * the MVC register clusters
 * @ptr: Script buffer to append qword pairs to
 * @offset: In/out byte offset into the capture buffer; advanced past the
 * space reserved for each register range
 *
 * Each script entry is a qword pair; the second qword packs the register
 * dword offset in bits [63:44] (the (1 << 21) | 1 form presumably marks a
 * single-register write command -- TODO confirm against the CP crash
 * dumper script format). Also records each cluster's per-context capture
 * offsets for the snapshot readers.
 *
 * Return: number of qwords written to @ptr.
 */
static int _a6xx_crashdump_init_mvc(uint64_t *ptr, uint64_t *offset)
{
	int qwords = 0;
	unsigned int i, j, k;
	unsigned int count;

	for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
		struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];

		/* Program the optional select register first */
		if (cluster->sel) {
			ptr[qwords++] = cluster->sel->val;
			ptr[qwords++] = ((uint64_t)cluster->sel->cd_reg << 44) |
					(1 << 21) | 1;
		}

		cluster->offset0 = *offset;
		for (j = 0; j < A6XX_NUM_CTXTS; j++) {

			if (j == 1)
				cluster->offset1 = *offset;

			/* Point the aperture at this cluster/context pair */
			ptr[qwords++] = (cluster->id << 8) | (j << 4) | j;
			ptr[qwords++] =
				((uint64_t)A6XX_CP_APERTURE_CNTL_CD << 44) |
				(1 << 21) | 1;

			/* One read command per register range in the set */
			for (k = 0; k < cluster->num_sets; k++) {
				count = REG_PAIR_COUNT(cluster->regs, k);
				ptr[qwords++] =
				   a6xx_crashdump_registers.gpuaddr + *offset;
				ptr[qwords++] =
					(((uint64_t)cluster->regs[2 * k]) << 44) |
					count;

				*offset += count * sizeof(unsigned int);
			}
		}
	}

	return qwords;
}
1683
Lynus Vaz9ad67a32017-03-10 14:55:02 +05301684static int _a6xx_crashdump_init_shader(struct a6xx_shader_block *block,
1685 uint64_t *ptr, uint64_t *offset)
1686{
1687 int qwords = 0;
1688 unsigned int j;
1689
1690 /* Capture each bank in the block */
1691 for (j = 0; j < A6XX_NUM_SHADER_BANKS; j++) {
1692 /* Program the aperture */
1693 ptr[qwords++] =
1694 (block->statetype << A6XX_SHADER_STATETYPE_SHIFT) | j;
1695 ptr[qwords++] = (((uint64_t) A6XX_HLSQ_DBG_READ_SEL << 44)) |
1696 (1 << 21) | 1;
1697
1698 /* Read all the data in one chunk */
1699 ptr[qwords++] = a6xx_crashdump_registers.gpuaddr + *offset;
1700 ptr[qwords++] =
1701 (((uint64_t) A6XX_HLSQ_DBG_AHB_READ_APERTURE << 44)) |
1702 block->sz;
1703
1704 /* Remember the offset of the first bank for easy access */
1705 if (j == 0)
1706 block->offset = *offset;
1707
1708 *offset += block->sz * sizeof(unsigned int);
1709 }
1710
1711 return qwords;
1712}
1713
Lynus Vaz1e258612017-04-27 21:35:22 +05301714static int _a6xx_crashdump_init_ctx_dbgahb(uint64_t *ptr, uint64_t *offset)
1715{
1716 int qwords = 0;
1717 unsigned int i, j, k;
1718 unsigned int count;
1719
1720 for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
1721 struct a6xx_cluster_dbgahb_registers *cluster =
1722 &a6xx_dbgahb_ctx_clusters[i];
1723
1724 cluster->offset0 = *offset;
1725
1726 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
1727 if (j == 1)
1728 cluster->offset1 = *offset;
1729
1730 /* Program the aperture */
1731 ptr[qwords++] =
1732 ((cluster->statetype + j * 2) & 0xff) << 8;
1733 ptr[qwords++] =
1734 (((uint64_t)A6XX_HLSQ_DBG_READ_SEL << 44)) |
1735 (1 << 21) | 1;
1736
1737 for (k = 0; k < cluster->num_sets; k++) {
1738 unsigned int start = cluster->regs[2 * k];
1739
1740 count = REG_PAIR_COUNT(cluster->regs, k);
1741 ptr[qwords++] =
1742 a6xx_crashdump_registers.gpuaddr + *offset;
1743 ptr[qwords++] =
1744 (((uint64_t)(A6XX_HLSQ_DBG_AHB_READ_APERTURE +
1745 start - cluster->regbase / 4) << 44)) |
1746 count;
1747
1748 *offset += count * sizeof(unsigned int);
1749 }
1750 }
1751 }
1752 return qwords;
1753}
1754
Harshdeep Dhatt52ccc942017-05-10 12:35:30 -06001755static int _a6xx_crashdump_init_non_ctx_dbgahb(uint64_t *ptr, uint64_t *offset)
1756{
1757 int qwords = 0;
1758 unsigned int i, k;
1759 unsigned int count;
1760
1761 for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
1762 struct a6xx_non_ctx_dbgahb_registers *regs =
1763 &a6xx_non_ctx_dbgahb[i];
1764
1765 regs->offset = *offset;
1766
1767 /* Program the aperture */
1768 ptr[qwords++] = (regs->statetype & 0xff) << 8;
1769 ptr[qwords++] = (((uint64_t)A6XX_HLSQ_DBG_READ_SEL << 44)) |
1770 (1 << 21) | 1;
1771
1772 for (k = 0; k < regs->num_sets; k++) {
1773 unsigned int start = regs->regs[2 * k];
1774
1775 count = REG_PAIR_COUNT(regs->regs, k);
1776 ptr[qwords++] =
1777 a6xx_crashdump_registers.gpuaddr + *offset;
1778 ptr[qwords++] =
1779 (((uint64_t)(A6XX_HLSQ_DBG_AHB_READ_APERTURE +
1780 start - regs->regbase / 4) << 44)) |
1781 count;
1782
1783 *offset += count * sizeof(unsigned int);
1784 }
1785 }
1786 return qwords;
1787}
1788
/*
 * a6xx_crashdump_init() - Allocate and build the CP crashdump capture
 * script and data buffers for an A6XX target
 * @adreno_dev: Pointer to the adreno device
 *
 * First sizes both buffers by walking every register list (plain
 * registers, shader blocks, MVC clusters, and context/non-context
 * debug AHB registers), then allocates them and writes the capture
 * script. Returns early if the buffers were already allocated, and
 * silently skips crashdump support if either allocation fails.
 */
void a6xx_crashdump_init(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned int script_size = 0;
	unsigned int data_size = 0;
	unsigned int i, j, k;
	uint64_t *ptr;
	uint64_t offset = 0;

	/* Already initialized by a previous call */
	if (a6xx_capturescript.gpuaddr != 0 &&
		a6xx_crashdump_registers.gpuaddr != 0)
		return;

	/*
	 * We need to allocate two buffers:
	 * 1 - the buffer to hold the draw script
	 * 2 - the buffer to hold the data
	 */

	/*
	 * To save the registers, we need 16 bytes per register pair for the
	 * script and a dword for each register in the data
	 */
	for (i = 0; i < ARRAY_SIZE(a6xx_reg_list); i++) {
		struct reg_list *regs = &a6xx_reg_list[i];

		/* 16 bytes for programming the aperture */
		if (regs->sel)
			script_size += 16;

		/* Each pair needs 16 bytes (2 qwords) */
		script_size += regs->count * 16;

		/* Each register needs a dword in the data */
		for (j = 0; j < regs->count; j++)
			data_size += REG_PAIR_COUNT(regs->regs, j) *
				sizeof(unsigned int);

	}

	/*
	 * To save the shader blocks for each block in each type we need 32
	 * bytes for the script (16 bytes to program the aperture and 16 to
	 * read the data) and then a block specific number of bytes to hold
	 * the data
	 */
	for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
		script_size += 32 * A6XX_NUM_SHADER_BANKS;
		data_size += a6xx_shader_blocks[i].sz * sizeof(unsigned int) *
			A6XX_NUM_SHADER_BANKS;
	}

	/* Calculate the script and data size for MVC registers */
	for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
		struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];

		for (j = 0; j < A6XX_NUM_CTXTS; j++) {

			/* 16 bytes for programming the aperture */
			script_size += 16;

			/* Reading each pair of registers takes 16 bytes */
			script_size += 16 * cluster->num_sets;

			/* A dword per register read from the cluster list */
			for (k = 0; k < cluster->num_sets; k++)
				data_size += REG_PAIR_COUNT(cluster->regs, k) *
						sizeof(unsigned int);
		}
	}

	/* Calculate the script and data size for debug AHB registers */
	for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
		struct a6xx_cluster_dbgahb_registers *cluster =
				&a6xx_dbgahb_ctx_clusters[i];

		for (j = 0; j < A6XX_NUM_CTXTS; j++) {

			/* 16 bytes for programming the aperture */
			script_size += 16;

			/* Reading each pair of registers takes 16 bytes */
			script_size += 16 * cluster->num_sets;

			/* A dword per register read from the cluster list */
			for (k = 0; k < cluster->num_sets; k++)
				data_size += REG_PAIR_COUNT(cluster->regs, k) *
					sizeof(unsigned int);
		}
	}

	/*
	 * Calculate the script and data size for non context debug
	 * AHB registers
	 */
	for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
		struct a6xx_non_ctx_dbgahb_registers *regs =
				&a6xx_non_ctx_dbgahb[i];

		/* 16 bytes for programming the aperture */
		script_size += 16;

		/* Reading each pair of registers takes 16 bytes */
		script_size += 16 * regs->num_sets;

		/* A dword per register read from the cluster list */
		for (k = 0; k < regs->num_sets; k++)
			data_size += REG_PAIR_COUNT(regs->regs, k) *
				sizeof(unsigned int);
	}

	/* Now allocate the script and data buffers */

	/* The script buffer needs 2 extra qwords on the end for termination */
	if (kgsl_allocate_global(device, &a6xx_capturescript,
		script_size + 16, KGSL_MEMFLAGS_GPUREADONLY,
		KGSL_MEMDESC_PRIVILEGED, "capturescript"))
		return;

	/* On failure, release the script buffer so a retry starts clean */
	if (kgsl_allocate_global(device, &a6xx_crashdump_registers, data_size,
		0, KGSL_MEMDESC_PRIVILEGED, "capturescript_regs")) {
		kgsl_free_global(KGSL_DEVICE(adreno_dev), &a6xx_capturescript);
		return;
	}

	/* Build the crash script */

	ptr = (uint64_t *)a6xx_capturescript.hostptr;

	/* For the registers, program a read command for each pair */
	for (i = 0; i < ARRAY_SIZE(a6xx_reg_list); i++) {
		struct reg_list *regs = &a6xx_reg_list[i];

		/* Program the SEL_CNTL_CD register appropriately */
		if (regs->sel) {
			*ptr++ = regs->sel->val;
			*ptr++ = (((uint64_t)regs->sel->cd_reg << 44)) |
				(1 << 21) | 1;
		}

		for (j = 0; j < regs->count; j++) {
			unsigned int r = REG_PAIR_COUNT(regs->regs, j);
			*ptr++ = a6xx_crashdump_registers.gpuaddr + offset;
			*ptr++ = (((uint64_t) regs->regs[2 * j]) << 44) | r;
			offset += r * sizeof(unsigned int);
		}
	}

	/* Program each shader block */
	for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
		ptr += _a6xx_crashdump_init_shader(&a6xx_shader_blocks[i], ptr,
							&offset);
	}

	/* Program the capturescript for the MVC registers */
	ptr += _a6xx_crashdump_init_mvc(ptr, &offset);

	ptr += _a6xx_crashdump_init_ctx_dbgahb(ptr, &offset);

	ptr += _a6xx_crashdump_init_non_ctx_dbgahb(ptr, &offset);

	/* Terminate the script with a pair of zero qwords */
	*ptr++ = 0;
	*ptr++ = 0;
}