blob: a40c4779eda643c8c486f1b8520cae0d0f81929b [file] [log] [blame]
Shrenuj Bansal41665402016-12-16 15:25:54 -08001/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#include <linux/io.h>
15#include "kgsl.h"
16#include "adreno.h"
17#include "kgsl_snapshot.h"
18#include "adreno_snapshot.h"
19#include "a6xx_reg.h"
20#include "adreno_a6xx.h"
Kyle Piefer60733aa2017-03-21 11:24:01 -070021#include "kgsl_gmu.h"
Shrenuj Bansal41665402016-12-16 15:25:54 -080022
/* Number of GPU contexts whose clustered registers are dumped (see the
 * per-context loops over A6XX_NUM_CTXTS in the dbgahb snapshot code).
 */
#define A6XX_NUM_CTXTS 2
/* VBIF sub-block counts; presumably consumed by the VBIF debugbus dump,
 * which is not visible in this chunk -- TODO(review): confirm usage.
 */
#define A6XX_NUM_AXI_ARB_BLOCKS 2
#define A6XX_NUM_XIN_AXI_BLOCKS 5
#define A6XX_NUM_XIN_CORE_BLOCKS 4
Shrenuj Bansal41665402016-12-16 15:25:54 -080027
/*
 * Context-clustered register ranges, one table per CP cluster.  Each table
 * is a flat list of (start, stop) register-offset pairs, both inclusive;
 * ARRAY_SIZE(table)/2 gives the number of pairs (see a6xx_clusters below).
 */
static const unsigned int a6xx_gras_cluster[] = {
	0x8000, 0x8006, 0x8010, 0x8092, 0x8094, 0x809D, 0x80A0, 0x80A6,
	0x80AF, 0x80F1, 0x8100, 0x8107, 0x8109, 0x8109, 0x8110, 0x8110,
	0x8400, 0x840B,
};

/* PS cluster ranges readable through the RB RAC aperture (sel val 0x0) */
static const unsigned int a6xx_ps_cluster_rac[] = {
	0x8800, 0x8806, 0x8809, 0x8811, 0x8818, 0x881E, 0x8820, 0x8865,
	0x8870, 0x8879, 0x8880, 0x8889, 0x8890, 0x8891, 0x8898, 0x8898,
	0x88C0, 0x88C1, 0x88D0, 0x88E3, 0x8900, 0x890C, 0x890F, 0x891A,
	0x8C00, 0x8C01, 0x8C08, 0x8C10, 0x8C17, 0x8C1F, 0x8C26, 0x8C33,
};

/* PS cluster ranges readable through the RB RBP aperture (sel val 0x9) */
static const unsigned int a6xx_ps_cluster_rbp[] = {
	0x88F0, 0x88F3, 0x890D, 0x890E, 0x8927, 0x8928, 0x8BF0, 0x8BF1,
	0x8C02, 0x8C07, 0x8C11, 0x8C16, 0x8C20, 0x8C25,
};

/* PS cluster ranges that need no aperture select */
static const unsigned int a6xx_ps_cluster[] = {
	0x9200, 0x9216, 0x9218, 0x9236, 0x9300, 0x9306,
};

static const unsigned int a6xx_fe_cluster[] = {
	0x9300, 0x9306, 0x9800, 0x9806, 0x9B00, 0x9B07, 0xA000, 0xA009,
	0xA00E, 0xA0EF, 0xA0F8, 0xA0F8,
};

static const unsigned int a6xx_pc_vs_cluster[] = {
	0x9100, 0x9108, 0x9300, 0x9306, 0x9980, 0x9981, 0x9B00, 0x9B07,
};
58
/*
 * Aperture-select register descriptor.  Writing 'val' selects which RB
 * sub-block is visible: 'host_reg' is written before CPU/AHB reads (see
 * a6xx_legacy_snapshot_registers), 'cd_reg' is presumably programmed into
 * the crash dumper script set up elsewhere -- TODO(review): confirm.
 */
static const struct sel_reg {
	unsigned int host_reg;
	unsigned int cd_reg;
	unsigned int val;
} _a6xx_rb_rac_aperture = {
	/* val 0x0 selects the RB RAC bank */
	.host_reg = A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST,
	.cd_reg = A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD,
	.val = 0x0,
},
_a6xx_rb_rbp_aperture = {
	/* val 0x9 selects the RB RBP bank */
	.host_reg = A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST,
	.cd_reg = A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD,
	.val = 0x9,
};
73
/*
 * Per-cluster dump descriptors.  'regs' points at a (start, stop) pair
 * table, 'num_sets' is the number of pairs, 'sel' is an optional aperture
 * select to program before reading, and offset0/offset1 record where the
 * crash dumper deposited the per-context data (filled in outside this
 * chunk -- TODO(review): confirm).
 */
static struct a6xx_cluster_registers {
	unsigned int id;		/* CP_CLUSTER_* identifier */
	const unsigned int *regs;	/* (start, stop) pair table */
	unsigned int num_sets;		/* number of pairs in 'regs' */
	const struct sel_reg *sel;	/* optional aperture select, or NULL */
	unsigned int offset0;		/* crashdump data offset, context 0 */
	unsigned int offset1;		/* crashdump data offset, context 1 */
} a6xx_clusters[] = {
	{ CP_CLUSTER_GRAS, a6xx_gras_cluster, ARRAY_SIZE(a6xx_gras_cluster)/2,
		NULL },
	{ CP_CLUSTER_PS, a6xx_ps_cluster_rac, ARRAY_SIZE(a6xx_ps_cluster_rac)/2,
		&_a6xx_rb_rac_aperture },
	{ CP_CLUSTER_PS, a6xx_ps_cluster_rbp, ARRAY_SIZE(a6xx_ps_cluster_rbp)/2,
		&_a6xx_rb_rbp_aperture },
	{ CP_CLUSTER_PS, a6xx_ps_cluster, ARRAY_SIZE(a6xx_ps_cluster)/2,
		NULL },
	{ CP_CLUSTER_FE, a6xx_fe_cluster, ARRAY_SIZE(a6xx_fe_cluster)/2,
		NULL },
	{ CP_CLUSTER_PC_VS, a6xx_pc_vs_cluster,
		ARRAY_SIZE(a6xx_pc_vs_cluster)/2, NULL },
};
95
/* (cluster, context) pair passed as 'priv' to the MVC dump callbacks */
struct a6xx_cluster_regs_info {
	struct a6xx_cluster_registers *cluster;
	unsigned int ctxt_id;	/* 0 .. A6XX_NUM_CTXTS-1 */
};
100
Lynus Vaz461e2382017-01-16 19:35:41 +0530101static const unsigned int a6xx_sp_vs_hlsq_cluster[] = {
102 0xB800, 0xB803, 0xB820, 0xB822,
103};
104
105static const unsigned int a6xx_sp_vs_sp_cluster[] = {
106 0xA800, 0xA824, 0xA830, 0xA83C, 0xA840, 0xA864, 0xA870, 0xA895,
107 0xA8A0, 0xA8AF, 0xA8C0, 0xA8C3,
108};
109
110static const unsigned int a6xx_hlsq_duplicate_cluster[] = {
111 0xBB10, 0xBB11, 0xBB20, 0xBB29,
112};
113
114static const unsigned int a6xx_hlsq_2d_duplicate_cluster[] = {
115 0xBD80, 0xBD80,
116};
117
118static const unsigned int a6xx_sp_duplicate_cluster[] = {
119 0xAB00, 0xAB00, 0xAB04, 0xAB05, 0xAB10, 0xAB1B, 0xAB20, 0xAB20,
120};
121
122static const unsigned int a6xx_tp_duplicate_cluster[] = {
123 0xB300, 0xB307, 0xB309, 0xB309, 0xB380, 0xB382,
124};
125
126static const unsigned int a6xx_sp_ps_hlsq_cluster[] = {
127 0xB980, 0xB980, 0xB982, 0xB987, 0xB990, 0xB99B, 0xB9A0, 0xB9A2,
128 0xB9C0, 0xB9C9,
129};
130
131static const unsigned int a6xx_sp_ps_hlsq_2d_cluster[] = {
132 0xBD80, 0xBD80,
133};
134
135static const unsigned int a6xx_sp_ps_sp_cluster[] = {
136 0xA980, 0xA9A8, 0xA9B0, 0xA9BC, 0xA9D0, 0xA9D3, 0xA9E0, 0xA9F3,
137 0xAA00, 0xAA00, 0xAA30, 0xAA31,
138};
139
140static const unsigned int a6xx_sp_ps_sp_2d_cluster[] = {
141 0xACC0, 0xACC0,
142};
143
144static const unsigned int a6xx_sp_ps_tp_cluster[] = {
145 0xB180, 0xB183, 0xB190, 0xB191,
146};
147
148static const unsigned int a6xx_sp_ps_tp_2d_cluster[] = {
149 0xB4C0, 0xB4D1,
150};
151
152static struct a6xx_cluster_dbgahb_registers {
153 unsigned int id;
154 unsigned int regbase;
155 unsigned int statetype;
156 const unsigned int *regs;
157 unsigned int num_sets;
Lynus Vaz1e258612017-04-27 21:35:22 +0530158 unsigned int offset0;
159 unsigned int offset1;
Lynus Vaz461e2382017-01-16 19:35:41 +0530160} a6xx_dbgahb_ctx_clusters[] = {
161 { CP_CLUSTER_SP_VS, 0x0002E000, 0x41, a6xx_sp_vs_hlsq_cluster,
162 ARRAY_SIZE(a6xx_sp_vs_hlsq_cluster) / 2 },
163 { CP_CLUSTER_SP_VS, 0x0002A000, 0x21, a6xx_sp_vs_sp_cluster,
164 ARRAY_SIZE(a6xx_sp_vs_sp_cluster) / 2 },
Shrenuj Bansalcbdf19b2017-04-13 11:28:51 -0700165 { CP_CLUSTER_SP_VS, 0x0002E000, 0x41, a6xx_hlsq_duplicate_cluster,
Lynus Vaz461e2382017-01-16 19:35:41 +0530166 ARRAY_SIZE(a6xx_hlsq_duplicate_cluster) / 2 },
167 { CP_CLUSTER_SP_VS, 0x0002F000, 0x45, a6xx_hlsq_2d_duplicate_cluster,
168 ARRAY_SIZE(a6xx_hlsq_2d_duplicate_cluster) / 2 },
Shrenuj Bansalcbdf19b2017-04-13 11:28:51 -0700169 { CP_CLUSTER_SP_VS, 0x0002A000, 0x21, a6xx_sp_duplicate_cluster,
Lynus Vaz461e2382017-01-16 19:35:41 +0530170 ARRAY_SIZE(a6xx_sp_duplicate_cluster) / 2 },
Shrenuj Bansalcbdf19b2017-04-13 11:28:51 -0700171 { CP_CLUSTER_SP_VS, 0x0002C000, 0x1, a6xx_tp_duplicate_cluster,
Lynus Vaz461e2382017-01-16 19:35:41 +0530172 ARRAY_SIZE(a6xx_tp_duplicate_cluster) / 2 },
Shrenuj Bansalcbdf19b2017-04-13 11:28:51 -0700173 { CP_CLUSTER_SP_PS, 0x0002E000, 0x42, a6xx_sp_ps_hlsq_cluster,
Lynus Vaz461e2382017-01-16 19:35:41 +0530174 ARRAY_SIZE(a6xx_sp_ps_hlsq_cluster) / 2 },
Shrenuj Bansalcbdf19b2017-04-13 11:28:51 -0700175 { CP_CLUSTER_SP_PS, 0x0002F000, 0x46, a6xx_sp_ps_hlsq_2d_cluster,
Lynus Vaz461e2382017-01-16 19:35:41 +0530176 ARRAY_SIZE(a6xx_sp_ps_hlsq_2d_cluster) / 2 },
Shrenuj Bansalcbdf19b2017-04-13 11:28:51 -0700177 { CP_CLUSTER_SP_PS, 0x0002A000, 0x22, a6xx_sp_ps_sp_cluster,
Lynus Vaz461e2382017-01-16 19:35:41 +0530178 ARRAY_SIZE(a6xx_sp_ps_sp_cluster) / 2 },
Shrenuj Bansalcbdf19b2017-04-13 11:28:51 -0700179 { CP_CLUSTER_SP_PS, 0x0002B000, 0x26, a6xx_sp_ps_sp_2d_cluster,
Lynus Vaz461e2382017-01-16 19:35:41 +0530180 ARRAY_SIZE(a6xx_sp_ps_sp_2d_cluster) / 2 },
Shrenuj Bansalcbdf19b2017-04-13 11:28:51 -0700181 { CP_CLUSTER_SP_PS, 0x0002C000, 0x2, a6xx_sp_ps_tp_cluster,
Lynus Vaz461e2382017-01-16 19:35:41 +0530182 ARRAY_SIZE(a6xx_sp_ps_tp_cluster) / 2 },
Shrenuj Bansalcbdf19b2017-04-13 11:28:51 -0700183 { CP_CLUSTER_SP_PS, 0x0002D000, 0x6, a6xx_sp_ps_tp_2d_cluster,
Lynus Vaz461e2382017-01-16 19:35:41 +0530184 ARRAY_SIZE(a6xx_sp_ps_tp_2d_cluster) / 2 },
Shrenuj Bansalcbdf19b2017-04-13 11:28:51 -0700185 { CP_CLUSTER_SP_PS, 0x0002E000, 0x42, a6xx_hlsq_duplicate_cluster,
Lynus Vaz461e2382017-01-16 19:35:41 +0530186 ARRAY_SIZE(a6xx_hlsq_duplicate_cluster) / 2 },
Shrenuj Bansalcbdf19b2017-04-13 11:28:51 -0700187 { CP_CLUSTER_SP_VS, 0x0002A000, 0x22, a6xx_sp_duplicate_cluster,
Lynus Vaz461e2382017-01-16 19:35:41 +0530188 ARRAY_SIZE(a6xx_sp_duplicate_cluster) / 2 },
Shrenuj Bansalcbdf19b2017-04-13 11:28:51 -0700189 { CP_CLUSTER_SP_VS, 0x0002C000, 0x2, a6xx_tp_duplicate_cluster,
Lynus Vaz461e2382017-01-16 19:35:41 +0530190 ARRAY_SIZE(a6xx_tp_duplicate_cluster) / 2 },
191};
192
/* (dbgahb cluster, context) pair passed as 'priv' to the dbgahb callbacks */
struct a6xx_cluster_dbgahb_regs_info {
	struct a6xx_cluster_dbgahb_registers *cluster;
	unsigned int ctxt_id;	/* 0 .. A6XX_NUM_CTXTS-1 */
};
197
Shrenuj Bansal41665402016-12-16 15:25:54 -0800198static const unsigned int a6xx_vbif_ver_20xxxxxx_registers[] = {
199 /* VBIF */
200 0x3000, 0x3007, 0x300C, 0x3014, 0x3018, 0x302D, 0x3030, 0x3031,
201 0x3034, 0x3036, 0x303C, 0x303D, 0x3040, 0x3040, 0x3042, 0x3042,
202 0x3049, 0x3049, 0x3058, 0x3058, 0x305A, 0x3061, 0x3064, 0x3068,
203 0x306C, 0x306D, 0x3080, 0x3088, 0x308B, 0x308C, 0x3090, 0x3094,
204 0x3098, 0x3098, 0x309C, 0x309C, 0x30C0, 0x30C0, 0x30C8, 0x30C8,
205 0x30D0, 0x30D0, 0x30D8, 0x30D8, 0x30E0, 0x30E0, 0x3100, 0x3100,
206 0x3108, 0x3108, 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120,
207 0x3124, 0x3125, 0x3129, 0x3129, 0x3131, 0x3131, 0x3154, 0x3154,
208 0x3156, 0x3156, 0x3158, 0x3158, 0x315A, 0x315A, 0x315C, 0x315C,
209 0x315E, 0x315E, 0x3160, 0x3160, 0x3162, 0x3162, 0x340C, 0x340C,
210 0x3410, 0x3410, 0x3800, 0x3801,
211};
212
George Shen1d447b02017-07-12 13:40:28 -0700213static const unsigned int a6xx_gmu_gx_registers[] = {
Kyle Pieferbce21702017-06-08 09:21:28 -0700214 /* GMU GX */
215 0x1A800, 0x1A800, 0x1A810, 0x1A813, 0x1A816, 0x1A816, 0x1A818, 0x1A81B,
216 0x1A81E, 0x1A81E, 0x1A820, 0x1A823, 0x1A826, 0x1A826, 0x1A828, 0x1A82B,
217 0x1A82E, 0x1A82E, 0x1A830, 0x1A833, 0x1A836, 0x1A836, 0x1A838, 0x1A83B,
218 0x1A83E, 0x1A83E, 0x1A840, 0x1A843, 0x1A846, 0x1A846, 0x1A880, 0x1A884,
219 0x1A900, 0x1A92B, 0x1A940, 0x1A940,
George Shen1d447b02017-07-12 13:40:28 -0700220};
221
222static const unsigned int a6xx_gmu_registers[] = {
Kyle Pieferbce21702017-06-08 09:21:28 -0700223 /* GMU TCM */
Kyle Piefer60733aa2017-03-21 11:24:01 -0700224 0x1B400, 0x1C3FF, 0x1C400, 0x1D3FF,
Kyle Pieferbce21702017-06-08 09:21:28 -0700225 /* GMU CX */
226 0x1F400, 0x1F407, 0x1F410, 0x1F412, 0x1F500, 0x1F500, 0x1F507, 0x1F50A,
227 0x1F800, 0x1F804, 0x1F807, 0x1F808, 0x1F80B, 0x1F80C, 0x1F80F, 0x1F81C,
228 0x1F824, 0x1F82A, 0x1F82D, 0x1F830, 0x1F840, 0x1F853, 0x1F887, 0x1F889,
229 0x1F8A0, 0x1F8A2, 0x1F8A4, 0x1F8AF, 0x1F8C0, 0x1F8C3, 0x1F8D0, 0x1F8D0,
230 0x1F8E4, 0x1F8E4, 0x1F8E8, 0x1F8EC, 0x1F900, 0x1F903, 0x1F940, 0x1F940,
231 0x1F942, 0x1F944, 0x1F94C, 0x1F94D, 0x1F94F, 0x1F951, 0x1F954, 0x1F954,
232 0x1F957, 0x1F958, 0x1F95D, 0x1F95D, 0x1F962, 0x1F962, 0x1F964, 0x1F965,
233 0x1F980, 0x1F986, 0x1F990, 0x1F99E, 0x1F9C0, 0x1F9C0, 0x1F9C5, 0x1F9CC,
Lokesh Batrac367dc92017-08-24 13:40:32 -0700234 0x1F9E0, 0x1F9E2, 0x1F9F0, 0x1F9F0, 0x1FA00, 0x1FA01,
Kyle Pieferbce21702017-06-08 09:21:28 -0700235 /* GPU RSCC */
George Shen6927d8f2017-07-19 11:38:10 -0700236 0x2348C, 0x2348C, 0x23501, 0x23502, 0x23740, 0x23742, 0x23744, 0x23747,
237 0x2374C, 0x23787, 0x237EC, 0x237EF, 0x237F4, 0x2382F, 0x23894, 0x23897,
238 0x2389C, 0x238D7, 0x2393C, 0x2393F, 0x23944, 0x2397F,
Kyle Pieferbce21702017-06-08 09:21:28 -0700239 /* GMU AO */
240 0x23B00, 0x23B16, 0x23C00, 0x23C00,
241 /* GPU CC */
242 0x24000, 0x24012, 0x24040, 0x24052, 0x24400, 0x24404, 0x24407, 0x2440B,
243 0x24415, 0x2441C, 0x2441E, 0x2442D, 0x2443C, 0x2443D, 0x2443F, 0x24440,
244 0x24442, 0x24449, 0x24458, 0x2445A, 0x24540, 0x2455E, 0x24800, 0x24802,
245 0x24C00, 0x24C02, 0x25400, 0x25402, 0x25800, 0x25802, 0x25C00, 0x25C02,
246 0x26000, 0x26002,
247 /* GPU CC ACD */
248 0x26400, 0x26416, 0x26420, 0x26427,
Kyle Piefer60733aa2017-03-21 11:24:01 -0700249};
250
/*
 * Non-clustered RB registers split by aperture: these are dumped via
 * a6xx_reg_list with the matching _a6xx_rb_*_aperture select programmed
 * first.  (start, stop) pairs, stop inclusive.
 */
static const unsigned int a6xx_rb_rac_registers[] = {
	0x8E04, 0x8E05, 0x8E07, 0x8E08, 0x8E10, 0x8E1C, 0x8E20, 0x8E25,
	0x8E28, 0x8E28, 0x8E2C, 0x8E2F, 0x8E50, 0x8E52,
};

static const unsigned int a6xx_rb_rbp_registers[] = {
	0x8E01, 0x8E01, 0x8E0C, 0x8E0C, 0x8E3B, 0x8E3E, 0x8E40, 0x8E43,
	0x8E53, 0x8E5F, 0x8E70, 0x8E77,
};
260
/*
 * Map VBIF hardware versions to their register dump table.  The first two
 * fields look like a (version, mask) match pair for VBIF version
 * 0x20xxxxxx -- TODO(review): confirm against adreno_vbif_snapshot_registers.
 */
static const struct adreno_vbif_snapshot_registers
a6xx_vbif_snapshot_registers[] = {
	{ 0x20040000, 0xFF000000, a6xx_vbif_ver_20xxxxxx_registers,
		ARRAY_SIZE(a6xx_vbif_ver_20xxxxxx_registers)/2},
};
266
267/*
268 * Set of registers to dump for A6XX on snapshot.
269 * Registers in pairs - first value is the start offset, second
270 * is the stop offset (inclusive)
271 */
272
273static const unsigned int a6xx_registers[] = {
274 /* RBBM */
Lynus Vazdb0be0a2017-04-20 18:09:17 +0530275 0x0000, 0x0002, 0x0010, 0x0010, 0x0012, 0x0012, 0x0018, 0x001B,
276 0x001e, 0x0032, 0x0038, 0x003C, 0x0042, 0x0042, 0x0044, 0x0044,
277 0x0047, 0x0047, 0x0056, 0x0056, 0x00AD, 0x00AE, 0x00B0, 0x00FB,
Lynus Vaz030473e2017-06-22 17:33:06 +0530278 0x0100, 0x011D, 0x0200, 0x020D, 0x0218, 0x023D, 0x0400, 0x04F9,
279 0x0500, 0x0500, 0x0505, 0x050B, 0x050E, 0x0511, 0x0533, 0x0533,
280 0x0540, 0x0555,
Shrenuj Bansal41665402016-12-16 15:25:54 -0800281 /* CP */
Lynus Vaz030473e2017-06-22 17:33:06 +0530282 0x0800, 0x0808, 0x0810, 0x0813, 0x0820, 0x0821, 0x0823, 0x0824,
283 0x0826, 0x0827, 0x0830, 0x0833, 0x0840, 0x0843, 0x084F, 0x086F,
284 0x0880, 0x088A, 0x08A0, 0x08AB, 0x08C0, 0x08C4, 0x08D0, 0x08DD,
285 0x08F0, 0x08F3, 0x0900, 0x0903, 0x0908, 0x0911, 0x0928, 0x093E,
286 0x0942, 0x094D, 0x0980, 0x0984, 0x098D, 0x0996, 0x0998, 0x099E,
287 0x09A0, 0x09A6, 0x09A8, 0x09AE, 0x09B0, 0x09B1, 0x09C2, 0x09C8,
288 0x0A00, 0x0A03,
Shrenuj Bansal41665402016-12-16 15:25:54 -0800289 /* VSC */
290 0x0C00, 0x0C04, 0x0C06, 0x0C06, 0x0C10, 0x0CD9, 0x0E00, 0x0E0E,
291 /* UCHE */
292 0x0E10, 0x0E13, 0x0E17, 0x0E19, 0x0E1C, 0x0E2B, 0x0E30, 0x0E32,
293 0x0E38, 0x0E39,
294 /* GRAS */
Lynus Vazdb0be0a2017-04-20 18:09:17 +0530295 0x8600, 0x8601, 0x8610, 0x861B, 0x8620, 0x8620, 0x8628, 0x862B,
296 0x8630, 0x8637,
Shrenuj Bansal41665402016-12-16 15:25:54 -0800297 /* VPC */
298 0x9600, 0x9604, 0x9624, 0x9637,
299 /* PC */
300 0x9E00, 0x9E01, 0x9E03, 0x9E0E, 0x9E11, 0x9E16, 0x9E19, 0x9E19,
301 0x9E1C, 0x9E1C, 0x9E20, 0x9E23, 0x9E30, 0x9E31, 0x9E34, 0x9E34,
302 0x9E70, 0x9E72, 0x9E78, 0x9E79, 0x9E80, 0x9FFF,
303 /* VFD */
304 0xA600, 0xA601, 0xA603, 0xA603, 0xA60A, 0xA60A, 0xA610, 0xA617,
Lynus Vazdb0be0a2017-04-20 18:09:17 +0530305 0xA630, 0xA630,
Lynus Vaz3a5a8eb2017-11-08 12:38:10 +0530306 /* SP */
307 0xAE00, 0xAE04, 0xAE0C, 0xAE0C, 0xAE0F, 0xAE2B, 0xAE30, 0xAE32,
308 0xAE35, 0xAE35, 0xAE3A, 0xAE3F, 0xAE50, 0xAE52,
309 /* TP */
310 0xB600, 0xB601, 0xB604, 0xB605, 0xB610, 0xB61B, 0xB620, 0xB623,
311 /* HLSQ */
312 0xBE00, 0xBE01, 0xBE04, 0xBE05, 0xBE08, 0xBE09, 0xBE10, 0xBE15,
313 0xBE20, 0xBE23,
314
Shrenuj Bansal41665402016-12-16 15:25:54 -0800315};
316
/*
 * Set of registers to dump for A6XX before actually triggering crash dumper.
 * Registers in pairs - first value is the start offset, second
 * is the stop offset (inclusive).  Dumped by
 * a6xx_snapshot_pre_crashdump_regs over AHB, so basic status survives even
 * if the crash dumper itself fails.
 */
static const unsigned int a6xx_pre_crashdumper_registers[] = {
	/* RBBM: RBBM_STATUS - RBBM_STATUS3 */
	0x210, 0x213,
	/* CP: CP_STATUS_1 */
	0x825, 0x825,
};
328
Lynus Vaz20c81272017-02-10 16:22:12 +0530329enum a6xx_debugbus_id {
330 A6XX_DBGBUS_CP = 0x1,
331 A6XX_DBGBUS_RBBM = 0x2,
332 A6XX_DBGBUS_VBIF = 0x3,
333 A6XX_DBGBUS_HLSQ = 0x4,
334 A6XX_DBGBUS_UCHE = 0x5,
335 A6XX_DBGBUS_DPM = 0x6,
336 A6XX_DBGBUS_TESS = 0x7,
337 A6XX_DBGBUS_PC = 0x8,
338 A6XX_DBGBUS_VFDP = 0x9,
339 A6XX_DBGBUS_VPC = 0xa,
340 A6XX_DBGBUS_TSE = 0xb,
341 A6XX_DBGBUS_RAS = 0xc,
342 A6XX_DBGBUS_VSC = 0xd,
343 A6XX_DBGBUS_COM = 0xe,
344 A6XX_DBGBUS_LRZ = 0x10,
345 A6XX_DBGBUS_A2D = 0x11,
346 A6XX_DBGBUS_CCUFCHE = 0x12,
Lynus Vazecd472c2017-04-18 14:15:57 +0530347 A6XX_DBGBUS_GMU_CX = 0x13,
Lynus Vaz20c81272017-02-10 16:22:12 +0530348 A6XX_DBGBUS_RBP = 0x14,
349 A6XX_DBGBUS_DCS = 0x15,
350 A6XX_DBGBUS_RBBM_CFG = 0x16,
351 A6XX_DBGBUS_CX = 0x17,
Lynus Vazecd472c2017-04-18 14:15:57 +0530352 A6XX_DBGBUS_GMU_GX = 0x18,
Lynus Vaz20c81272017-02-10 16:22:12 +0530353 A6XX_DBGBUS_TPFCHE = 0x19,
354 A6XX_DBGBUS_GPC = 0x1d,
355 A6XX_DBGBUS_LARC = 0x1e,
356 A6XX_DBGBUS_HLSQ_SPTP = 0x1f,
357 A6XX_DBGBUS_RB_0 = 0x20,
358 A6XX_DBGBUS_RB_1 = 0x21,
359 A6XX_DBGBUS_UCHE_WRAPPER = 0x24,
360 A6XX_DBGBUS_CCU_0 = 0x28,
361 A6XX_DBGBUS_CCU_1 = 0x29,
362 A6XX_DBGBUS_VFD_0 = 0x38,
363 A6XX_DBGBUS_VFD_1 = 0x39,
364 A6XX_DBGBUS_VFD_2 = 0x3a,
365 A6XX_DBGBUS_VFD_3 = 0x3b,
366 A6XX_DBGBUS_SP_0 = 0x40,
367 A6XX_DBGBUS_SP_1 = 0x41,
368 A6XX_DBGBUS_TPL1_0 = 0x48,
369 A6XX_DBGBUS_TPL1_1 = 0x49,
370 A6XX_DBGBUS_TPL1_2 = 0x4a,
371 A6XX_DBGBUS_TPL1_3 = 0x4b,
372};
373
374static const struct adreno_debugbus_block a6xx_dbgc_debugbus_blocks[] = {
375 { A6XX_DBGBUS_CP, 0x100, },
376 { A6XX_DBGBUS_RBBM, 0x100, },
377 { A6XX_DBGBUS_HLSQ, 0x100, },
378 { A6XX_DBGBUS_UCHE, 0x100, },
379 { A6XX_DBGBUS_DPM, 0x100, },
380 { A6XX_DBGBUS_TESS, 0x100, },
381 { A6XX_DBGBUS_PC, 0x100, },
382 { A6XX_DBGBUS_VFDP, 0x100, },
383 { A6XX_DBGBUS_VPC, 0x100, },
384 { A6XX_DBGBUS_TSE, 0x100, },
385 { A6XX_DBGBUS_RAS, 0x100, },
386 { A6XX_DBGBUS_VSC, 0x100, },
387 { A6XX_DBGBUS_COM, 0x100, },
388 { A6XX_DBGBUS_LRZ, 0x100, },
389 { A6XX_DBGBUS_A2D, 0x100, },
390 { A6XX_DBGBUS_CCUFCHE, 0x100, },
391 { A6XX_DBGBUS_RBP, 0x100, },
392 { A6XX_DBGBUS_DCS, 0x100, },
393 { A6XX_DBGBUS_RBBM_CFG, 0x100, },
Lynus Vazecd472c2017-04-18 14:15:57 +0530394 { A6XX_DBGBUS_GMU_GX, 0x100, },
Lynus Vaz20c81272017-02-10 16:22:12 +0530395 { A6XX_DBGBUS_TPFCHE, 0x100, },
396 { A6XX_DBGBUS_GPC, 0x100, },
397 { A6XX_DBGBUS_LARC, 0x100, },
398 { A6XX_DBGBUS_HLSQ_SPTP, 0x100, },
399 { A6XX_DBGBUS_RB_0, 0x100, },
400 { A6XX_DBGBUS_RB_1, 0x100, },
401 { A6XX_DBGBUS_UCHE_WRAPPER, 0x100, },
402 { A6XX_DBGBUS_CCU_0, 0x100, },
403 { A6XX_DBGBUS_CCU_1, 0x100, },
404 { A6XX_DBGBUS_VFD_0, 0x100, },
405 { A6XX_DBGBUS_VFD_1, 0x100, },
406 { A6XX_DBGBUS_VFD_2, 0x100, },
407 { A6XX_DBGBUS_VFD_3, 0x100, },
408 { A6XX_DBGBUS_SP_0, 0x100, },
409 { A6XX_DBGBUS_SP_1, 0x100, },
410 { A6XX_DBGBUS_TPL1_0, 0x100, },
411 { A6XX_DBGBUS_TPL1_1, 0x100, },
412 { A6XX_DBGBUS_TPL1_2, 0x100, },
413 { A6XX_DBGBUS_TPL1_3, 0x100, },
414};
Shrenuj Bansal41665402016-12-16 15:25:54 -0800415
/* VBIF debugbus block descriptor: (block id, dwords per block) */
static const struct adreno_debugbus_block a6xx_vbif_debugbus_blocks = {
	A6XX_DBGBUS_VBIF, 0x100,
};

/* Mapped CX DBGC register space; presumably ioremap'd during snapshot
 * setup outside this chunk -- TODO(review): confirm.
 */
static void __iomem *a6xx_cx_dbgc;
/* Debugbus blocks that live behind the CX (always-on) DBGC */
static const struct adreno_debugbus_block a6xx_cx_dbgc_debugbus_blocks[] = {
	{ A6XX_DBGBUS_GMU_CX, 0x100, },
	{ A6XX_DBGBUS_CX, 0x100, },
};
425
/* Each shader block is replicated across this many banks (see the bank
 * loop in a6xx_snapshot_shader).
 */
#define A6XX_NUM_SHADER_BANKS 3
/* Shift for the statetype field; presumably used when building the crash
 * dumper read for shader memory, outside this chunk -- TODO(review): confirm.
 */
#define A6XX_SHADER_STATETYPE_SHIFT 8
428
429enum a6xx_shader_obj {
430 A6XX_TP0_TMO_DATA = 0x9,
431 A6XX_TP0_SMO_DATA = 0xa,
432 A6XX_TP0_MIPMAP_BASE_DATA = 0xb,
433 A6XX_TP1_TMO_DATA = 0x19,
434 A6XX_TP1_SMO_DATA = 0x1a,
435 A6XX_TP1_MIPMAP_BASE_DATA = 0x1b,
436 A6XX_SP_INST_DATA = 0x29,
437 A6XX_SP_LB_0_DATA = 0x2a,
438 A6XX_SP_LB_1_DATA = 0x2b,
439 A6XX_SP_LB_2_DATA = 0x2c,
440 A6XX_SP_LB_3_DATA = 0x2d,
441 A6XX_SP_LB_4_DATA = 0x2e,
442 A6XX_SP_LB_5_DATA = 0x2f,
443 A6XX_SP_CB_BINDLESS_DATA = 0x30,
444 A6XX_SP_CB_LEGACY_DATA = 0x31,
445 A6XX_SP_UAV_DATA = 0x32,
446 A6XX_SP_INST_TAG = 0x33,
447 A6XX_SP_CB_BINDLESS_TAG = 0x34,
448 A6XX_SP_TMO_UMO_TAG = 0x35,
449 A6XX_SP_SMO_TAG = 0x36,
450 A6XX_SP_STATE_DATA = 0x37,
451 A6XX_HLSQ_CHUNK_CVS_RAM = 0x49,
452 A6XX_HLSQ_CHUNK_CPS_RAM = 0x4a,
453 A6XX_HLSQ_CHUNK_CVS_RAM_TAG = 0x4b,
454 A6XX_HLSQ_CHUNK_CPS_RAM_TAG = 0x4c,
455 A6XX_HLSQ_ICB_CVS_CB_BASE_TAG = 0x4d,
456 A6XX_HLSQ_ICB_CPS_CB_BASE_TAG = 0x4e,
457 A6XX_HLSQ_CVS_MISC_RAM = 0x50,
458 A6XX_HLSQ_CPS_MISC_RAM = 0x51,
459 A6XX_HLSQ_INST_RAM = 0x52,
460 A6XX_HLSQ_GFX_CVS_CONST_RAM = 0x53,
461 A6XX_HLSQ_GFX_CPS_CONST_RAM = 0x54,
462 A6XX_HLSQ_CVS_MISC_RAM_TAG = 0x55,
463 A6XX_HLSQ_CPS_MISC_RAM_TAG = 0x56,
464 A6XX_HLSQ_INST_RAM_TAG = 0x57,
465 A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG = 0x58,
466 A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG = 0x59,
467 A6XX_HLSQ_PWR_REST_RAM = 0x5a,
468 A6XX_HLSQ_PWR_REST_TAG = 0x5b,
469 A6XX_HLSQ_DATAPATH_META = 0x60,
470 A6XX_HLSQ_FRONTEND_META = 0x61,
471 A6XX_HLSQ_INDIRECT_META = 0x62,
472 A6XX_HLSQ_BACKEND_META = 0x63
473};
474
/*
 * One dumpable shader memory block.  'sz' is the block size (units --
 * bytes vs dwords -- are fixed by SHADER_SECTION_SZ and the crashdump
 * script, neither visible here; TODO(review): confirm).  'offset' is where
 * the crash dumper deposits this block's data, filled in outside this chunk.
 */
struct a6xx_shader_block {
	unsigned int statetype;	/* A6XX_* id from enum a6xx_shader_obj */
	unsigned int sz;
	uint64_t offset;
};

/* (block, bank) pair passed as 'priv' to a6xx_snapshot_shader_memory */
struct a6xx_shader_block_info {
	struct a6xx_shader_block *block;
	unsigned int bank;	/* 0 .. A6XX_NUM_SHADER_BANKS-1 */
	uint64_t offset;	/* block->offset + bank * block->sz */
};
486
487static struct a6xx_shader_block a6xx_shader_blocks[] = {
488 {A6XX_TP0_TMO_DATA, 0x200},
489 {A6XX_TP0_SMO_DATA, 0x80,},
490 {A6XX_TP0_MIPMAP_BASE_DATA, 0x3C0},
491 {A6XX_TP1_TMO_DATA, 0x200},
492 {A6XX_TP1_SMO_DATA, 0x80,},
493 {A6XX_TP1_MIPMAP_BASE_DATA, 0x3C0},
494 {A6XX_SP_INST_DATA, 0x800},
495 {A6XX_SP_LB_0_DATA, 0x800},
496 {A6XX_SP_LB_1_DATA, 0x800},
497 {A6XX_SP_LB_2_DATA, 0x800},
498 {A6XX_SP_LB_3_DATA, 0x800},
499 {A6XX_SP_LB_4_DATA, 0x800},
500 {A6XX_SP_LB_5_DATA, 0x200},
501 {A6XX_SP_CB_BINDLESS_DATA, 0x2000},
502 {A6XX_SP_CB_LEGACY_DATA, 0x280,},
503 {A6XX_SP_UAV_DATA, 0x80,},
504 {A6XX_SP_INST_TAG, 0x80,},
505 {A6XX_SP_CB_BINDLESS_TAG, 0x80,},
506 {A6XX_SP_TMO_UMO_TAG, 0x80,},
507 {A6XX_SP_SMO_TAG, 0x80},
508 {A6XX_SP_STATE_DATA, 0x3F},
509 {A6XX_HLSQ_CHUNK_CVS_RAM, 0x1C0},
510 {A6XX_HLSQ_CHUNK_CPS_RAM, 0x280},
511 {A6XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40,},
512 {A6XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40,},
513 {A6XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x4,},
514 {A6XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x4,},
515 {A6XX_HLSQ_CVS_MISC_RAM, 0x1C0},
516 {A6XX_HLSQ_CPS_MISC_RAM, 0x580},
517 {A6XX_HLSQ_INST_RAM, 0x800},
518 {A6XX_HLSQ_GFX_CVS_CONST_RAM, 0x800},
519 {A6XX_HLSQ_GFX_CPS_CONST_RAM, 0x800},
520 {A6XX_HLSQ_CVS_MISC_RAM_TAG, 0x8,},
521 {A6XX_HLSQ_CPS_MISC_RAM_TAG, 0x4,},
522 {A6XX_HLSQ_INST_RAM_TAG, 0x80,},
523 {A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0xC,},
524 {A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x10},
525 {A6XX_HLSQ_PWR_REST_RAM, 0x28},
526 {A6XX_HLSQ_PWR_REST_TAG, 0x14},
527 {A6XX_HLSQ_DATAPATH_META, 0x40,},
528 {A6XX_HLSQ_FRONTEND_META, 0x40},
529 {A6XX_HLSQ_INDIRECT_META, 0x40,}
530};
531
/* GPU-visible buffer holding the crash dumper script */
static struct kgsl_memdesc a6xx_capturescript;
/* GPU-visible buffer the crash dumper writes register/shader data into */
static struct kgsl_memdesc a6xx_crashdump_registers;
/* True once the crash dumper ran successfully; when false, the snapshot
 * callbacks fall back to legacy AHB reads.
 */
static bool crash_dump_valid;
535
/*
 * Non-clustered register lists dumped by a6xx_snapshot_registers.  Each
 * entry has a (start, stop) pair table, an optional aperture select, and
 * the offset at which the crash dumper deposited its values (filled in
 * outside this chunk -- TODO(review): confirm).
 */
static struct reg_list {
	const unsigned int *regs;	/* (start, stop) pair table */
	unsigned int count;		/* number of pairs */
	const struct sel_reg *sel;	/* optional aperture select */
	uint64_t offset;		/* offset into a6xx_crashdump_registers */
} a6xx_reg_list[] = {
	{ a6xx_registers, ARRAY_SIZE(a6xx_registers) / 2, NULL },
	{ a6xx_rb_rac_registers, ARRAY_SIZE(a6xx_rb_rac_registers) / 2,
		&_a6xx_rb_rac_aperture },
	{ a6xx_rb_rbp_registers, ARRAY_SIZE(a6xx_rb_rbp_registers) / 2,
		&_a6xx_rb_rbp_aperture },
};

/* Number of registers covered by pair _i of pair-table _a (stop inclusive) */
#define REG_PAIR_COUNT(_a, _i) \
	(((_a)[(2 * (_i)) + 1] - (_a)[2 * (_i)]) + 1)
551
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -0600552static size_t a6xx_legacy_snapshot_registers(struct kgsl_device *device,
Lynus Vaz96de8522017-09-13 20:17:03 +0530553 u8 *buf, size_t remain, struct reg_list *regs)
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -0600554{
Lynus Vaz96de8522017-09-13 20:17:03 +0530555 struct kgsl_snapshot_registers snapshot_regs = {
556 .regs = regs->regs,
557 .count = regs->count,
558 };
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -0600559
Lynus Vaz96de8522017-09-13 20:17:03 +0530560 if (regs->sel)
561 kgsl_regwrite(device, regs->sel->host_reg, regs->sel->val);
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -0600562
Lynus Vaz96de8522017-09-13 20:17:03 +0530563 return kgsl_snapshot_dump_registers(device, buf, remain,
564 &snapshot_regs);
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -0600565}
566
/*
 * Dump one a6xx_reg_list entry into the snapshot as a REGS section.
 * Output after the header is (address, value) dword pairs, hence the 8
 * bytes accounted per register.  Falls back to AHB reads when the crash
 * dumper did not run.  Returns the section size in bytes.
 */
static size_t a6xx_snapshot_registers(struct kgsl_device *device, u8 *buf,
	size_t remain, void *priv)
{
	struct kgsl_snapshot_regs *header = (struct kgsl_snapshot_regs *)buf;
	struct reg_list *regs = (struct reg_list *)priv;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int *src;
	unsigned int j, k;
	unsigned int count = 0;

	if (crash_dump_valid == false)
		return a6xx_legacy_snapshot_registers(device, buf, remain,
				regs);

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	/* The crash dumper deposited this list's values at regs->offset */
	src = (unsigned int *)(a6xx_crashdump_registers.hostptr + regs->offset);
	remain -= sizeof(*header);

	for (j = 0; j < regs->count; j++) {
		unsigned int start = regs->regs[2 * j];
		unsigned int end = regs->regs[(2 * j) + 1];

		/* Each register in [start, end] emits an 8-byte pair */
		if (remain < ((end - start) + 1) * 8) {
			SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
			goto out;
		}

		remain -= ((end - start) + 1) * 8;

		for (k = start; k <= end; k++, count++) {
			*data++ = k;
			*data++ = *src++;
		}
	}

out:
	/* Record how many registers were actually written (may be short) */
	header->count = count;

	/* Return the size of the section */
	return (count * 8) + sizeof(*header);
}
612
Lynus Vaz030473e2017-06-22 17:33:06 +0530613static size_t a6xx_snapshot_pre_crashdump_regs(struct kgsl_device *device,
614 u8 *buf, size_t remain, void *priv)
615{
616 struct kgsl_snapshot_registers pre_cdregs = {
617 .regs = a6xx_pre_crashdumper_registers,
618 .count = ARRAY_SIZE(a6xx_pre_crashdumper_registers)/2,
619 };
620
621 return kgsl_snapshot_dump_registers(device, buf, remain, &pre_cdregs);
622}
623
Lynus Vaz9ad67a32017-03-10 14:55:02 +0530624static size_t a6xx_snapshot_shader_memory(struct kgsl_device *device,
625 u8 *buf, size_t remain, void *priv)
626{
627 struct kgsl_snapshot_shader *header =
628 (struct kgsl_snapshot_shader *) buf;
629 struct a6xx_shader_block_info *info =
630 (struct a6xx_shader_block_info *) priv;
631 struct a6xx_shader_block *block = info->block;
632 unsigned int *data = (unsigned int *) (buf + sizeof(*header));
633
634 if (remain < SHADER_SECTION_SZ(block->sz)) {
635 SNAPSHOT_ERR_NOMEM(device, "SHADER MEMORY");
636 return 0;
637 }
638
639 header->type = block->statetype;
640 header->index = info->bank;
641 header->size = block->sz;
642
643 memcpy(data, a6xx_crashdump_registers.hostptr + info->offset,
644 block->sz);
645
646 return SHADER_SECTION_SZ(block->sz);
647}
648
649static void a6xx_snapshot_shader(struct kgsl_device *device,
650 struct kgsl_snapshot *snapshot)
651{
652 unsigned int i, j;
653 struct a6xx_shader_block_info info;
654
655 /* Shader blocks can only be read by the crash dumper */
656 if (crash_dump_valid == false)
657 return;
658
659 for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
660 for (j = 0; j < A6XX_NUM_SHADER_BANKS; j++) {
661 info.block = &a6xx_shader_blocks[i];
662 info.bank = j;
663 info.offset = a6xx_shader_blocks[i].offset +
664 (j * a6xx_shader_blocks[i].sz);
665
666 /* Shader working/shadow memory */
667 kgsl_snapshot_add_section(device,
668 KGSL_SNAPSHOT_SECTION_SHADER,
669 snapshot, a6xx_snapshot_shader_memory, &info);
670 }
671 }
672}
673
/*
 * Dump the CP mempool through the indexed-register interface.  The pool
 * size register is forced to 0 while dumping to keep the contents stable,
 * then restored; the saved size is patched back into the dumped data so
 * the snapshot stays self-consistent.
 */
static void a6xx_snapshot_mempool(struct kgsl_device *device,
	struct kgsl_snapshot *snapshot)
{
	unsigned int pool_size;
	u8 *buf = snapshot->ptr;	/* remember where this section starts */

	/* Set the mempool size to 0 to stabilize it while dumping */
	kgsl_regread(device, A6XX_CP_MEM_POOL_SIZE, &pool_size);
	kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, 0);

	kgsl_snapshot_indexed_registers(device, snapshot,
		A6XX_CP_MEM_POOL_DBG_ADDR, A6XX_CP_MEM_POOL_DBG_DATA,
		0, 0x2060);

	/*
	 * Data at offset 0x2000 in the mempool section is the mempool size.
	 * Since we set it to 0, patch in the original size so that the data
	 * is consistent.  snapshot->ptr advanced only if the section was
	 * actually written, so the check below also guards the patch.
	 */
	if (buf < snapshot->ptr) {
		unsigned int *data;

		/* Skip over the headers */
		buf += sizeof(struct kgsl_snapshot_section_header) +
			sizeof(struct kgsl_snapshot_indexed_regs);

		/* 0x2000 is a dword index into the dumped data */
		data = (unsigned int *)buf + 0x2000;
		*data = pool_size;
	}

	/* Restore the saved mempool size */
	kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, pool_size);
}
707
/*
 * Read one register through the HLSQ AHB debug aperture.
 *
 * 'regbase' is a byte address (e.g. 0x0002E000 in
 * a6xx_dbgahb_ctx_clusters) while 'reg' is a dword offset, so the base is
 * divided by 4 before subtracting.  Note the operator precedence: this
 * computes reg - (regbase / 4), which matches the table values above and
 * appears intentional -- NOTE(review): confirm against the HW docs.
 */
static inline unsigned int a6xx_read_dbgahb(struct kgsl_device *device,
	unsigned int regbase, unsigned int reg)
{
	unsigned int read_reg = A6XX_HLSQ_DBG_AHB_READ_APERTURE +
		reg - regbase / 4;
	unsigned int val;

	kgsl_regread(device, read_reg, &val);
	return val;
}
718
/*
 * Dump one dbgahb cluster/context over AHB (legacy path).  Only runs when
 * the user asked for a legacy snapshot; otherwise returns 0 so no section
 * data is emitted.  Output is MVC format: per range, a start word with
 * bit 31 set, the end offset, then one value per register.
 */
static size_t a6xx_legacy_snapshot_cluster_dbgahb(struct kgsl_device *device,
				u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_mvc_regs *header =
				(struct kgsl_snapshot_mvc_regs *)buf;
	struct a6xx_cluster_dbgahb_regs_info *info =
				(struct a6xx_cluster_dbgahb_regs_info *)priv;
	struct a6xx_cluster_dbgahb_registers *cur_cluster = info->cluster;
	unsigned int read_sel;
	unsigned int data_size = 0;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	int i, j;

	if (!device->snapshot_legacy)
		return 0;

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	remain -= sizeof(*header);

	header->ctxt_id = info->ctxt_id;
	header->cluster_id = cur_cluster->id;

	/* Statetype (offset by 2 per context) goes in bits 8-15 of the sel */
	read_sel = ((cur_cluster->statetype + info->ctxt_id * 2) & 0xff) << 8;
	kgsl_regwrite(device, A6XX_HLSQ_DBG_READ_SEL, read_sel);

	for (i = 0; i < cur_cluster->num_sets; i++) {
		unsigned int start = cur_cluster->regs[2 * i];
		unsigned int end = cur_cluster->regs[2 * i + 1];

		/* 2 marker words + one value per register, 4 bytes each */
		if (remain < (end - start + 3) * 4) {
			SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
			goto out;
		}

		remain -= (end - start + 3) * 4;
		data_size += (end - start + 3) * 4;

		*data++ = start | (1 << 31);	/* bit 31 marks a range start */
		*data++ = end;

		for (j = start; j <= end; j++) {
			unsigned int val;

			val = a6xx_read_dbgahb(device, cur_cluster->regbase, j);
			*data++ = val;

		}
	}

out:
	return data_size + sizeof(*header);
}
775
/*
 * Dump one dbgahb cluster/context from the crash dumper output buffer.
 * Falls back to the legacy AHB path when the dump is invalid.  Output is
 * MVC format (see a6xx_legacy_snapshot_cluster_dbgahb); the values are
 * copied from the per-context offset the crash dumper wrote them to.
 */
static size_t a6xx_snapshot_cluster_dbgahb(struct kgsl_device *device, u8 *buf,
	size_t remain, void *priv)
{
	struct kgsl_snapshot_mvc_regs *header =
		(struct kgsl_snapshot_mvc_regs *)buf;
	struct a6xx_cluster_dbgahb_regs_info *info =
		(struct a6xx_cluster_dbgahb_regs_info *)priv;
	struct a6xx_cluster_dbgahb_registers *cluster = info->cluster;
	unsigned int data_size = 0;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	int i, j;
	unsigned int *src;


	if (crash_dump_valid == false)
		return a6xx_legacy_snapshot_cluster_dbgahb(device, buf, remain,
				info);

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	remain -= sizeof(*header);

	header->ctxt_id = info->ctxt_id;
	header->cluster_id = cluster->id;

	/* Context 0 data lives at offset0, context 1 at offset1 */
	src = (unsigned int *)(a6xx_crashdump_registers.hostptr +
		(header->ctxt_id ? cluster->offset1 : cluster->offset0));

	for (i = 0; i < cluster->num_sets; i++) {
		unsigned int start;
		unsigned int end;

		start = cluster->regs[2 * i];
		end = cluster->regs[2 * i + 1];

		/* 2 marker words + one value per register, 4 bytes each */
		if (remain < (end - start + 3) * 4) {
			SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
			goto out;
		}

		remain -= (end - start + 3) * 4;
		data_size += (end - start + 3) * 4;

		*data++ = start | (1 << 31);	/* bit 31 marks a range start */
		*data++ = end;
		for (j = start; j <= end; j++)
			*data++ = *src++;
	}
out:
	return data_size + sizeof(*header);
}
830
Lynus Vaz461e2382017-01-16 19:35:41 +0530831static void a6xx_snapshot_dbgahb_regs(struct kgsl_device *device,
832 struct kgsl_snapshot *snapshot)
833{
834 int i, j;
835
836 for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
837 struct a6xx_cluster_dbgahb_registers *cluster =
838 &a6xx_dbgahb_ctx_clusters[i];
839 struct a6xx_cluster_dbgahb_regs_info info;
840
841 info.cluster = cluster;
842 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
843 info.ctxt_id = j;
844
845 kgsl_snapshot_add_section(device,
846 KGSL_SNAPSHOT_SECTION_MVC, snapshot,
847 a6xx_snapshot_cluster_dbgahb, &info);
848 }
849 }
Lynus Vaz461e2382017-01-16 19:35:41 +0530850}
851
Shrenuj Bansal41665402016-12-16 15:25:54 -0800852static size_t a6xx_legacy_snapshot_mvc(struct kgsl_device *device, u8 *buf,
853 size_t remain, void *priv)
854{
855 struct kgsl_snapshot_mvc_regs *header =
856 (struct kgsl_snapshot_mvc_regs *)buf;
857 struct a6xx_cluster_regs_info *info =
858 (struct a6xx_cluster_regs_info *)priv;
859 struct a6xx_cluster_registers *cur_cluster = info->cluster;
860 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
861 unsigned int ctxt = info->ctxt_id;
862 unsigned int start, end, i, j, aperture_cntl = 0;
863 unsigned int data_size = 0;
864
865 if (remain < sizeof(*header)) {
866 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
867 return 0;
868 }
869
870 remain -= sizeof(*header);
871
872 header->ctxt_id = info->ctxt_id;
873 header->cluster_id = cur_cluster->id;
874
875 /*
876 * Set the AHB control for the Host to read from the
877 * cluster/context for this iteration.
878 */
879 aperture_cntl = ((cur_cluster->id & 0x7) << 8) | (ctxt << 4) | ctxt;
880 kgsl_regwrite(device, A6XX_CP_APERTURE_CNTL_HOST, aperture_cntl);
881
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -0600882 if (cur_cluster->sel)
883 kgsl_regwrite(device, cur_cluster->sel->host_reg,
884 cur_cluster->sel->val);
885
Shrenuj Bansal41665402016-12-16 15:25:54 -0800886 for (i = 0; i < cur_cluster->num_sets; i++) {
887 start = cur_cluster->regs[2 * i];
888 end = cur_cluster->regs[2 * i + 1];
889
890 if (remain < (end - start + 3) * 4) {
891 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
892 goto out;
893 }
894
895 remain -= (end - start + 3) * 4;
896 data_size += (end - start + 3) * 4;
897
898 *data++ = start | (1 << 31);
899 *data++ = end;
900 for (j = start; j <= end; j++) {
901 unsigned int val;
902
903 kgsl_regread(device, j, &val);
904 *data++ = val;
905 }
906 }
907out:
908 return data_size + sizeof(*header);
909}
910
911static size_t a6xx_snapshot_mvc(struct kgsl_device *device, u8 *buf,
912 size_t remain, void *priv)
913{
914 struct kgsl_snapshot_mvc_regs *header =
915 (struct kgsl_snapshot_mvc_regs *)buf;
916 struct a6xx_cluster_regs_info *info =
917 (struct a6xx_cluster_regs_info *)priv;
918 struct a6xx_cluster_registers *cluster = info->cluster;
919 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
920 unsigned int *src;
921 int i, j;
922 unsigned int start, end;
923 size_t data_size = 0;
924
925 if (crash_dump_valid == false)
926 return a6xx_legacy_snapshot_mvc(device, buf, remain, info);
927
928 if (remain < sizeof(*header)) {
929 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
930 return 0;
931 }
932
933 remain -= sizeof(*header);
934
935 header->ctxt_id = info->ctxt_id;
936 header->cluster_id = cluster->id;
937
938 src = (unsigned int *)(a6xx_crashdump_registers.hostptr +
939 (header->ctxt_id ? cluster->offset1 : cluster->offset0));
940
941 for (i = 0; i < cluster->num_sets; i++) {
942 start = cluster->regs[2 * i];
943 end = cluster->regs[2 * i + 1];
944
945 if (remain < (end - start + 3) * 4) {
946 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
947 goto out;
948 }
949
950 remain -= (end - start + 3) * 4;
951 data_size += (end - start + 3) * 4;
952
953 *data++ = start | (1 << 31);
954 *data++ = end;
955 for (j = start; j <= end; j++)
956 *data++ = *src++;
957 }
958
959out:
960 return data_size + sizeof(*header);
961
962}
963
964static void a6xx_snapshot_mvc_regs(struct kgsl_device *device,
965 struct kgsl_snapshot *snapshot)
966{
967 int i, j;
968 struct a6xx_cluster_regs_info info;
969
970 for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
971 struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];
972
973 info.cluster = cluster;
974 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
975 info.ctxt_id = j;
976
977 kgsl_snapshot_add_section(device,
978 KGSL_SNAPSHOT_SECTION_MVC, snapshot,
979 a6xx_snapshot_mvc, &info);
980 }
981 }
982}
983
Lynus Vaz20c81272017-02-10 16:22:12 +0530984/* a6xx_dbgc_debug_bus_read() - Read data from trace bus */
985static void a6xx_dbgc_debug_bus_read(struct kgsl_device *device,
986 unsigned int block_id, unsigned int index, unsigned int *val)
987{
988 unsigned int reg;
989
990 reg = (block_id << A6XX_DBGC_CFG_DBGBUS_SEL_PING_BLK_SEL_SHIFT) |
991 (index << A6XX_DBGC_CFG_DBGBUS_SEL_PING_INDEX_SHIFT);
992
993 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_A, reg);
994 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_B, reg);
995 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_C, reg);
996 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_D, reg);
997
Shrenuj Bansald4508ba2017-05-11 15:59:37 -0700998 /*
999 * There needs to be a delay of 1 us to ensure enough time for correct
1000 * data is funneled into the trace buffer
1001 */
1002 udelay(1);
1003
Lynus Vaz20c81272017-02-10 16:22:12 +05301004 kgsl_regread(device, A6XX_DBGC_CFG_DBGBUS_TRACE_BUF2, val);
1005 val++;
1006 kgsl_regread(device, A6XX_DBGC_CFG_DBGBUS_TRACE_BUF1, val);
1007}
1008
Lynus Vazdaac540732017-07-27 14:23:35 +05301009/* a6xx_snapshot_dbgc_debugbus_block() - Capture debug data for a gpu block */
Lynus Vaz20c81272017-02-10 16:22:12 +05301010static size_t a6xx_snapshot_dbgc_debugbus_block(struct kgsl_device *device,
1011 u8 *buf, size_t remain, void *priv)
1012{
Lynus Vazecd472c2017-04-18 14:15:57 +05301013 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
Lynus Vaz20c81272017-02-10 16:22:12 +05301014 struct kgsl_snapshot_debugbus *header =
1015 (struct kgsl_snapshot_debugbus *)buf;
1016 struct adreno_debugbus_block *block = priv;
1017 int i;
1018 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
1019 unsigned int dwords;
Lynus Vazecd472c2017-04-18 14:15:57 +05301020 unsigned int block_id;
Lynus Vaz20c81272017-02-10 16:22:12 +05301021 size_t size;
1022
1023 dwords = block->dwords;
1024
1025 /* For a6xx each debug bus data unit is 2 DWORDS */
1026 size = (dwords * sizeof(unsigned int) * 2) + sizeof(*header);
1027
1028 if (remain < size) {
1029 SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
1030 return 0;
1031 }
1032
1033 header->id = block->block_id;
1034 header->count = dwords * 2;
1035
Lynus Vazecd472c2017-04-18 14:15:57 +05301036 block_id = block->block_id;
1037 /* GMU_GX data is read using the GMU_CX block id on A630 */
1038 if (adreno_is_a630(adreno_dev) &&
1039 (block_id == A6XX_DBGBUS_GMU_GX))
1040 block_id = A6XX_DBGBUS_GMU_CX;
1041
Lynus Vaz20c81272017-02-10 16:22:12 +05301042 for (i = 0; i < dwords; i++)
Lynus Vazecd472c2017-04-18 14:15:57 +05301043 a6xx_dbgc_debug_bus_read(device, block_id, i, &data[i*2]);
Lynus Vaz20c81272017-02-10 16:22:12 +05301044
1045 return size;
1046}
1047
/*
 * a6xx_snapshot_vbif_debugbus_block() - Capture debug data for VBIF block
 * @device: Device being snapshotted
 * @buf: Destination snapshot buffer
 * @remain: Bytes left in the snapshot buffer
 * @priv: Pointer to the VBIF adreno_debugbus_block descriptor
 *
 * Walks the VBIF test bus: for each sub-block a select register is
 * written and the block's data words are read one at a time from
 * A6XX_VBIF_TEST_BUS_OUT. Returns the number of bytes written to the
 * snapshot buffer.
 */
static size_t a6xx_snapshot_vbif_debugbus_block(struct kgsl_device *device,
			u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_debugbus *header =
		(struct kgsl_snapshot_debugbus *)buf;
	struct adreno_debugbus_block *block = priv;
	int i, j;
	/*
	 * Total number of VBIF data words considering 3 sections:
	 * 2 arbiter blocks of 16 words
	 * 5 AXI XIN blocks of 18 dwords each
	 * 4 core clock side XIN blocks of 12 dwords each
	 */
	unsigned int dwords = (16 * A6XX_NUM_AXI_ARB_BLOCKS) +
			(18 * A6XX_NUM_XIN_AXI_BLOCKS) +
			(12 * A6XX_NUM_XIN_CORE_BLOCKS);
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	size_t size;
	unsigned int reg_clk;

	size = (dwords * sizeof(unsigned int)) + sizeof(*header);

	if (remain < size) {
		SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
		return 0;
	}
	header->id = block->block_id;
	header->count = dwords;

	/* Save the clock state, then force the test bus clock on */
	kgsl_regread(device, A6XX_VBIF_CLKON, &reg_clk);
	kgsl_regwrite(device, A6XX_VBIF_CLKON, reg_clk |
			(A6XX_VBIF_CLKON_FORCE_ON_TESTBUS_MASK <<
			A6XX_VBIF_CLKON_FORCE_ON_TESTBUS_SHIFT));
	kgsl_regwrite(device, A6XX_VBIF_TEST_BUS1_CTRL0, 0);
	/* Enable test bus output */
	kgsl_regwrite(device, A6XX_VBIF_TEST_BUS_OUT_CTRL,
			(A6XX_VBIF_TEST_BUS_OUT_CTRL_EN_MASK <<
			A6XX_VBIF_TEST_BUS_OUT_CTRL_EN_SHIFT));

	/* AXI arbiter blocks, selected via bits 16+ of BUS2_CTRL0 */
	for (i = 0; i < A6XX_NUM_AXI_ARB_BLOCKS; i++) {
		kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL0,
			(1 << (i + 16)));
		for (j = 0; j < 16; j++) {
			kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL1,
				((j & A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL_MASK)
				<< A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL_SHIFT));
			kgsl_regread(device, A6XX_VBIF_TEST_BUS_OUT,
				data);
			data++;
		}
	}

	/* XIN blocks AXI side */
	for (i = 0; i < A6XX_NUM_XIN_AXI_BLOCKS; i++) {
		kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL0, 1 << i);
		for (j = 0; j < 18; j++) {
			kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL1,
				((j & A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL_MASK)
				<< A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL_SHIFT));
			kgsl_regread(device, A6XX_VBIF_TEST_BUS_OUT,
				data);
			data++;
		}
	}
	kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL0, 0);

	/* XIN blocks core clock side */
	for (i = 0; i < A6XX_NUM_XIN_CORE_BLOCKS; i++) {
		kgsl_regwrite(device, A6XX_VBIF_TEST_BUS1_CTRL0, 1 << i);
		for (j = 0; j < 12; j++) {
			kgsl_regwrite(device, A6XX_VBIF_TEST_BUS1_CTRL1,
				((j & A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL_MASK)
				<< A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL_SHIFT));
			kgsl_regread(device, A6XX_VBIF_TEST_BUS_OUT,
				data);
			data++;
		}
	}
	/* restore the clock of VBIF */
	kgsl_regwrite(device, A6XX_VBIF_CLKON, reg_clk);
	return size;
}
1130
/*
 * _cx_dbgc_regread() - Read a dword from the ioremapped CX DBGC block
 * @offsetwords: Register offset in dwords (device address map)
 * @value: Out pointer for the value read
 *
 * Offsets outside the [SEL_A, TRACE_BUF2] window are rejected with a
 * WARN since only that range was ioremapped into a6xx_cx_dbgc.
 */
static void _cx_dbgc_regread(unsigned int offsetwords, unsigned int *value)
{
	void __iomem *reg;

	if (WARN((offsetwords < A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) ||
		(offsetwords > A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2),
		"Read beyond CX_DBGC block: 0x%x\n", offsetwords))
		return;

	/* Convert the dword offset to a byte offset within the mapping */
	reg = a6xx_cx_dbgc +
		((offsetwords - A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) << 2);
	*value = __raw_readl(reg);

	/*
	 * ensure this read finishes before the next one.
	 * i.e. act like normal readl()
	 */
	rmb();
}
1150
/*
 * _cx_dbgc_regwrite() - Write a dword to the ioremapped CX DBGC block
 * @offsetwords: Register offset in dwords (device address map)
 * @value: Value to write
 *
 * Offsets outside the [SEL_A, TRACE_BUF2] window are rejected with a
 * WARN since only that range was ioremapped into a6xx_cx_dbgc.
 */
static void _cx_dbgc_regwrite(unsigned int offsetwords, unsigned int value)
{
	void __iomem *reg;

	if (WARN((offsetwords < A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) ||
		(offsetwords > A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2),
		"Write beyond CX_DBGC block: 0x%x\n", offsetwords))
		return;

	/* Convert the dword offset to a byte offset within the mapping */
	reg = a6xx_cx_dbgc +
		((offsetwords - A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) << 2);

	/*
	 * ensure previous writes post before this one,
	 * i.e. act like normal writel()
	 */
	wmb();
	__raw_writel(value, reg);
}
1170
1171/* a6xx_cx_dbgc_debug_bus_read() - Read data from trace bus */
1172static void a6xx_cx_debug_bus_read(struct kgsl_device *device,
1173 unsigned int block_id, unsigned int index, unsigned int *val)
1174{
1175 unsigned int reg;
1176
1177 reg = (block_id << A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_BLK_SEL_SHIFT) |
1178 (index << A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_INDEX_SHIFT);
1179
1180 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_A, reg);
1181 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_B, reg);
1182 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_C, reg);
1183 _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_D, reg);
1184
Shrenuj Bansald4508ba2017-05-11 15:59:37 -07001185 /*
1186 * There needs to be a delay of 1 us to ensure enough time for correct
1187 * data is funneled into the trace buffer
1188 */
1189 udelay(1);
1190
Lynus Vazff24c972017-03-07 19:27:46 +05301191 _cx_dbgc_regread(A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2, val);
1192 val++;
1193 _cx_dbgc_regread(A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1, val);
1194}
1195
1196/*
1197 * a6xx_snapshot_cx_dbgc_debugbus_block() - Capture debug data for a gpu
1198 * block from the CX DBGC block
1199 */
1200static size_t a6xx_snapshot_cx_dbgc_debugbus_block(struct kgsl_device *device,
1201 u8 *buf, size_t remain, void *priv)
1202{
1203 struct kgsl_snapshot_debugbus *header =
1204 (struct kgsl_snapshot_debugbus *)buf;
1205 struct adreno_debugbus_block *block = priv;
1206 int i;
1207 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
1208 unsigned int dwords;
1209 size_t size;
1210
1211 dwords = block->dwords;
1212
1213 /* For a6xx each debug bus data unit is 2 DWRODS */
1214 size = (dwords * sizeof(unsigned int) * 2) + sizeof(*header);
1215
1216 if (remain < size) {
1217 SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
1218 return 0;
1219 }
1220
1221 header->id = block->block_id;
1222 header->count = dwords * 2;
1223
1224 for (i = 0; i < dwords; i++)
1225 a6xx_cx_debug_bus_read(device, block->block_id, i,
1226 &data[i*2]);
1227
1228 return size;
1229}
1230
/*
 * a6xx_snapshot_debugbus() - Capture debug bus data
 * @device: Device being snapshotted
 * @snapshot: Snapshot to add the debug bus sections to
 *
 * Programs the GX-side DBGC and (via a temporary ioremap) the CX-side
 * DBGC trace controls, then adds one DEBUGBUS section per block. The
 * VBIF test bus is only dumped on parts without a GBIF.
 */
static void a6xx_snapshot_debugbus(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot)
{
	int i;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	/* Program the DBGC trace controls before sampling any block */
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_CNTLT,
		(0xf << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT_SHIFT) |
		(0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU_SHIFT) |
		(0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN_SHIFT));

	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_CNTLM,
		0xf << A6XX_DBGC_CFG_DBGBUS_CTLTM_ENABLE_SHIFT);

	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_0, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_1, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_2, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_3, 0);

	/* Identity byte-lane mapping: lane N carries byte N */
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_BYTEL_0,
		(0 << A6XX_DBGC_CFG_DBGBUS_BYTEL0_SHIFT) |
		(1 << A6XX_DBGC_CFG_DBGBUS_BYTEL1_SHIFT) |
		(2 << A6XX_DBGC_CFG_DBGBUS_BYTEL2_SHIFT) |
		(3 << A6XX_DBGC_CFG_DBGBUS_BYTEL3_SHIFT) |
		(4 << A6XX_DBGC_CFG_DBGBUS_BYTEL4_SHIFT) |
		(5 << A6XX_DBGC_CFG_DBGBUS_BYTEL5_SHIFT) |
		(6 << A6XX_DBGC_CFG_DBGBUS_BYTEL6_SHIFT) |
		(7 << A6XX_DBGC_CFG_DBGBUS_BYTEL7_SHIFT));
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_BYTEL_1,
		(8 << A6XX_DBGC_CFG_DBGBUS_BYTEL8_SHIFT) |
		(9 << A6XX_DBGC_CFG_DBGBUS_BYTEL9_SHIFT) |
		(10 << A6XX_DBGC_CFG_DBGBUS_BYTEL10_SHIFT) |
		(11 << A6XX_DBGC_CFG_DBGBUS_BYTEL11_SHIFT) |
		(12 << A6XX_DBGC_CFG_DBGBUS_BYTEL12_SHIFT) |
		(13 << A6XX_DBGC_CFG_DBGBUS_BYTEL13_SHIFT) |
		(14 << A6XX_DBGC_CFG_DBGBUS_BYTEL14_SHIFT) |
		(15 << A6XX_DBGC_CFG_DBGBUS_BYTEL15_SHIFT));

	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_0, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_1, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_2, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_3, 0);

	/*
	 * Map only the SEL_A..TRACE_BUF2 window of the CX DBGC block so
	 * _cx_dbgc_regread()/_cx_dbgc_regwrite() can reach it
	 */
	a6xx_cx_dbgc = ioremap(device->reg_phys +
			(A6XX_CX_DBGC_CFG_DBGBUS_SEL_A << 2),
		(A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2 -
			A6XX_CX_DBGC_CFG_DBGBUS_SEL_A + 1) << 2);

	if (a6xx_cx_dbgc) {
		/* Mirror the DBGC programming on the CX DBGC block */
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_CNTLT,
			(0xf << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT_SHIFT) |
			(0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU_SHIFT) |
			(0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN_SHIFT));

		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_CNTLM,
			0xf << A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE_SHIFT);

		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3, 0);

		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0,
			(0 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL0_SHIFT) |
			(1 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL1_SHIFT) |
			(2 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL2_SHIFT) |
			(3 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL3_SHIFT) |
			(4 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL4_SHIFT) |
			(5 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL5_SHIFT) |
			(6 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL6_SHIFT) |
			(7 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL7_SHIFT));
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1,
			(8 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL8_SHIFT) |
			(9 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL9_SHIFT) |
			(10 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL10_SHIFT) |
			(11 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL11_SHIFT) |
			(12 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL12_SHIFT) |
			(13 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL13_SHIFT) |
			(14 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL14_SHIFT) |
			(15 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL15_SHIFT));

		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2, 0);
		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3, 0);
	} else
		KGSL_DRV_ERR(device, "Unable to ioremap CX_DBGC_CFG block\n");

	for (i = 0; i < ARRAY_SIZE(a6xx_dbgc_debugbus_blocks); i++) {
		kgsl_snapshot_add_section(device,
			KGSL_SNAPSHOT_SECTION_DEBUGBUS,
			snapshot, a6xx_snapshot_dbgc_debugbus_block,
			(void *) &a6xx_dbgc_debugbus_blocks[i]);
	}

	/* Skip if GPU has GBIF */
	if (!adreno_has_gbif(adreno_dev))
		kgsl_snapshot_add_section(device,
			KGSL_SNAPSHOT_SECTION_DEBUGBUS,
			snapshot, a6xx_snapshot_vbif_debugbus_block,
			(void *) &a6xx_vbif_debugbus_blocks);

	if (a6xx_cx_dbgc) {
		for (i = 0; i < ARRAY_SIZE(a6xx_cx_dbgc_debugbus_blocks); i++) {
			kgsl_snapshot_add_section(device,
				KGSL_SNAPSHOT_SECTION_DEBUGBUS,
				snapshot, a6xx_snapshot_cx_dbgc_debugbus_block,
				(void *) &a6xx_cx_dbgc_debugbus_blocks[i]);
		}
		/* Drop the temporary CX DBGC mapping */
		iounmap(a6xx_cx_dbgc);
	}
}
1344
/*
 * a6xx_snapshot_gmu() - A6XX GMU snapshot function
 * @adreno_dev: Device being snapshotted
 * @snapshot: Pointer to the snapshot instance
 *
 * This is where all of the A6XX GMU specific bits and pieces are grabbed
 * into the snapshot memory
 */
void a6xx_snapshot_gmu(struct adreno_device *adreno_dev,
		struct kgsl_snapshot *snapshot)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	unsigned int val;

	/* Nothing to dump when there is no GMU */
	if (!kgsl_gmu_isenabled(device))
		return;

	adreno_snapshot_registers(device, snapshot, a6xx_gmu_registers,
					ARRAY_SIZE(a6xx_gmu_registers) / 2);

	/* The GX-side GMU registers are only dumped while GX is powered */
	if (gpudev->gx_is_on(adreno_dev)) {
		/* Set fence to ALLOW mode so registers can be read */
		kgsl_regwrite(device, A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
		/* Read the fence control back to log the mode actually set */
		kgsl_regread(device, A6XX_GMU_AO_AHB_FENCE_CTRL, &val);

		KGSL_DRV_ERR(device, "set FENCE to ALLOW mode:%x\n", val);
		adreno_snapshot_registers(device, snapshot,
				a6xx_gmu_gx_registers,
				ARRAY_SIZE(a6xx_gmu_gx_registers) / 2);
	}
}
1377
Lynus Vaz85150052017-02-21 17:57:48 +05301378/* a6xx_snapshot_sqe() - Dump SQE data in snapshot */
1379static size_t a6xx_snapshot_sqe(struct kgsl_device *device, u8 *buf,
1380 size_t remain, void *priv)
1381{
1382 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
1383 struct kgsl_snapshot_debug *header = (struct kgsl_snapshot_debug *)buf;
1384 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
1385 struct adreno_firmware *fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);
1386
1387 if (remain < DEBUG_SECTION_SZ(1)) {
1388 SNAPSHOT_ERR_NOMEM(device, "SQE VERSION DEBUG");
1389 return 0;
1390 }
1391
1392 /* Dump the SQE firmware version */
1393 header->type = SNAPSHOT_DEBUG_SQE_VERSION;
1394 header->size = 1;
1395 *data = fw->version;
1396
1397 return DEBUG_SECTION_SZ(1);
1398}
1399
/*
 * _a6xx_do_crashdump() - Run the CP crash dump script to copy register
 * state into the a6xx_crashdump_registers buffer
 * @device: Device being snapshotted
 *
 * Sets crash_dump_valid so later snapshot callbacks know whether they
 * can parse the dumper output or must fall back to the legacy
 * register-by-register paths.
 */
static void _a6xx_do_crashdump(struct kgsl_device *device)
{
	unsigned long wait_time;
	unsigned int reg = 0;
	unsigned int val;

	crash_dump_valid = false;

	if (!device->snapshot_crashdumper)
		return;
	/* Both the script and the destination buffer must exist */
	if (a6xx_capturescript.gpuaddr == 0 ||
		a6xx_crashdump_registers.gpuaddr == 0)
		return;

	/* IF the SMMU is stalled we cannot do a crash dump */
	kgsl_regread(device, A6XX_RBBM_STATUS3, &val);
	if (val & BIT(24))
		return;

	/* Turn on APRIV so we can access the buffers */
	kgsl_regwrite(device, A6XX_CP_MISC_CNTL, 1);

	/* Point the CP at the capture script and kick off the dump */
	kgsl_regwrite(device, A6XX_CP_CRASH_SCRIPT_BASE_LO,
			lower_32_bits(a6xx_capturescript.gpuaddr));
	kgsl_regwrite(device, A6XX_CP_CRASH_SCRIPT_BASE_HI,
			upper_32_bits(a6xx_capturescript.gpuaddr));
	kgsl_regwrite(device, A6XX_CP_CRASH_DUMP_CNTL, 1);

	/* Poll the done bit (bit 1 of the status register) until timeout */
	wait_time = jiffies + msecs_to_jiffies(CP_CRASH_DUMPER_TIMEOUT);
	while (!time_after(jiffies, wait_time)) {
		kgsl_regread(device, A6XX_CP_CRASH_DUMP_STATUS, &reg);
		if (reg & 0x2)
			break;
		cpu_relax();
	}

	/* Drop APRIV again regardless of the outcome */
	kgsl_regwrite(device, A6XX_CP_MISC_CNTL, 0);

	if (!(reg & 0x2)) {
		KGSL_CORE_ERR("Crash dump timed out: 0x%X\n", reg);
		return;
	}

	crash_dump_valid = true;
}
1445
/*
 * a6xx_snapshot() - A6XX GPU snapshot function
 * @adreno_dev: Device being snapshotted
 * @snapshot: Pointer to the snapshot instance
 *
 * This is where all of the A6XX specific bits and pieces are grabbed
 * into the snapshot memory
 */
void a6xx_snapshot(struct adreno_device *adreno_dev,
		struct kgsl_snapshot *snapshot)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	struct adreno_snapshot_data *snap_data = gpudev->snapshot_data;
	bool sptprac_on;
	unsigned int i;

	/* GMU TCM data dumped through AHB */
	a6xx_snapshot_gmu(adreno_dev, snapshot);

	/* Shader/cluster state is only dumped while SPTP power is on */
	sptprac_on = gpudev->sptprac_is_on(adreno_dev);

	/* Return if the GX is off */
	if (!gpudev->gx_is_on(adreno_dev))
		return;

	/* Dump the registers which get affected by crash dumper trigger */
	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
		snapshot, a6xx_snapshot_pre_crashdump_regs, NULL);

	/* Dump vbif registers as well which get affected by crash dumper */
	if (!adreno_has_gbif(adreno_dev))
		adreno_snapshot_vbif_registers(device, snapshot,
			a6xx_vbif_snapshot_registers,
			ARRAY_SIZE(a6xx_vbif_snapshot_registers));

	/* Try to run the crash dumper */
	if (sptprac_on)
		_a6xx_do_crashdump(device);

	for (i = 0; i < ARRAY_SIZE(a6xx_reg_list); i++) {
		kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
			snapshot, a6xx_snapshot_registers, &a6xx_reg_list[i]);
	}

	/* CP_SQE indexed registers */
	kgsl_snapshot_indexed_registers(device, snapshot,
		A6XX_CP_SQE_STAT_ADDR, A6XX_CP_SQE_STAT_DATA,
		0, snap_data->sect_sizes->cp_pfp);

	/* CP_DRAW_STATE */
	kgsl_snapshot_indexed_registers(device, snapshot,
		A6XX_CP_DRAW_STATE_ADDR, A6XX_CP_DRAW_STATE_DATA,
		0, 0x100);

	/* SQE_UCODE Cache */
	kgsl_snapshot_indexed_registers(device, snapshot,
		A6XX_CP_SQE_UCODE_DBG_ADDR, A6XX_CP_SQE_UCODE_DBG_DATA,
		0, 0x6000);

	/* CP ROQ */
	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG,
		snapshot, adreno_snapshot_cp_roq,
		&snap_data->sect_sizes->roq);

	/* SQE Firmware */
	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG,
		snapshot, a6xx_snapshot_sqe, NULL);

	/* Mempool debug data */
	a6xx_snapshot_mempool(device, snapshot);

	if (sptprac_on) {
		/* Shader memory */
		a6xx_snapshot_shader(device, snapshot);

		/* MVC register section */
		a6xx_snapshot_mvc_regs(device, snapshot);

		/* registers dumped through DBG AHB */
		a6xx_snapshot_dbgahb_regs(device, snapshot);
	}

	a6xx_snapshot_debugbus(device, snapshot);

}
1532
/*
 * _a6xx_crashdump_init_mvc() - Emit capture script entries for the MVC
 * register clusters
 * @ptr: Next free qword in the capture script
 * @offset: Running byte offset into the crash dump data buffer, advanced
 *          by the space reserved here
 *
 * Each script entry is a pair of qwords: the first holds either a value
 * to write or the GPU address to dump into; the second packs a register
 * offset in the upper bits (<< 44) and a dword count, with bit 21 set on
 * the register-write entries. cluster->offset0/offset1 record where each
 * context's data lands so a6xx_snapshot_mvc() can find it later.
 * Returns the number of qwords written to the script.
 */
static int _a6xx_crashdump_init_mvc(uint64_t *ptr, uint64_t *offset)
{
	int qwords = 0;
	unsigned int i, j, k;
	unsigned int count;

	for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
		struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];

		/* Program the cluster's select register, if it has one */
		if (cluster->sel) {
			ptr[qwords++] = cluster->sel->val;
			ptr[qwords++] = ((uint64_t)cluster->sel->cd_reg << 44) |
					(1 << 21) | 1;
		}

		cluster->offset0 = *offset;
		for (j = 0; j < A6XX_NUM_CTXTS; j++) {

			if (j == 1)
				cluster->offset1 = *offset;

			/* Point the aperture at this cluster/context */
			ptr[qwords++] = (cluster->id << 8) | (j << 4) | j;
			ptr[qwords++] =
				((uint64_t)A6XX_CP_APERTURE_CNTL_CD << 44) |
				(1 << 21) | 1;

			for (k = 0; k < cluster->num_sets; k++) {
				count = REG_PAIR_COUNT(cluster->regs, k);
				ptr[qwords++] =
				a6xx_crashdump_registers.gpuaddr + *offset;
				ptr[qwords++] =
				(((uint64_t)cluster->regs[2 * k]) << 44) |
				count;

				*offset += count * sizeof(unsigned int);
			}
		}
	}

	return qwords;
}
1574
/*
 * _a6xx_crashdump_init_shader() - Emit capture script entries that dump
 * every bank of one shader block
 * @block: Shader block to capture; block->offset is set to the data
 *         buffer offset of bank 0
 * @ptr: Next free qword in the capture script
 * @offset: Running byte offset into the crash dump data buffer, advanced
 *          by the space reserved here
 *
 * Returns the number of qwords written to the script.
 */
static int _a6xx_crashdump_init_shader(struct a6xx_shader_block *block,
		uint64_t *ptr, uint64_t *offset)
{
	int qwords = 0;
	unsigned int j;

	/* Capture each bank in the block */
	for (j = 0; j < A6XX_NUM_SHADER_BANKS; j++) {
		/* Program the aperture */
		ptr[qwords++] =
			(block->statetype << A6XX_SHADER_STATETYPE_SHIFT) | j;
		ptr[qwords++] = (((uint64_t) A6XX_HLSQ_DBG_READ_SEL << 44)) |
			(1 << 21) | 1;

		/* Read all the data in one chunk */
		ptr[qwords++] = a6xx_crashdump_registers.gpuaddr + *offset;
		ptr[qwords++] =
			(((uint64_t) A6XX_HLSQ_DBG_AHB_READ_APERTURE << 44)) |
			block->sz;

		/* Remember the offset of the first bank for easy access */
		if (j == 0)
			block->offset = *offset;

		*offset += block->sz * sizeof(unsigned int);
	}

	return qwords;
}
1604
/*
 * _a6xx_crashdump_init_ctx_dbgahb() - Emit capture script entries for the
 * per-context debug-AHB register clusters
 * @ptr: Next free qword in the capture script
 * @offset: Running byte offset into the crash dump data buffer, advanced
 *          by the space reserved here
 *
 * The data layout (context 0 at cluster->offset0, context 1 at offset1)
 * must match what a6xx_snapshot_cluster_dbgahb() reads back.
 * Returns the number of qwords written to the script.
 */
static int _a6xx_crashdump_init_ctx_dbgahb(uint64_t *ptr, uint64_t *offset)
{
	int qwords = 0;
	unsigned int i, j, k;
	unsigned int count;

	for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
		struct a6xx_cluster_dbgahb_registers *cluster =
				&a6xx_dbgahb_ctx_clusters[i];

		cluster->offset0 = *offset;

		for (j = 0; j < A6XX_NUM_CTXTS; j++) {
			if (j == 1)
				cluster->offset1 = *offset;

			/* Program the aperture */
			ptr[qwords++] =
				((cluster->statetype + j * 2) & 0xff) << 8;
			ptr[qwords++] =
				(((uint64_t)A6XX_HLSQ_DBG_READ_SEL << 44)) |
				(1 << 21) | 1;

			for (k = 0; k < cluster->num_sets; k++) {
				unsigned int start = cluster->regs[2 * k];

				count = REG_PAIR_COUNT(cluster->regs, k);
				ptr[qwords++] =
				a6xx_crashdump_registers.gpuaddr + *offset;
				/*
				 * NOTE(review): `regbase / 4` binds tighter
				 * than the subtraction; this matches the same
				 * expression in a6xx_read_dbgahb(), so it
				 * appears intentional (regbase presumably a
				 * byte address) — confirm against the
				 * register tables.
				 */
				ptr[qwords++] =
				(((uint64_t)(A6XX_HLSQ_DBG_AHB_READ_APERTURE +
					start - cluster->regbase / 4) << 44)) |
					count;

				*offset += count * sizeof(unsigned int);
			}
		}
	}
	return qwords;
}
1645
Shrenuj Bansal41665402016-12-16 15:25:54 -08001646void a6xx_crashdump_init(struct adreno_device *adreno_dev)
1647{
1648 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
1649 unsigned int script_size = 0;
1650 unsigned int data_size = 0;
1651 unsigned int i, j, k;
1652 uint64_t *ptr;
1653 uint64_t offset = 0;
1654
1655 if (a6xx_capturescript.gpuaddr != 0 &&
1656 a6xx_crashdump_registers.gpuaddr != 0)
1657 return;
1658
1659 /*
1660 * We need to allocate two buffers:
1661 * 1 - the buffer to hold the draw script
1662 * 2 - the buffer to hold the data
1663 */
1664
1665 /*
1666 * To save the registers, we need 16 bytes per register pair for the
1667 * script and a dword for each register in the data
1668 */
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -06001669 for (i = 0; i < ARRAY_SIZE(a6xx_reg_list); i++) {
1670 struct reg_list *regs = &a6xx_reg_list[i];
1671
1672 /* 16 bytes for programming the aperture */
1673 if (regs->sel)
1674 script_size += 16;
Shrenuj Bansal41665402016-12-16 15:25:54 -08001675
1676 /* Each pair needs 16 bytes (2 qwords) */
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -06001677 script_size += regs->count * 16;
Shrenuj Bansal41665402016-12-16 15:25:54 -08001678
1679 /* Each register needs a dword in the data */
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -06001680 for (j = 0; j < regs->count; j++)
Shrenuj Bansal41665402016-12-16 15:25:54 -08001681 data_size += REG_PAIR_COUNT(regs->regs, j) *
1682 sizeof(unsigned int);
1683
1684 }
1685
Lynus Vaz9ad67a32017-03-10 14:55:02 +05301686 /*
1687 * To save the shader blocks for each block in each type we need 32
1688 * bytes for the script (16 bytes to program the aperture and 16 to
1689 * read the data) and then a block specific number of bytes to hold
1690 * the data
1691 */
1692 for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
1693 script_size += 32 * A6XX_NUM_SHADER_BANKS;
1694 data_size += a6xx_shader_blocks[i].sz * sizeof(unsigned int) *
1695 A6XX_NUM_SHADER_BANKS;
1696 }
1697
Shrenuj Bansal41665402016-12-16 15:25:54 -08001698 /* Calculate the script and data size for MVC registers */
1699 for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
1700 struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];
1701
1702 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
1703
1704 /* 16 bytes for programming the aperture */
1705 script_size += 16;
1706
1707 /* Reading each pair of registers takes 16 bytes */
1708 script_size += 16 * cluster->num_sets;
1709
1710 /* A dword per register read from the cluster list */
1711 for (k = 0; k < cluster->num_sets; k++)
1712 data_size += REG_PAIR_COUNT(cluster->regs, k) *
1713 sizeof(unsigned int);
1714 }
1715 }
1716
Lynus Vaz1e258612017-04-27 21:35:22 +05301717 /* Calculate the script and data size for debug AHB registers */
1718 for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
1719 struct a6xx_cluster_dbgahb_registers *cluster =
1720 &a6xx_dbgahb_ctx_clusters[i];
1721
1722 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
1723
1724 /* 16 bytes for programming the aperture */
1725 script_size += 16;
1726
1727 /* Reading each pair of registers takes 16 bytes */
1728 script_size += 16 * cluster->num_sets;
1729
1730 /* A dword per register read from the cluster list */
1731 for (k = 0; k < cluster->num_sets; k++)
1732 data_size += REG_PAIR_COUNT(cluster->regs, k) *
1733 sizeof(unsigned int);
1734 }
1735 }
1736
Shrenuj Bansal41665402016-12-16 15:25:54 -08001737 /* Now allocate the script and data buffers */
1738
1739 /* The script buffers needs 2 extra qwords on the end */
1740 if (kgsl_allocate_global(device, &a6xx_capturescript,
1741 script_size + 16, KGSL_MEMFLAGS_GPUREADONLY,
1742 KGSL_MEMDESC_PRIVILEGED, "capturescript"))
1743 return;
1744
1745 if (kgsl_allocate_global(device, &a6xx_crashdump_registers, data_size,
1746 0, KGSL_MEMDESC_PRIVILEGED, "capturescript_regs")) {
1747 kgsl_free_global(KGSL_DEVICE(adreno_dev), &a6xx_capturescript);
1748 return;
1749 }
1750
1751 /* Build the crash script */
1752
1753 ptr = (uint64_t *)a6xx_capturescript.hostptr;
1754
1755 /* For the registers, program a read command for each pair */
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -06001756 for (i = 0; i < ARRAY_SIZE(a6xx_reg_list); i++) {
1757 struct reg_list *regs = &a6xx_reg_list[i];
Shrenuj Bansal41665402016-12-16 15:25:54 -08001758
Lynus Vaz1bba57b2017-09-26 11:55:04 +05301759 regs->offset = offset;
1760
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -06001761 /* Program the SEL_CNTL_CD register appropriately */
1762 if (regs->sel) {
1763 *ptr++ = regs->sel->val;
1764 *ptr++ = (((uint64_t)regs->sel->cd_reg << 44)) |
1765 (1 << 21) | 1;
1766 }
1767
1768 for (j = 0; j < regs->count; j++) {
Shrenuj Bansal41665402016-12-16 15:25:54 -08001769 unsigned int r = REG_PAIR_COUNT(regs->regs, j);
1770 *ptr++ = a6xx_crashdump_registers.gpuaddr + offset;
1771 *ptr++ = (((uint64_t) regs->regs[2 * j]) << 44) | r;
1772 offset += r * sizeof(unsigned int);
1773 }
1774 }
1775
Lynus Vaz9ad67a32017-03-10 14:55:02 +05301776 /* Program each shader block */
1777 for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
1778 ptr += _a6xx_crashdump_init_shader(&a6xx_shader_blocks[i], ptr,
1779 &offset);
1780 }
1781
Shrenuj Bansal41665402016-12-16 15:25:54 -08001782 /* Program the capturescript for the MVC regsiters */
1783 ptr += _a6xx_crashdump_init_mvc(ptr, &offset);
1784
Lynus Vaz1e258612017-04-27 21:35:22 +05301785 ptr += _a6xx_crashdump_init_ctx_dbgahb(ptr, &offset);
1786
Shrenuj Bansal41665402016-12-16 15:25:54 -08001787 *ptr++ = 0;
1788 *ptr++ = 0;
1789}