blob: b9a2f8dbb6bb47bf2a77542182eb39437051703c [file] [log] [blame]
/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
13
14#include <linux/io.h>
15#include "kgsl.h"
16#include "adreno.h"
17#include "kgsl_snapshot.h"
18#include "adreno_snapshot.h"
19#include "a6xx_reg.h"
20#include "adreno_a6xx.h"
Kyle Piefer60733aa2017-03-21 11:24:01 -070021#include "kgsl_gmu.h"
Shrenuj Bansal41665402016-12-16 15:25:54 -080022
/* Number of hardware context banks each context cluster is dumped for */
#define A6XX_NUM_CTXTS 2
/* VBIF sub-block counts (used by the VBIF debugbus dump -- not in this chunk) */
#define A6XX_NUM_AXI_ARB_BLOCKS 2
#define A6XX_NUM_XIN_AXI_BLOCKS 5
#define A6XX_NUM_XIN_CORE_BLOCKS 4

/*
 * Context-cluster register lists. Each list is a flat array of inclusive
 * {start, end} register offset pairs, i.e. ARRAY_SIZE(list)/2 ranges.
 */
static const unsigned int a6xx_gras_cluster[] = {
	0x8000, 0x8006, 0x8010, 0x8092, 0x8094, 0x809D, 0x80A0, 0x80A6,
	0x80AF, 0x80F1, 0x8100, 0x8107, 0x8109, 0x8109, 0x8110, 0x8110,
	0x8400, 0x840B,
};

static const unsigned int a6xx_ps_cluster_rac[] = {
	0x8800, 0x8806, 0x8809, 0x8811, 0x8818, 0x881E, 0x8820, 0x8865,
	0x8870, 0x8879, 0x8880, 0x8889, 0x8890, 0x8891, 0x8898, 0x8898,
	0x88C0, 0x88C1, 0x88D0, 0x88E3, 0x8900, 0x890C, 0x890F, 0x891A,
	0x8C00, 0x8C01, 0x8C08, 0x8C10, 0x8C17, 0x8C1F, 0x8C26, 0x8C33,
};

static const unsigned int a6xx_ps_cluster_rbp[] = {
	0x88F0, 0x88F3, 0x890D, 0x890E, 0x8927, 0x8928, 0x8BF0, 0x8BF1,
	0x8C02, 0x8C07, 0x8C11, 0x8C16, 0x8C20, 0x8C25,
};

static const unsigned int a6xx_ps_cluster[] = {
	0x9200, 0x9216, 0x9218, 0x9236, 0x9300, 0x9306,
};

static const unsigned int a6xx_fe_cluster[] = {
	0x9300, 0x9306, 0x9800, 0x9806, 0x9B00, 0x9B07, 0xA000, 0xA009,
	0xA00E, 0xA0EF, 0xA0F8, 0xA0F8,
};

static const unsigned int a6xx_pc_vs_cluster[] = {
	0x9100, 0x9108, 0x9300, 0x9306, 0x9980, 0x9981, 0x9B00, 0x9B07,
};

/*
 * RB aperture selects: the RB register file is dumped twice, once with the
 * RAC sub-block selected (val 0x0) and once with RBP selected (val 0x9).
 * host_reg is written on the CPU (legacy) read path; cd_reg is presumably
 * the equivalent select programmed into the crash dump script -- confirm in
 * the capture-script build code (not in this chunk).
 */
static const struct sel_reg {
	unsigned int host_reg;
	unsigned int cd_reg;
	unsigned int val;
} _a6xx_rb_rac_aperture = {
	.host_reg = A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST,
	.cd_reg = A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD,
	.val = 0x0,
},
_a6xx_rb_rbp_aperture = {
	.host_reg = A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST,
	.cd_reg = A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD,
	.val = 0x9,
};

/*
 * Table of MVC context clusters to dump. num_sets is the number of
 * {start, end} pairs in @regs. offset0/offset1 locate the ctxt0/ctxt1
 * data in the crash dump buffer; presumably filled in while building the
 * capture script (not visible in this chunk -- confirm).
 */
static struct a6xx_cluster_registers {
	unsigned int id;
	const unsigned int *regs;
	unsigned int num_sets;
	const struct sel_reg *sel;
	unsigned int offset0;
	unsigned int offset1;
} a6xx_clusters[] = {
	{ CP_CLUSTER_GRAS, a6xx_gras_cluster, ARRAY_SIZE(a6xx_gras_cluster)/2,
		NULL },
	{ CP_CLUSTER_PS, a6xx_ps_cluster_rac, ARRAY_SIZE(a6xx_ps_cluster_rac)/2,
		&_a6xx_rb_rac_aperture },
	{ CP_CLUSTER_PS, a6xx_ps_cluster_rbp, ARRAY_SIZE(a6xx_ps_cluster_rbp)/2,
		&_a6xx_rb_rbp_aperture },
	{ CP_CLUSTER_PS, a6xx_ps_cluster, ARRAY_SIZE(a6xx_ps_cluster)/2,
		NULL },
	{ CP_CLUSTER_FE, a6xx_fe_cluster, ARRAY_SIZE(a6xx_fe_cluster)/2,
		NULL },
	{ CP_CLUSTER_PC_VS, a6xx_pc_vs_cluster,
		ARRAY_SIZE(a6xx_pc_vs_cluster)/2, NULL },
};

/* Pairs a cluster with the context bank (0 or 1) currently being dumped */
struct a6xx_cluster_regs_info {
	struct a6xx_cluster_registers *cluster;
	unsigned int ctxt_id;
};
100
/*
 * Register lists for the context clusters read through the HLSQ AHB debug
 * aperture. As above, each list is inclusive {start, end} dword-offset pairs.
 */
static const unsigned int a6xx_sp_vs_hlsq_cluster[] = {
	0xB800, 0xB803, 0xB820, 0xB822,
};

static const unsigned int a6xx_sp_vs_sp_cluster[] = {
	0xA800, 0xA824, 0xA830, 0xA83C, 0xA840, 0xA864, 0xA870, 0xA895,
	0xA8A0, 0xA8AF, 0xA8C0, 0xA8C3,
};

static const unsigned int a6xx_hlsq_duplicate_cluster[] = {
	0xBB10, 0xBB11, 0xBB20, 0xBB29,
};

static const unsigned int a6xx_hlsq_2d_duplicate_cluster[] = {
	0xBD80, 0xBD80,
};

static const unsigned int a6xx_sp_duplicate_cluster[] = {
	0xAB00, 0xAB00, 0xAB04, 0xAB05, 0xAB10, 0xAB1B, 0xAB20, 0xAB20,
};

static const unsigned int a6xx_tp_duplicate_cluster[] = {
	0xB300, 0xB307, 0xB309, 0xB309, 0xB380, 0xB382,
};

static const unsigned int a6xx_sp_ps_hlsq_cluster[] = {
	0xB980, 0xB980, 0xB982, 0xB987, 0xB990, 0xB99B, 0xB9A0, 0xB9A2,
	0xB9C0, 0xB9C9,
};

static const unsigned int a6xx_sp_ps_hlsq_2d_cluster[] = {
	0xBD80, 0xBD80,
};

static const unsigned int a6xx_sp_ps_sp_cluster[] = {
	0xA980, 0xA9A8, 0xA9B0, 0xA9BC, 0xA9D0, 0xA9D3, 0xA9E0, 0xA9F3,
	0xAA00, 0xAA00, 0xAA30, 0xAA31,
};

static const unsigned int a6xx_sp_ps_sp_2d_cluster[] = {
	0xACC0, 0xACC0,
};

static const unsigned int a6xx_sp_ps_tp_cluster[] = {
	0xB180, 0xB183, 0xB190, 0xB191,
};

static const unsigned int a6xx_sp_ps_tp_2d_cluster[] = {
	0xB4C0, 0xB4D1,
};

/*
 * Clusters dumped via the HLSQ AHB read aperture.
 * @regbase: byte base address of the sub-block. Note regbase/4 equals the
 *           first dword offset in the matching register list (e.g.
 *           0x0002E000/4 == 0xB800) -- see a6xx_read_dbgahb().
 * @statetype: value programmed into A6XX_HLSQ_DBG_READ_SEL (shifted/offset
 *           per context in the legacy dump path).
 * @offset0/@offset1: ctxt0/ctxt1 data offsets in the crash dump buffer,
 *           presumably assigned when the capture script is built (not in
 *           this chunk -- confirm).
 */
static struct a6xx_cluster_dbgahb_registers {
	unsigned int id;
	unsigned int regbase;
	unsigned int statetype;
	const unsigned int *regs;
	unsigned int num_sets;
	unsigned int offset0;
	unsigned int offset1;
} a6xx_dbgahb_ctx_clusters[] = {
	{ CP_CLUSTER_SP_VS, 0x0002E000, 0x41, a6xx_sp_vs_hlsq_cluster,
		ARRAY_SIZE(a6xx_sp_vs_hlsq_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002A000, 0x21, a6xx_sp_vs_sp_cluster,
		ARRAY_SIZE(a6xx_sp_vs_sp_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002E000, 0x41, a6xx_hlsq_duplicate_cluster,
		ARRAY_SIZE(a6xx_hlsq_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002F000, 0x45, a6xx_hlsq_2d_duplicate_cluster,
		ARRAY_SIZE(a6xx_hlsq_2d_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002A000, 0x21, a6xx_sp_duplicate_cluster,
		ARRAY_SIZE(a6xx_sp_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002C000, 0x1, a6xx_tp_duplicate_cluster,
		ARRAY_SIZE(a6xx_tp_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002E000, 0x42, a6xx_sp_ps_hlsq_cluster,
		ARRAY_SIZE(a6xx_sp_ps_hlsq_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002F000, 0x46, a6xx_sp_ps_hlsq_2d_cluster,
		ARRAY_SIZE(a6xx_sp_ps_hlsq_2d_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002A000, 0x22, a6xx_sp_ps_sp_cluster,
		ARRAY_SIZE(a6xx_sp_ps_sp_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002B000, 0x26, a6xx_sp_ps_sp_2d_cluster,
		ARRAY_SIZE(a6xx_sp_ps_sp_2d_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002C000, 0x2, a6xx_sp_ps_tp_cluster,
		ARRAY_SIZE(a6xx_sp_ps_tp_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002D000, 0x6, a6xx_sp_ps_tp_2d_cluster,
		ARRAY_SIZE(a6xx_sp_ps_tp_2d_cluster) / 2 },
	{ CP_CLUSTER_SP_PS, 0x0002E000, 0x42, a6xx_hlsq_duplicate_cluster,
		ARRAY_SIZE(a6xx_hlsq_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002A000, 0x22, a6xx_sp_duplicate_cluster,
		ARRAY_SIZE(a6xx_sp_duplicate_cluster) / 2 },
	{ CP_CLUSTER_SP_VS, 0x0002C000, 0x2, a6xx_tp_duplicate_cluster,
		ARRAY_SIZE(a6xx_tp_duplicate_cluster) / 2 },
};

/* Pairs a dbgahb cluster with the context bank currently being dumped */
struct a6xx_cluster_dbgahb_regs_info {
	struct a6xx_cluster_dbgahb_registers *cluster;
	unsigned int ctxt_id;
};
197
/*
 * VBIF registers dumped for VBIF version 0x20xxxxxx parts.
 * Inclusive {start, end} offset pairs.
 */
static const unsigned int a6xx_vbif_ver_20xxxxxx_registers[] = {
	/* VBIF */
	0x3000, 0x3007, 0x300C, 0x3014, 0x3018, 0x302D, 0x3030, 0x3031,
	0x3034, 0x3036, 0x303C, 0x303D, 0x3040, 0x3040, 0x3042, 0x3042,
	0x3049, 0x3049, 0x3058, 0x3058, 0x305A, 0x3061, 0x3064, 0x3068,
	0x306C, 0x306D, 0x3080, 0x3088, 0x308B, 0x308C, 0x3090, 0x3094,
	0x3098, 0x3098, 0x309C, 0x309C, 0x30C0, 0x30C0, 0x30C8, 0x30C8,
	0x30D0, 0x30D0, 0x30D8, 0x30D8, 0x30E0, 0x30E0, 0x3100, 0x3100,
	0x3108, 0x3108, 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120,
	0x3124, 0x3125, 0x3129, 0x3129, 0x3131, 0x3131, 0x3154, 0x3154,
	0x3156, 0x3156, 0x3158, 0x3158, 0x315A, 0x315A, 0x315C, 0x315C,
	0x315E, 0x315E, 0x3160, 0x3160, 0x3162, 0x3162, 0x340C, 0x340C,
	0x3410, 0x3410, 0x3800, 0x3801,
};

/* GMU registers in the GX power domain */
static const unsigned int a6xx_gmu_gx_registers[] = {
	/* GMU GX */
	0x1A800, 0x1A800, 0x1A810, 0x1A813, 0x1A816, 0x1A816, 0x1A818, 0x1A81B,
	0x1A81E, 0x1A81E, 0x1A820, 0x1A823, 0x1A826, 0x1A826, 0x1A828, 0x1A82B,
	0x1A82E, 0x1A82E, 0x1A830, 0x1A833, 0x1A836, 0x1A836, 0x1A838, 0x1A83B,
	0x1A83E, 0x1A83E, 0x1A840, 0x1A843, 0x1A846, 0x1A846, 0x1A880, 0x1A884,
	0x1A900, 0x1A92B, 0x1A940, 0x1A940,
};

/* GMU registers outside the GX domain (TCM, CX, RSCC, AO, GPU CC) */
static const unsigned int a6xx_gmu_registers[] = {
	/* GMU TCM */
	0x1B400, 0x1C3FF, 0x1C400, 0x1D3FF,
	/* GMU CX */
	0x1F400, 0x1F407, 0x1F410, 0x1F412, 0x1F500, 0x1F500, 0x1F507, 0x1F50A,
	0x1F800, 0x1F804, 0x1F807, 0x1F808, 0x1F80B, 0x1F80C, 0x1F80F, 0x1F81C,
	0x1F824, 0x1F82A, 0x1F82D, 0x1F830, 0x1F840, 0x1F853, 0x1F887, 0x1F889,
	0x1F8A0, 0x1F8A2, 0x1F8A4, 0x1F8AF, 0x1F8C0, 0x1F8C3, 0x1F8D0, 0x1F8D0,
	0x1F8E4, 0x1F8E4, 0x1F8E8, 0x1F8EC, 0x1F900, 0x1F903, 0x1F940, 0x1F940,
	0x1F942, 0x1F944, 0x1F94C, 0x1F94D, 0x1F94F, 0x1F951, 0x1F954, 0x1F954,
	0x1F957, 0x1F958, 0x1F95D, 0x1F95D, 0x1F962, 0x1F962, 0x1F964, 0x1F965,
	0x1F980, 0x1F986, 0x1F990, 0x1F99E, 0x1F9C0, 0x1F9C0, 0x1F9C5, 0x1F9CC,
	0x1F9E0, 0x1F9E2, 0x1F9F0, 0x1F9F0, 0x1FA00, 0x1FA01,
	/* GPU RSCC */
	0x2348C, 0x2348C, 0x23501, 0x23502, 0x23740, 0x23742, 0x23744, 0x23747,
	0x2374C, 0x23787, 0x237EC, 0x237EF, 0x237F4, 0x2382F, 0x23894, 0x23897,
	0x2389C, 0x238D7, 0x2393C, 0x2393F, 0x23944, 0x2397F,
	/* GMU AO */
	0x23B00, 0x23B16, 0x23C00, 0x23C00,
	/* GPU CC */
	0x24000, 0x24012, 0x24040, 0x24052, 0x24400, 0x24404, 0x24407, 0x2440B,
	0x24415, 0x2441C, 0x2441E, 0x2442D, 0x2443C, 0x2443D, 0x2443F, 0x24440,
	0x24442, 0x24449, 0x24458, 0x2445A, 0x24540, 0x2455E, 0x24800, 0x24802,
	0x24C00, 0x24C02, 0x25400, 0x25402, 0x25800, 0x25802, 0x25C00, 0x25C02,
	0x26000, 0x26002,
	/* GPU CC ACD */
	0x26400, 0x26416, 0x26420, 0x26427,
};

/* RB ranges readable with the RAC sub-block selected (see _a6xx_rb_rac_aperture) */
static const unsigned int a6xx_rb_rac_registers[] = {
	0x8E04, 0x8E05, 0x8E07, 0x8E08, 0x8E10, 0x8E1C, 0x8E20, 0x8E25,
	0x8E28, 0x8E28, 0x8E2C, 0x8E2F, 0x8E50, 0x8E52,
};

/* RB ranges readable with the RBP sub-block selected (see _a6xx_rb_rbp_aperture) */
static const unsigned int a6xx_rb_rbp_registers[] = {
	0x8E01, 0x8E01, 0x8E0C, 0x8E0C, 0x8E3B, 0x8E3E, 0x8E40, 0x8E43,
	0x8E53, 0x8E5F, 0x8E70, 0x8E77,
};

/* Maps a VBIF hardware version (masked with 0xFF000000) to its register list */
static const struct adreno_vbif_snapshot_registers
a6xx_vbif_snapshot_registers[] = {
	{ 0x20040000, 0xFF000000, a6xx_vbif_ver_20xxxxxx_registers,
		ARRAY_SIZE(a6xx_vbif_ver_20xxxxxx_registers)/2},
};
266
/*
 * Set of registers to dump for A6XX on snapshot.
 * Registers in pairs - first value is the start offset, second
 * is the stop offset (inclusive)
 */

static const unsigned int a6xx_registers[] = {
	/* RBBM */
	0x0000, 0x0002, 0x0010, 0x0010, 0x0012, 0x0012, 0x0018, 0x001B,
	0x001e, 0x0032, 0x0038, 0x003C, 0x0042, 0x0042, 0x0044, 0x0044,
	0x0047, 0x0047, 0x0056, 0x0056, 0x00AD, 0x00AE, 0x00B0, 0x00FB,
	0x0100, 0x011D, 0x0200, 0x020D, 0x0218, 0x023D, 0x0400, 0x04F9,
	0x0500, 0x0500, 0x0505, 0x050B, 0x050E, 0x0511, 0x0533, 0x0533,
	0x0540, 0x0555,
	/* CP */
	0x0800, 0x0808, 0x0810, 0x0813, 0x0820, 0x0821, 0x0823, 0x0824,
	0x0826, 0x0827, 0x0830, 0x0833, 0x0840, 0x0843, 0x084F, 0x086F,
	0x0880, 0x088A, 0x08A0, 0x08AB, 0x08C0, 0x08C4, 0x08D0, 0x08DD,
	0x08F0, 0x08F3, 0x0900, 0x0903, 0x0908, 0x0911, 0x0928, 0x093E,
	0x0942, 0x094D, 0x0980, 0x0984, 0x098D, 0x0996, 0x0998, 0x099E,
	0x09A0, 0x09A6, 0x09A8, 0x09AE, 0x09B0, 0x09B1, 0x09C2, 0x09C8,
	0x0A00, 0x0A03,
	/* VSC */
	0x0C00, 0x0C04, 0x0C06, 0x0C06, 0x0C10, 0x0CD9, 0x0E00, 0x0E0E,
	/* UCHE */
	0x0E10, 0x0E13, 0x0E17, 0x0E19, 0x0E1C, 0x0E2B, 0x0E30, 0x0E32,
	0x0E38, 0x0E39,
	/* GRAS */
	0x8600, 0x8601, 0x8610, 0x861B, 0x8620, 0x8620, 0x8628, 0x862B,
	0x8630, 0x8637,
	/* VPC */
	0x9600, 0x9604, 0x9624, 0x9637,
	/* PC */
	0x9E00, 0x9E01, 0x9E03, 0x9E0E, 0x9E11, 0x9E16, 0x9E19, 0x9E19,
	0x9E1C, 0x9E1C, 0x9E20, 0x9E23, 0x9E30, 0x9E31, 0x9E34, 0x9E34,
	0x9E70, 0x9E72, 0x9E78, 0x9E79, 0x9E80, 0x9FFF,
	/* VFD */
	0xA600, 0xA601, 0xA603, 0xA603, 0xA60A, 0xA60A, 0xA610, 0xA617,
	0xA630, 0xA630,
	/* SP */
	0xAE00, 0xAE04, 0xAE0C, 0xAE0C, 0xAE0F, 0xAE2B, 0xAE30, 0xAE32,
	0xAE35, 0xAE35, 0xAE3A, 0xAE3F, 0xAE50, 0xAE52,
	/* TP */
	0xB600, 0xB601, 0xB604, 0xB605, 0xB610, 0xB61B, 0xB620, 0xB623,
	/* HLSQ */
	0xBE00, 0xBE01, 0xBE04, 0xBE05, 0xBE08, 0xBE09, 0xBE10, 0xBE15,
	0xBE20, 0xBE23,

};

/*
 * Set of registers to dump for A6XX before actually triggering crash dumper.
 * Registers in pairs - first value is the start offset, second
 * is the stop offset (inclusive)
 */
static const unsigned int a6xx_pre_crashdumper_registers[] = {
	/* RBBM: RBBM_STATUS - RBBM_STATUS3 */
	0x210, 0x213,
	/* CP: CP_STATUS_1 */
	0x825, 0x825,
};
328
/*
 * Debugbus block select codes for A6XX. The values are the hardware ids
 * programmed into the debugbus select registers; gaps in the numbering are
 * ids that are not dumped (or do not exist) on this family.
 */
enum a6xx_debugbus_id {
	A6XX_DBGBUS_CP           = 0x1,
	A6XX_DBGBUS_RBBM         = 0x2,
	A6XX_DBGBUS_VBIF         = 0x3,
	A6XX_DBGBUS_HLSQ         = 0x4,
	A6XX_DBGBUS_UCHE         = 0x5,
	A6XX_DBGBUS_DPM          = 0x6,
	A6XX_DBGBUS_TESS         = 0x7,
	A6XX_DBGBUS_PC           = 0x8,
	A6XX_DBGBUS_VFDP         = 0x9,
	A6XX_DBGBUS_VPC          = 0xa,
	A6XX_DBGBUS_TSE          = 0xb,
	A6XX_DBGBUS_RAS          = 0xc,
	A6XX_DBGBUS_VSC          = 0xd,
	A6XX_DBGBUS_COM          = 0xe,
	A6XX_DBGBUS_LRZ          = 0x10,
	A6XX_DBGBUS_A2D          = 0x11,
	A6XX_DBGBUS_CCUFCHE      = 0x12,
	A6XX_DBGBUS_GMU_CX       = 0x13,
	A6XX_DBGBUS_RBP          = 0x14,
	A6XX_DBGBUS_DCS          = 0x15,
	A6XX_DBGBUS_RBBM_CFG     = 0x16,
	A6XX_DBGBUS_CX           = 0x17,
	A6XX_DBGBUS_GMU_GX       = 0x18,
	A6XX_DBGBUS_TPFCHE       = 0x19,
	A6XX_DBGBUS_GPC          = 0x1d,
	A6XX_DBGBUS_LARC         = 0x1e,
	A6XX_DBGBUS_HLSQ_SPTP    = 0x1f,
	A6XX_DBGBUS_RB_0         = 0x20,
	A6XX_DBGBUS_RB_1         = 0x21,
	A6XX_DBGBUS_UCHE_WRAPPER = 0x24,
	A6XX_DBGBUS_CCU_0        = 0x28,
	A6XX_DBGBUS_CCU_1        = 0x29,
	A6XX_DBGBUS_VFD_0        = 0x38,
	A6XX_DBGBUS_VFD_1        = 0x39,
	A6XX_DBGBUS_VFD_2        = 0x3a,
	A6XX_DBGBUS_VFD_3        = 0x3b,
	A6XX_DBGBUS_SP_0         = 0x40,
	A6XX_DBGBUS_SP_1         = 0x41,
	A6XX_DBGBUS_TPL1_0       = 0x48,
	A6XX_DBGBUS_TPL1_1       = 0x49,
	A6XX_DBGBUS_TPL1_2       = 0x4a,
	A6XX_DBGBUS_TPL1_3       = 0x4b,
};

/* Blocks read through the (GX-domain) DBGC; each entry is {id, size 0x100} */
static const struct adreno_debugbus_block a6xx_dbgc_debugbus_blocks[] = {
	{ A6XX_DBGBUS_CP, 0x100, },
	{ A6XX_DBGBUS_RBBM, 0x100, },
	{ A6XX_DBGBUS_HLSQ, 0x100, },
	{ A6XX_DBGBUS_UCHE, 0x100, },
	{ A6XX_DBGBUS_DPM, 0x100, },
	{ A6XX_DBGBUS_TESS, 0x100, },
	{ A6XX_DBGBUS_PC, 0x100, },
	{ A6XX_DBGBUS_VFDP, 0x100, },
	{ A6XX_DBGBUS_VPC, 0x100, },
	{ A6XX_DBGBUS_TSE, 0x100, },
	{ A6XX_DBGBUS_RAS, 0x100, },
	{ A6XX_DBGBUS_VSC, 0x100, },
	{ A6XX_DBGBUS_COM, 0x100, },
	{ A6XX_DBGBUS_LRZ, 0x100, },
	{ A6XX_DBGBUS_A2D, 0x100, },
	{ A6XX_DBGBUS_CCUFCHE, 0x100, },
	{ A6XX_DBGBUS_RBP, 0x100, },
	{ A6XX_DBGBUS_DCS, 0x100, },
	{ A6XX_DBGBUS_RBBM_CFG, 0x100, },
	{ A6XX_DBGBUS_GMU_GX, 0x100, },
	{ A6XX_DBGBUS_TPFCHE, 0x100, },
	{ A6XX_DBGBUS_GPC, 0x100, },
	{ A6XX_DBGBUS_LARC, 0x100, },
	{ A6XX_DBGBUS_HLSQ_SPTP, 0x100, },
	{ A6XX_DBGBUS_RB_0, 0x100, },
	{ A6XX_DBGBUS_RB_1, 0x100, },
	{ A6XX_DBGBUS_UCHE_WRAPPER, 0x100, },
	{ A6XX_DBGBUS_CCU_0, 0x100, },
	{ A6XX_DBGBUS_CCU_1, 0x100, },
	{ A6XX_DBGBUS_VFD_0, 0x100, },
	{ A6XX_DBGBUS_VFD_1, 0x100, },
	{ A6XX_DBGBUS_VFD_2, 0x100, },
	{ A6XX_DBGBUS_VFD_3, 0x100, },
	{ A6XX_DBGBUS_SP_0, 0x100, },
	{ A6XX_DBGBUS_SP_1, 0x100, },
	{ A6XX_DBGBUS_TPL1_0, 0x100, },
	{ A6XX_DBGBUS_TPL1_1, 0x100, },
	{ A6XX_DBGBUS_TPL1_2, 0x100, },
	{ A6XX_DBGBUS_TPL1_3, 0x100, },
};

/* VBIF has its own debugbus read path, so it is kept out of the table above */
static const struct adreno_debugbus_block a6xx_vbif_debugbus_blocks = {
	A6XX_DBGBUS_VBIF, 0x100,
};

/* Blocks in the CX power domain, read through the CX DBGC instead */
static const struct adreno_debugbus_block a6xx_cx_dbgc_debugbus_blocks[] = {
	{ A6XX_DBGBUS_GMU_CX, 0x100, },
	{ A6XX_DBGBUS_CX, 0x100, },
};
424
/* Each shader state block is captured once per bank */
#define A6XX_NUM_SHADER_BANKS 3
/* Bit position of the statetype in the shader read select register */
#define A6XX_SHADER_STATETYPE_SHIFT 8

/* Statetype ids for the SP/TP/HLSQ internal memories */
enum a6xx_shader_obj {
	A6XX_TP0_TMO_DATA               = 0x9,
	A6XX_TP0_SMO_DATA               = 0xa,
	A6XX_TP0_MIPMAP_BASE_DATA       = 0xb,
	A6XX_TP1_TMO_DATA               = 0x19,
	A6XX_TP1_SMO_DATA               = 0x1a,
	A6XX_TP1_MIPMAP_BASE_DATA       = 0x1b,
	A6XX_SP_INST_DATA               = 0x29,
	A6XX_SP_LB_0_DATA               = 0x2a,
	A6XX_SP_LB_1_DATA               = 0x2b,
	A6XX_SP_LB_2_DATA               = 0x2c,
	A6XX_SP_LB_3_DATA               = 0x2d,
	A6XX_SP_LB_4_DATA               = 0x2e,
	A6XX_SP_LB_5_DATA               = 0x2f,
	A6XX_SP_CB_BINDLESS_DATA        = 0x30,
	A6XX_SP_CB_LEGACY_DATA          = 0x31,
	A6XX_SP_UAV_DATA                = 0x32,
	A6XX_SP_INST_TAG                = 0x33,
	A6XX_SP_CB_BINDLESS_TAG         = 0x34,
	A6XX_SP_TMO_UMO_TAG             = 0x35,
	A6XX_SP_SMO_TAG                 = 0x36,
	A6XX_SP_STATE_DATA              = 0x37,
	A6XX_HLSQ_CHUNK_CVS_RAM         = 0x49,
	A6XX_HLSQ_CHUNK_CPS_RAM         = 0x4a,
	A6XX_HLSQ_CHUNK_CVS_RAM_TAG     = 0x4b,
	A6XX_HLSQ_CHUNK_CPS_RAM_TAG     = 0x4c,
	A6XX_HLSQ_ICB_CVS_CB_BASE_TAG   = 0x4d,
	A6XX_HLSQ_ICB_CPS_CB_BASE_TAG   = 0x4e,
	A6XX_HLSQ_CVS_MISC_RAM          = 0x50,
	A6XX_HLSQ_CPS_MISC_RAM          = 0x51,
	A6XX_HLSQ_INST_RAM              = 0x52,
	A6XX_HLSQ_GFX_CVS_CONST_RAM     = 0x53,
	A6XX_HLSQ_GFX_CPS_CONST_RAM     = 0x54,
	A6XX_HLSQ_CVS_MISC_RAM_TAG      = 0x55,
	A6XX_HLSQ_CPS_MISC_RAM_TAG      = 0x56,
	A6XX_HLSQ_INST_RAM_TAG          = 0x57,
	A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG = 0x58,
	A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG = 0x59,
	A6XX_HLSQ_PWR_REST_RAM          = 0x5a,
	A6XX_HLSQ_PWR_REST_TAG          = 0x5b,
	A6XX_HLSQ_DATAPATH_META         = 0x60,
	A6XX_HLSQ_FRONTEND_META         = 0x61,
	A6XX_HLSQ_INDIRECT_META         = 0x62,
	A6XX_HLSQ_BACKEND_META          = 0x63
};

/*
 * @statetype: shader object id (enum a6xx_shader_obj)
 * @sz: size of one bank in dwords (the dump copies sz * sizeof(unsigned int)
 *      bytes per bank -- see a6xx_snapshot_shader_memory)
 * @offset: location of this block's bank-0 data in the crash dump buffer;
 *      presumably assigned when the capture script is built (not in this
 *      chunk -- confirm there). Per-bank offsets are offset + bank * sz.
 */
struct a6xx_shader_block {
	unsigned int statetype;
	unsigned int sz;
	uint64_t offset;
};

/* Identifies one (block, bank) pair while its section is being written */
struct a6xx_shader_block_info {
	struct a6xx_shader_block *block;
	unsigned int bank;
	uint64_t offset;
};

static struct a6xx_shader_block a6xx_shader_blocks[] = {
	{A6XX_TP0_TMO_DATA, 0x200},
	{A6XX_TP0_SMO_DATA, 0x80,},
	{A6XX_TP0_MIPMAP_BASE_DATA, 0x3C0},
	{A6XX_TP1_TMO_DATA, 0x200},
	{A6XX_TP1_SMO_DATA, 0x80,},
	{A6XX_TP1_MIPMAP_BASE_DATA, 0x3C0},
	{A6XX_SP_INST_DATA, 0x800},
	{A6XX_SP_LB_0_DATA, 0x800},
	{A6XX_SP_LB_1_DATA, 0x800},
	{A6XX_SP_LB_2_DATA, 0x800},
	{A6XX_SP_LB_3_DATA, 0x800},
	{A6XX_SP_LB_4_DATA, 0x800},
	{A6XX_SP_LB_5_DATA, 0x200},
	{A6XX_SP_CB_BINDLESS_DATA, 0x2000},
	{A6XX_SP_CB_LEGACY_DATA, 0x280,},
	{A6XX_SP_UAV_DATA, 0x80,},
	{A6XX_SP_INST_TAG, 0x80,},
	{A6XX_SP_CB_BINDLESS_TAG, 0x80,},
	{A6XX_SP_TMO_UMO_TAG, 0x80,},
	{A6XX_SP_SMO_TAG, 0x80},
	{A6XX_SP_STATE_DATA, 0x3F},
	{A6XX_HLSQ_CHUNK_CVS_RAM, 0x1C0},
	{A6XX_HLSQ_CHUNK_CPS_RAM, 0x280},
	{A6XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40,},
	{A6XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40,},
	{A6XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x4,},
	{A6XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x4,},
	{A6XX_HLSQ_CVS_MISC_RAM, 0x1C0},
	{A6XX_HLSQ_CPS_MISC_RAM, 0x580},
	{A6XX_HLSQ_INST_RAM, 0x800},
	{A6XX_HLSQ_GFX_CVS_CONST_RAM, 0x800},
	{A6XX_HLSQ_GFX_CPS_CONST_RAM, 0x800},
	{A6XX_HLSQ_CVS_MISC_RAM_TAG, 0x8,},
	{A6XX_HLSQ_CPS_MISC_RAM_TAG, 0x4,},
	{A6XX_HLSQ_INST_RAM_TAG, 0x80,},
	{A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0xC,},
	{A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x10},
	{A6XX_HLSQ_PWR_REST_RAM, 0x28},
	{A6XX_HLSQ_PWR_REST_TAG, 0x14},
	{A6XX_HLSQ_DATAPATH_META, 0x40,},
	{A6XX_HLSQ_FRONTEND_META, 0x40},
	{A6XX_HLSQ_INDIRECT_META, 0x40,}
};
530
/* GPU buffer holding the program executed by the CP crash dump engine */
static struct kgsl_memdesc a6xx_capturescript;
/* GPU buffer the crash dumper writes captured register values into */
static struct kgsl_memdesc a6xx_crashdump_registers;
/* True only when the crash dumper ran and its output can be trusted */
static bool crash_dump_valid;

/*
 * Register lists captured by the crash dumper.
 * @regs: flat array of inclusive {start, end} pairs
 * @count: number of pairs in @regs
 * @sel: optional aperture select programmed before reading (RB RAC/RBP)
 * @offset: location of this list's data within a6xx_crashdump_registers
 *          (a6xx_snapshot_registers reads from hostptr + offset); presumably
 *          assigned when the capture script is built -- not in this chunk.
 */
static struct reg_list {
	const unsigned int *regs;
	unsigned int count;
	const struct sel_reg *sel;
	uint64_t offset;
} a6xx_reg_list[] = {
	{ a6xx_registers, ARRAY_SIZE(a6xx_registers) / 2, NULL },
	{ a6xx_rb_rac_registers, ARRAY_SIZE(a6xx_rb_rac_registers) / 2,
		&_a6xx_rb_rac_aperture },
	{ a6xx_rb_rbp_registers, ARRAY_SIZE(a6xx_rb_rbp_registers) / 2,
		&_a6xx_rb_rbp_aperture },
};

/* Number of registers covered by inclusive pair _i of pair-array _a */
#define REG_PAIR_COUNT(_a, _i) \
	(((_a)[(2 * (_i)) + 1] - (_a)[2 * (_i)]) + 1)
550
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -0600551static size_t a6xx_legacy_snapshot_registers(struct kgsl_device *device,
Lynus Vaz96de8522017-09-13 20:17:03 +0530552 u8 *buf, size_t remain, struct reg_list *regs)
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -0600553{
Lynus Vaz96de8522017-09-13 20:17:03 +0530554 struct kgsl_snapshot_registers snapshot_regs = {
555 .regs = regs->regs,
556 .count = regs->count,
557 };
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -0600558
Lynus Vaz96de8522017-09-13 20:17:03 +0530559 if (regs->sel)
560 kgsl_regwrite(device, regs->sel->host_reg, regs->sel->val);
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -0600561
Lynus Vaz96de8522017-09-13 20:17:03 +0530562 return kgsl_snapshot_dump_registers(device, buf, remain,
563 &snapshot_regs);
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -0600564}
565
/*
 * Write one register-list section from the crash dumper's output buffer.
 * Falls back to direct CPU reads (legacy path) when the crash dump data is
 * not valid. The section body is (offset, value) dword pairs, i.e. 8 bytes
 * per register. Returns the total section size in bytes, or 0 on failure.
 */
static size_t a6xx_snapshot_registers(struct kgsl_device *device, u8 *buf,
		size_t remain, void *priv)
{
	struct kgsl_snapshot_regs *header = (struct kgsl_snapshot_regs *)buf;
	struct reg_list *regs = (struct reg_list *)priv;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int *src;
	unsigned int j, k;
	unsigned int count = 0;

	if (crash_dump_valid == false)
		return a6xx_legacy_snapshot_registers(device, buf, remain,
			regs);

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	/* Values for this list start at regs->offset in the CD output buffer */
	src = (unsigned int *)(a6xx_crashdump_registers.hostptr + regs->offset);
	remain -= sizeof(*header);

	for (j = 0; j < regs->count; j++) {
		unsigned int start = regs->regs[2 * j];
		unsigned int end = regs->regs[(2 * j) + 1];

		/* Each register needs 8 bytes: its offset and its value */
		if (remain < ((end - start) + 1) * 8) {
			SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
			goto out;
		}

		remain -= ((end - start) + 1) * 8;

		for (k = start; k <= end; k++, count++) {
			*data++ = k;
			*data++ = *src++;
		}
	}

out:
	/* Record how many registers actually made it into the section */
	header->count = count;

	/* Return the size of the section */
	return (count * 8) + sizeof(*header);
}
611
Lynus Vaz030473e2017-06-22 17:33:06 +0530612static size_t a6xx_snapshot_pre_crashdump_regs(struct kgsl_device *device,
613 u8 *buf, size_t remain, void *priv)
614{
615 struct kgsl_snapshot_registers pre_cdregs = {
616 .regs = a6xx_pre_crashdumper_registers,
617 .count = ARRAY_SIZE(a6xx_pre_crashdumper_registers)/2,
618 };
619
620 return kgsl_snapshot_dump_registers(device, buf, remain, &pre_cdregs);
621}
622
Lynus Vaz9ad67a32017-03-10 14:55:02 +0530623static size_t a6xx_snapshot_shader_memory(struct kgsl_device *device,
624 u8 *buf, size_t remain, void *priv)
625{
626 struct kgsl_snapshot_shader *header =
627 (struct kgsl_snapshot_shader *) buf;
628 struct a6xx_shader_block_info *info =
629 (struct a6xx_shader_block_info *) priv;
630 struct a6xx_shader_block *block = info->block;
631 unsigned int *data = (unsigned int *) (buf + sizeof(*header));
632
633 if (remain < SHADER_SECTION_SZ(block->sz)) {
634 SNAPSHOT_ERR_NOMEM(device, "SHADER MEMORY");
635 return 0;
636 }
637
638 header->type = block->statetype;
639 header->index = info->bank;
640 header->size = block->sz;
641
642 memcpy(data, a6xx_crashdump_registers.hostptr + info->offset,
Lynus Vaz24f75eb2017-11-22 11:25:04 +0530643 block->sz * sizeof(unsigned int));
Lynus Vaz9ad67a32017-03-10 14:55:02 +0530644
645 return SHADER_SECTION_SZ(block->sz);
646}
647
648static void a6xx_snapshot_shader(struct kgsl_device *device,
649 struct kgsl_snapshot *snapshot)
650{
651 unsigned int i, j;
652 struct a6xx_shader_block_info info;
653
654 /* Shader blocks can only be read by the crash dumper */
655 if (crash_dump_valid == false)
656 return;
657
658 for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
659 for (j = 0; j < A6XX_NUM_SHADER_BANKS; j++) {
660 info.block = &a6xx_shader_blocks[i];
661 info.bank = j;
662 info.offset = a6xx_shader_blocks[i].offset +
663 (j * a6xx_shader_blocks[i].sz);
664
665 /* Shader working/shadow memory */
666 kgsl_snapshot_add_section(device,
667 KGSL_SNAPSHOT_SECTION_SHADER,
668 snapshot, a6xx_snapshot_shader_memory, &info);
669 }
670 }
671}
672
/*
 * Dump the CP mempool through the indexed MEM_POOL_DBG_ADDR/DBG_DATA pair.
 * The mempool size register is forced to 0 while reading so the pool stays
 * stable, then restored; the saved size is patched back into the dumped
 * data (which otherwise would contain the temporary 0).
 */
static void a6xx_snapshot_mempool(struct kgsl_device *device,
				struct kgsl_snapshot *snapshot)
{
	unsigned int pool_size;
	u8 *buf = snapshot->ptr;	/* remember where this section starts */

	/* Set the mempool size to 0 to stabilize it while dumping */
	kgsl_regread(device, A6XX_CP_MEM_POOL_SIZE, &pool_size);
	kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, 0);

	kgsl_snapshot_indexed_registers(device, snapshot,
		A6XX_CP_MEM_POOL_DBG_ADDR, A6XX_CP_MEM_POOL_DBG_DATA,
		0, 0x2060);

	/*
	 * Data at offset 0x2000 in the mempool section is the mempool size.
	 * Since we set it to 0, patch in the original size so that the data
	 * is consistent.
	 */
	if (buf < snapshot->ptr) {	/* only if the section was written */
		unsigned int *data;

		/* Skip over the headers */
		buf += sizeof(struct kgsl_snapshot_section_header) +
			sizeof(struct kgsl_snapshot_indexed_regs);

		/* dword-pointer arithmetic: 0x2000 dwords into the data */
		data = (unsigned int *)buf + 0x2000;
		*data = pool_size;
	}

	/* Restore the saved mempool size */
	kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, pool_size);
}
706
/*
 * Read one register through the HLSQ AHB read aperture.
 *
 * @regbase: BYTE base address of the sub-block (e.g. 0x0002E000)
 * @reg: dword register offset within the block (e.g. 0xB800)
 *
 * NOTE: operator precedence makes the expression below
 * "reg - (regbase / 4)", NOT "(reg - regbase) / 4". This is intended:
 * the dbgahb cluster tables pair byte regbases with dword register
 * offsets (0x2E000 / 4 == 0xB800), so regbase/4 converts the base to
 * dwords before subtracting. Do not "fix" the precedence.
 */
static inline unsigned int a6xx_read_dbgahb(struct kgsl_device *device,
				unsigned int regbase, unsigned int reg)
{
	unsigned int read_reg = A6XX_HLSQ_DBG_AHB_READ_APERTURE +
				reg - regbase / 4;
	unsigned int val;

	kgsl_regread(device, read_reg, &val);
	return val;
}
717
/*
 * Legacy (CPU-read) dump of one dbgahb context cluster. Only runs when the
 * user asked for a legacy snapshot. The section body is a sequence of
 * records: {start | (1 << 31), end, value...}, i.e. (end - start + 3)
 * dwords per register range. Returns the section size in bytes.
 */
static size_t a6xx_legacy_snapshot_cluster_dbgahb(struct kgsl_device *device,
				u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_mvc_regs *header =
				(struct kgsl_snapshot_mvc_regs *)buf;
	struct a6xx_cluster_dbgahb_regs_info *info =
				(struct a6xx_cluster_dbgahb_regs_info *)priv;
	struct a6xx_cluster_dbgahb_registers *cur_cluster = info->cluster;
	unsigned int read_sel;
	unsigned int data_size = 0;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	int i, j;

	if (!device->snapshot_legacy)
		return 0;

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	remain -= sizeof(*header);

	header->ctxt_id = info->ctxt_id;
	header->cluster_id = cur_cluster->id;

	/* Select statetype + context in bits 8-15 of the HLSQ read select */
	read_sel = ((cur_cluster->statetype + info->ctxt_id * 2) & 0xff) << 8;
	kgsl_regwrite(device, A6XX_HLSQ_DBG_READ_SEL, read_sel);

	for (i = 0; i < cur_cluster->num_sets; i++) {
		unsigned int start = cur_cluster->regs[2 * i];
		unsigned int end = cur_cluster->regs[2 * i + 1];

		/* Range header (2 dwords) plus one dword per register */
		if (remain < (end - start + 3) * 4) {
			SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
			goto out;
		}

		remain -= (end - start + 3) * 4;
		data_size += (end - start + 3) * 4;

		/* Bit 31 marks this dword as a range start marker */
		*data++ = start | (1 << 31);
		*data++ = end;

		for (j = start; j <= end; j++) {
			unsigned int val;

			val = a6xx_read_dbgahb(device, cur_cluster->regbase, j);
			*data++ = val;

		}
	}

out:
	return data_size + sizeof(*header);
}
774
/*
 * Dump one dbgahb context cluster from the crash dumper's output buffer.
 * Falls back to the legacy CPU-read path when the crash dump data is not
 * valid. Section format matches the legacy path: per register range,
 * {start | (1 << 31), end, value...}. Returns the section size in bytes.
 */
static size_t a6xx_snapshot_cluster_dbgahb(struct kgsl_device *device, u8 *buf,
				size_t remain, void *priv)
{
	struct kgsl_snapshot_mvc_regs *header =
				(struct kgsl_snapshot_mvc_regs *)buf;
	struct a6xx_cluster_dbgahb_regs_info *info =
				(struct a6xx_cluster_dbgahb_regs_info *)priv;
	struct a6xx_cluster_dbgahb_registers *cluster = info->cluster;
	unsigned int data_size = 0;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	int i, j;
	unsigned int *src;


	if (crash_dump_valid == false)
		return a6xx_legacy_snapshot_cluster_dbgahb(device, buf, remain,
				info);

	if (remain < sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	remain -= sizeof(*header);

	header->ctxt_id = info->ctxt_id;
	header->cluster_id = cluster->id;

	/* Each context bank's values live at its own offset in the CD buffer */
	src = (unsigned int *)(a6xx_crashdump_registers.hostptr +
		(header->ctxt_id ? cluster->offset1 : cluster->offset0));

	for (i = 0; i < cluster->num_sets; i++) {
		unsigned int start;
		unsigned int end;

		start = cluster->regs[2 * i];
		end = cluster->regs[2 * i + 1];

		/* Range header (2 dwords) plus one dword per register */
		if (remain < (end - start + 3) * 4) {
			SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
			goto out;
		}

		remain -= (end - start + 3) * 4;
		data_size += (end - start + 3) * 4;

		/* Bit 31 marks this dword as a range start marker */
		*data++ = start | (1 << 31);
		*data++ = end;
		for (j = start; j <= end; j++)
			*data++ = *src++;
	}
out:
	return data_size + sizeof(*header);
}
829
Lynus Vaz461e2382017-01-16 19:35:41 +0530830static void a6xx_snapshot_dbgahb_regs(struct kgsl_device *device,
831 struct kgsl_snapshot *snapshot)
832{
833 int i, j;
834
835 for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
836 struct a6xx_cluster_dbgahb_registers *cluster =
837 &a6xx_dbgahb_ctx_clusters[i];
838 struct a6xx_cluster_dbgahb_regs_info info;
839
840 info.cluster = cluster;
841 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
842 info.ctxt_id = j;
843
844 kgsl_snapshot_add_section(device,
845 KGSL_SNAPSHOT_SECTION_MVC, snapshot,
846 a6xx_snapshot_cluster_dbgahb, &info);
847 }
848 }
Lynus Vaz461e2382017-01-16 19:35:41 +0530849}
850
Shrenuj Bansal41665402016-12-16 15:25:54 -0800851static size_t a6xx_legacy_snapshot_mvc(struct kgsl_device *device, u8 *buf,
852 size_t remain, void *priv)
853{
854 struct kgsl_snapshot_mvc_regs *header =
855 (struct kgsl_snapshot_mvc_regs *)buf;
856 struct a6xx_cluster_regs_info *info =
857 (struct a6xx_cluster_regs_info *)priv;
858 struct a6xx_cluster_registers *cur_cluster = info->cluster;
859 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
860 unsigned int ctxt = info->ctxt_id;
861 unsigned int start, end, i, j, aperture_cntl = 0;
862 unsigned int data_size = 0;
863
864 if (remain < sizeof(*header)) {
865 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
866 return 0;
867 }
868
869 remain -= sizeof(*header);
870
871 header->ctxt_id = info->ctxt_id;
872 header->cluster_id = cur_cluster->id;
873
874 /*
875 * Set the AHB control for the Host to read from the
876 * cluster/context for this iteration.
877 */
878 aperture_cntl = ((cur_cluster->id & 0x7) << 8) | (ctxt << 4) | ctxt;
879 kgsl_regwrite(device, A6XX_CP_APERTURE_CNTL_HOST, aperture_cntl);
880
Harshdeep Dhatta0cf2412017-06-22 11:53:31 -0600881 if (cur_cluster->sel)
882 kgsl_regwrite(device, cur_cluster->sel->host_reg,
883 cur_cluster->sel->val);
884
Shrenuj Bansal41665402016-12-16 15:25:54 -0800885 for (i = 0; i < cur_cluster->num_sets; i++) {
886 start = cur_cluster->regs[2 * i];
887 end = cur_cluster->regs[2 * i + 1];
888
889 if (remain < (end - start + 3) * 4) {
890 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
891 goto out;
892 }
893
894 remain -= (end - start + 3) * 4;
895 data_size += (end - start + 3) * 4;
896
897 *data++ = start | (1 << 31);
898 *data++ = end;
899 for (j = start; j <= end; j++) {
900 unsigned int val;
901
902 kgsl_regread(device, j, &val);
903 *data++ = val;
904 }
905 }
906out:
907 return data_size + sizeof(*header);
908}
909
910static size_t a6xx_snapshot_mvc(struct kgsl_device *device, u8 *buf,
911 size_t remain, void *priv)
912{
913 struct kgsl_snapshot_mvc_regs *header =
914 (struct kgsl_snapshot_mvc_regs *)buf;
915 struct a6xx_cluster_regs_info *info =
916 (struct a6xx_cluster_regs_info *)priv;
917 struct a6xx_cluster_registers *cluster = info->cluster;
918 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
919 unsigned int *src;
920 int i, j;
921 unsigned int start, end;
922 size_t data_size = 0;
923
924 if (crash_dump_valid == false)
925 return a6xx_legacy_snapshot_mvc(device, buf, remain, info);
926
927 if (remain < sizeof(*header)) {
928 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
929 return 0;
930 }
931
932 remain -= sizeof(*header);
933
934 header->ctxt_id = info->ctxt_id;
935 header->cluster_id = cluster->id;
936
937 src = (unsigned int *)(a6xx_crashdump_registers.hostptr +
938 (header->ctxt_id ? cluster->offset1 : cluster->offset0));
939
940 for (i = 0; i < cluster->num_sets; i++) {
941 start = cluster->regs[2 * i];
942 end = cluster->regs[2 * i + 1];
943
944 if (remain < (end - start + 3) * 4) {
945 SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
946 goto out;
947 }
948
949 remain -= (end - start + 3) * 4;
950 data_size += (end - start + 3) * 4;
951
952 *data++ = start | (1 << 31);
953 *data++ = end;
954 for (j = start; j <= end; j++)
955 *data++ = *src++;
956 }
957
958out:
959 return data_size + sizeof(*header);
960
961}
962
963static void a6xx_snapshot_mvc_regs(struct kgsl_device *device,
964 struct kgsl_snapshot *snapshot)
965{
966 int i, j;
967 struct a6xx_cluster_regs_info info;
968
969 for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
970 struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];
971
972 info.cluster = cluster;
973 for (j = 0; j < A6XX_NUM_CTXTS; j++) {
974 info.ctxt_id = j;
975
976 kgsl_snapshot_add_section(device,
977 KGSL_SNAPSHOT_SECTION_MVC, snapshot,
978 a6xx_snapshot_mvc, &info);
979 }
980 }
981}
982
Lynus Vaz20c81272017-02-10 16:22:12 +0530983/* a6xx_dbgc_debug_bus_read() - Read data from trace bus */
984static void a6xx_dbgc_debug_bus_read(struct kgsl_device *device,
985 unsigned int block_id, unsigned int index, unsigned int *val)
986{
987 unsigned int reg;
988
989 reg = (block_id << A6XX_DBGC_CFG_DBGBUS_SEL_PING_BLK_SEL_SHIFT) |
990 (index << A6XX_DBGC_CFG_DBGBUS_SEL_PING_INDEX_SHIFT);
991
992 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_A, reg);
993 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_B, reg);
994 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_C, reg);
995 kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_D, reg);
996
Shrenuj Bansald4508ba2017-05-11 15:59:37 -0700997 /*
998 * There needs to be a delay of 1 us to ensure enough time for correct
999 * data is funneled into the trace buffer
1000 */
1001 udelay(1);
1002
Lynus Vaz20c81272017-02-10 16:22:12 +05301003 kgsl_regread(device, A6XX_DBGC_CFG_DBGBUS_TRACE_BUF2, val);
1004 val++;
1005 kgsl_regread(device, A6XX_DBGC_CFG_DBGBUS_TRACE_BUF1, val);
1006}
1007
/* a6xx_snapshot_dbgc_debugbus_block() - Capture debug data for a gpu block */
static size_t a6xx_snapshot_dbgc_debugbus_block(struct kgsl_device *device,
	u8 *buf, size_t remain, void *priv)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct kgsl_snapshot_debugbus *header =
		(struct kgsl_snapshot_debugbus *)buf;
	struct adreno_debugbus_block *block = priv;
	int i;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int dwords;
	unsigned int block_id;
	size_t size;

	dwords = block->dwords;

	/* For a6xx each debug bus data unit is 2 DWORDS */
	size = (dwords * sizeof(unsigned int) * 2) + sizeof(*header);

	/* Bail without writing anything if the section will not fit */
	if (remain < size) {
		SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
		return 0;
	}

	header->id = block->block_id;
	header->count = dwords * 2;

	block_id = block->block_id;
	/* GMU_GX data is read using the GMU_CX block id on A630 */
	if (adreno_is_a630(adreno_dev) &&
		(block_id == A6XX_DBGBUS_GMU_GX))
		block_id = A6XX_DBGBUS_GMU_CX;

	/* Each read fills a pair of dwords for the selected index */
	for (i = 0; i < dwords; i++)
		a6xx_dbgc_debug_bus_read(device, block_id, i, &data[i*2]);

	return size;
}
1046
/* a6xx_snapshot_vbif_debugbus_block() - Capture debug data for VBIF block */
static size_t a6xx_snapshot_vbif_debugbus_block(struct kgsl_device *device,
	u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_debugbus *header =
		(struct kgsl_snapshot_debugbus *)buf;
	struct adreno_debugbus_block *block = priv;
	int i, j;
	/*
	 * Total number of VBIF data words considering 3 sections:
	 * 2 arbiter blocks of 16 words
	 * 5 AXI XIN blocks of 18 dwords each
	 * 4 core clock side XIN blocks of 12 dwords each
	 */
	unsigned int dwords = (16 * A6XX_NUM_AXI_ARB_BLOCKS) +
			(18 * A6XX_NUM_XIN_AXI_BLOCKS) +
			(12 * A6XX_NUM_XIN_CORE_BLOCKS);
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	size_t size;
	unsigned int reg_clk;

	size = (dwords * sizeof(unsigned int)) + sizeof(*header);

	/* Bail without writing anything if the section will not fit */
	if (remain < size) {
		SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
		return 0;
	}
	header->id = block->block_id;
	header->count = dwords;

	/* Save the clock register, then force the test bus clock on */
	kgsl_regread(device, A6XX_VBIF_CLKON, &reg_clk);
	kgsl_regwrite(device, A6XX_VBIF_CLKON, reg_clk |
			(A6XX_VBIF_CLKON_FORCE_ON_TESTBUS_MASK <<
			A6XX_VBIF_CLKON_FORCE_ON_TESTBUS_SHIFT));
	kgsl_regwrite(device, A6XX_VBIF_TEST_BUS1_CTRL0, 0);
	kgsl_regwrite(device, A6XX_VBIF_TEST_BUS_OUT_CTRL,
			(A6XX_VBIF_TEST_BUS_OUT_CTRL_EN_MASK <<
			A6XX_VBIF_TEST_BUS_OUT_CTRL_EN_SHIFT));

	/* AXI arbiter blocks: one select bit per block in [17:16]+ */
	for (i = 0; i < A6XX_NUM_AXI_ARB_BLOCKS; i++) {
		kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL0,
			(1 << (i + 16)));
		for (j = 0; j < 16; j++) {
			kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL1,
				((j & A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL_MASK)
				<< A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL_SHIFT));
			kgsl_regread(device, A6XX_VBIF_TEST_BUS_OUT,
				data);
			data++;
		}
	}

	/* XIN blocks AXI side */
	for (i = 0; i < A6XX_NUM_XIN_AXI_BLOCKS; i++) {
		kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL0, 1 << i);
		for (j = 0; j < 18; j++) {
			kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL1,
				((j & A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL_MASK)
				<< A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL_SHIFT));
			kgsl_regread(device, A6XX_VBIF_TEST_BUS_OUT,
				data);
			data++;
		}
	}
	kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL0, 0);

	/* XIN blocks core clock side */
	for (i = 0; i < A6XX_NUM_XIN_CORE_BLOCKS; i++) {
		kgsl_regwrite(device, A6XX_VBIF_TEST_BUS1_CTRL0, 1 << i);
		for (j = 0; j < 12; j++) {
			kgsl_regwrite(device, A6XX_VBIF_TEST_BUS1_CTRL1,
				((j & A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL_MASK)
				<< A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL_SHIFT));
			kgsl_regread(device, A6XX_VBIF_TEST_BUS_OUT,
				data);
			data++;
		}
	}
	/* restore the clock of VBIF */
	kgsl_regwrite(device, A6XX_VBIF_CLKON, reg_clk);
	return size;
}
1129
/* a6xx_cx_debug_bus_read() - Read data from the CX DBGC trace bus */
static void a6xx_cx_debug_bus_read(struct kgsl_device *device,
	unsigned int block_id, unsigned int index, unsigned int *val)
{
	unsigned int reg;

	reg = (block_id << A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_BLK_SEL_SHIFT) |
		(index << A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_INDEX_SHIFT);

	/* Program the same selector into all four select registers */
	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_SEL_A, reg);
	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_SEL_B, reg);
	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_SEL_C, reg);
	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_SEL_D, reg);

	/*
	 * There needs to be a delay of 1 us to ensure enough time for correct
	 * data is funneled into the trace buffer
	 */
	udelay(1);

	/* Two dwords come back per read: TRACE_BUF2 then TRACE_BUF1 */
	adreno_cx_dbgc_regread(device, A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2, val);
	val++;
	adreno_cx_dbgc_regread(device, A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1, val);
}
1154
/*
 * a6xx_snapshot_cx_dbgc_debugbus_block() - Capture debug data for a gpu
 * block from the CX DBGC block
 */
static size_t a6xx_snapshot_cx_dbgc_debugbus_block(struct kgsl_device *device,
	u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_debugbus *header =
		(struct kgsl_snapshot_debugbus *)buf;
	struct adreno_debugbus_block *block = priv;
	int i;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	unsigned int dwords;
	size_t size;

	dwords = block->dwords;

	/* For a6xx each debug bus data unit is 2 DWORDS */
	size = (dwords * sizeof(unsigned int) * 2) + sizeof(*header);

	/* Bail without writing anything if the section will not fit */
	if (remain < size) {
		SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
		return 0;
	}

	header->id = block->block_id;
	header->count = dwords * 2;

	/* Each read fills a pair of dwords for the selected index */
	for (i = 0; i < dwords; i++)
		a6xx_cx_debug_bus_read(device, block->block_id, i,
					&data[i*2]);

	return size;
}
1189
/* a6xx_snapshot_debugbus() - Capture debug bus data */
static void a6xx_snapshot_debugbus(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot)
{
	int i;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	/* Configure the GX DBGC trace control registers */
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_CNTLT,
		(0xf << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT_SHIFT) |
		(0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU_SHIFT) |
		(0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN_SHIFT));

	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_CNTLM,
		0xf << A6XX_DBGC_CFG_DBGBUS_CTLTM_ENABLE_SHIFT);

	/* Clear the interval select registers */
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_0, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_1, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_2, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_3, 0);

	/* Identity byte-lane mapping: lane N selects byte N */
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_BYTEL_0,
		(0 << A6XX_DBGC_CFG_DBGBUS_BYTEL0_SHIFT) |
		(1 << A6XX_DBGC_CFG_DBGBUS_BYTEL1_SHIFT) |
		(2 << A6XX_DBGC_CFG_DBGBUS_BYTEL2_SHIFT) |
		(3 << A6XX_DBGC_CFG_DBGBUS_BYTEL3_SHIFT) |
		(4 << A6XX_DBGC_CFG_DBGBUS_BYTEL4_SHIFT) |
		(5 << A6XX_DBGC_CFG_DBGBUS_BYTEL5_SHIFT) |
		(6 << A6XX_DBGC_CFG_DBGBUS_BYTEL6_SHIFT) |
		(7 << A6XX_DBGC_CFG_DBGBUS_BYTEL7_SHIFT));
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_BYTEL_1,
		(8 << A6XX_DBGC_CFG_DBGBUS_BYTEL8_SHIFT) |
		(9 << A6XX_DBGC_CFG_DBGBUS_BYTEL9_SHIFT) |
		(10 << A6XX_DBGC_CFG_DBGBUS_BYTEL10_SHIFT) |
		(11 << A6XX_DBGC_CFG_DBGBUS_BYTEL11_SHIFT) |
		(12 << A6XX_DBGC_CFG_DBGBUS_BYTEL12_SHIFT) |
		(13 << A6XX_DBGC_CFG_DBGBUS_BYTEL13_SHIFT) |
		(14 << A6XX_DBGC_CFG_DBGBUS_BYTEL14_SHIFT) |
		(15 << A6XX_DBGC_CFG_DBGBUS_BYTEL15_SHIFT));

	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_0, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_1, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_2, 0);
	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_3, 0);

	/* Mirror the same configuration into the CX DBGC block */
	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_CNTLT,
		(0xf << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT_SHIFT) |
		(0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU_SHIFT) |
		(0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN_SHIFT));

	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_CNTLM,
		0xf << A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE_SHIFT);

	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0, 0);
	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1, 0);
	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2, 0);
	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3, 0);

	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0,
		(0 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL0_SHIFT) |
		(1 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL1_SHIFT) |
		(2 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL2_SHIFT) |
		(3 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL3_SHIFT) |
		(4 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL4_SHIFT) |
		(5 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL5_SHIFT) |
		(6 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL6_SHIFT) |
		(7 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL7_SHIFT));
	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1,
		(8 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL8_SHIFT) |
		(9 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL9_SHIFT) |
		(10 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL10_SHIFT) |
		(11 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL11_SHIFT) |
		(12 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL12_SHIFT) |
		(13 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL13_SHIFT) |
		(14 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL14_SHIFT) |
		(15 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL15_SHIFT));

	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0, 0);
	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1, 0);
	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2, 0);
	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3, 0);

	/* One snapshot section per GX DBGC debug bus block */
	for (i = 0; i < ARRAY_SIZE(a6xx_dbgc_debugbus_blocks); i++) {
		kgsl_snapshot_add_section(device,
			KGSL_SNAPSHOT_SECTION_DEBUGBUS,
			snapshot, a6xx_snapshot_dbgc_debugbus_block,
			(void *) &a6xx_dbgc_debugbus_blocks[i]);
	}

	/* Skip if GPU has GBIF */
	if (!adreno_has_gbif(adreno_dev))
		kgsl_snapshot_add_section(device,
			KGSL_SNAPSHOT_SECTION_DEBUGBUS,
			snapshot, a6xx_snapshot_vbif_debugbus_block,
			(void *) &a6xx_vbif_debugbus_blocks);

	/* Dump the CX debugbus data if the block exists */
	if (adreno_is_cx_dbgc_register(device, A6XX_CX_DBGC_CFG_DBGBUS_SEL_A)) {
		for (i = 0; i < ARRAY_SIZE(a6xx_cx_dbgc_debugbus_blocks); i++) {
			kgsl_snapshot_add_section(device,
				KGSL_SNAPSHOT_SECTION_DEBUGBUS,
				snapshot, a6xx_snapshot_cx_dbgc_debugbus_block,
				(void *) &a6xx_cx_dbgc_debugbus_blocks[i]);
		}
	}
}
1295
/*
 * a6xx_snapshot_gmu() - A6XX GMU snapshot function
 * @adreno_dev: Device being snapshotted
 * @snapshot: Pointer to the snapshot instance
 *
 * This is where all of the A6XX GMU specific bits and pieces are grabbed
 * into the snapshot memory. Does nothing when the GMU is not enabled.
 */
void a6xx_snapshot_gmu(struct adreno_device *adreno_dev,
		struct kgsl_snapshot *snapshot)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	unsigned int val;

	if (!kgsl_gmu_isenabled(device))
		return;

	/* Always-safe GMU registers are dumped unconditionally */
	adreno_snapshot_registers(device, snapshot, a6xx_gmu_registers,
					ARRAY_SIZE(a6xx_gmu_registers) / 2);

	/* GX-side GMU registers are only readable while GX is powered */
	if (gpudev->gx_is_on(adreno_dev)) {
		/* Set fence to ALLOW mode so registers can be read */
		kgsl_regwrite(device, A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
		kgsl_regread(device, A6XX_GMU_AO_AHB_FENCE_CTRL, &val);

		KGSL_DRV_ERR(device, "set FENCE to ALLOW mode:%x\n", val);
		adreno_snapshot_registers(device, snapshot,
				a6xx_gmu_gx_registers,
				ARRAY_SIZE(a6xx_gmu_gx_registers) / 2);
	}
}
1328
Lynus Vaz85150052017-02-21 17:57:48 +05301329/* a6xx_snapshot_sqe() - Dump SQE data in snapshot */
1330static size_t a6xx_snapshot_sqe(struct kgsl_device *device, u8 *buf,
1331 size_t remain, void *priv)
1332{
1333 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
1334 struct kgsl_snapshot_debug *header = (struct kgsl_snapshot_debug *)buf;
1335 unsigned int *data = (unsigned int *)(buf + sizeof(*header));
1336 struct adreno_firmware *fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);
1337
1338 if (remain < DEBUG_SECTION_SZ(1)) {
1339 SNAPSHOT_ERR_NOMEM(device, "SQE VERSION DEBUG");
1340 return 0;
1341 }
1342
1343 /* Dump the SQE firmware version */
1344 header->type = SNAPSHOT_DEBUG_SQE_VERSION;
1345 header->size = 1;
1346 *data = fw->version;
1347
1348 return DEBUG_SECTION_SZ(1);
1349}
1350
/*
 * _a6xx_do_crashdump() - Kick off the CP crash dumper and wait for it
 * @device: Device pointer
 *
 * Points the hardware crash dumper at the pre-built capture script and
 * polls for completion. On success crash_dump_valid is set so the
 * snapshot callbacks read from a6xx_crashdump_registers instead of the
 * hardware. Silently skipped when the dumper is disabled, the buffers
 * were never allocated, or the SMMU is stalled.
 */
static void _a6xx_do_crashdump(struct kgsl_device *device)
{
	unsigned long wait_time;
	unsigned int reg = 0;
	unsigned int val;

	crash_dump_valid = false;

	if (!device->snapshot_crashdumper)
		return;
	if (a6xx_capturescript.gpuaddr == 0 ||
		a6xx_crashdump_registers.gpuaddr == 0)
		return;

	/* IF the SMMU is stalled we cannot do a crash dump */
	kgsl_regread(device, A6XX_RBBM_STATUS3, &val);
	if (val & BIT(24))
		return;

	/* Turn on APRIV so we can access the buffers */
	kgsl_regwrite(device, A6XX_CP_MISC_CNTL, 1);

	kgsl_regwrite(device, A6XX_CP_CRASH_SCRIPT_BASE_LO,
			lower_32_bits(a6xx_capturescript.gpuaddr));
	kgsl_regwrite(device, A6XX_CP_CRASH_SCRIPT_BASE_HI,
			upper_32_bits(a6xx_capturescript.gpuaddr));
	kgsl_regwrite(device, A6XX_CP_CRASH_DUMP_CNTL, 1);

	/* Poll the status register until bit 1 (done) sets or we time out */
	wait_time = jiffies + msecs_to_jiffies(CP_CRASH_DUMPER_TIMEOUT);
	while (!time_after(jiffies, wait_time)) {
		kgsl_regread(device, A6XX_CP_CRASH_DUMP_STATUS, &reg);
		if (reg & 0x2)
			break;
		cpu_relax();
	}

	/* Drop APRIV again regardless of the outcome */
	kgsl_regwrite(device, A6XX_CP_MISC_CNTL, 0);

	if (!(reg & 0x2)) {
		KGSL_CORE_ERR("Crash dump timed out: 0x%X\n", reg);
		return;
	}

	crash_dump_valid = true;
}
1396
/*
 * a6xx_snapshot() - A6XX GPU snapshot function
 * @adreno_dev: Device being snapshotted
 * @snapshot: Pointer to the snapshot instance
 *
 * This is where all of the A6XX specific bits and pieces are grabbed
 * into the snapshot memory. Sections that touch GX hardware are skipped
 * when GX is powered down; sections that go through the SPTPRAC path are
 * gated on sptprac_is_on().
 */
void a6xx_snapshot(struct adreno_device *adreno_dev,
		struct kgsl_snapshot *snapshot)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	struct adreno_snapshot_data *snap_data = gpudev->snapshot_data;
	bool sptprac_on;
	unsigned int i;

	/* GMU TCM data dumped through AHB */
	a6xx_snapshot_gmu(adreno_dev, snapshot);

	sptprac_on = gpudev->sptprac_is_on(adreno_dev);

	/* Return if the GX is off */
	if (!gpudev->gx_is_on(adreno_dev))
		return;

	/* Dump the registers which get affected by crash dumper trigger */
	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
		snapshot, a6xx_snapshot_pre_crashdump_regs, NULL);

	/* Dump vbif registers as well which get affected by crash dumper */
	if (!adreno_has_gbif(adreno_dev))
		adreno_snapshot_vbif_registers(device, snapshot,
			a6xx_vbif_snapshot_registers,
			ARRAY_SIZE(a6xx_vbif_snapshot_registers));

	/* Try to run the crash dumper */
	if (sptprac_on)
		_a6xx_do_crashdump(device);

	/* One REGS section per register list entry */
	for (i = 0; i < ARRAY_SIZE(a6xx_reg_list); i++) {
		kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
			snapshot, a6xx_snapshot_registers, &a6xx_reg_list[i]);
	}

	/* CP_SQE indexed registers */
	kgsl_snapshot_indexed_registers(device, snapshot,
		A6XX_CP_SQE_STAT_ADDR, A6XX_CP_SQE_STAT_DATA,
		0, snap_data->sect_sizes->cp_pfp);

	/* CP_DRAW_STATE */
	kgsl_snapshot_indexed_registers(device, snapshot,
		A6XX_CP_DRAW_STATE_ADDR, A6XX_CP_DRAW_STATE_DATA,
		0, 0x100);

	/* SQE_UCODE Cache */
	kgsl_snapshot_indexed_registers(device, snapshot,
		A6XX_CP_SQE_UCODE_DBG_ADDR, A6XX_CP_SQE_UCODE_DBG_DATA,
		0, 0x6000);

	/* CP ROQ */
	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG,
		snapshot, adreno_snapshot_cp_roq,
		&snap_data->sect_sizes->roq);

	/* SQE Firmware */
	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG,
		snapshot, a6xx_snapshot_sqe, NULL);

	/* Mempool debug data */
	a6xx_snapshot_mempool(device, snapshot);

	if (sptprac_on) {
		/* Shader memory */
		a6xx_snapshot_shader(device, snapshot);

		/* MVC register section */
		a6xx_snapshot_mvc_regs(device, snapshot);

		/* registers dumped through DBG AHB */
		a6xx_snapshot_dbgahb_regs(device, snapshot);
	}

	a6xx_snapshot_debugbus(device, snapshot);

}
1483
/*
 * _a6xx_crashdump_init_mvc() - Emit crash dump script entries for the MVC
 * register clusters.
 * @ptr: Script buffer to append qword pairs to
 * @offset: In/out byte offset into a6xx_crashdump_registers where the
 *          dumped data will land; also recorded per-cluster in
 *          offset0/offset1 so the snapshot side can find each context.
 *
 * Returns the number of qwords written to the script.
 * NOTE(review): each qword pair appears to be one dumper instruction with
 * the register offset in the high bits (<< 44) — confirm against the CP
 * crash dumper script format before changing the encoding.
 */
static int _a6xx_crashdump_init_mvc(uint64_t *ptr, uint64_t *offset)
{
	int qwords = 0;
	unsigned int i, j, k;
	unsigned int count;

	for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
		struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];

		/* Program the cluster's select register first, if any */
		if (cluster->sel) {
			ptr[qwords++] = cluster->sel->val;
			ptr[qwords++] = ((uint64_t)cluster->sel->cd_reg << 44) |
					(1 << 21) | 1;
		}

		cluster->offset0 = *offset;
		for (j = 0; j < A6XX_NUM_CTXTS; j++) {

			/* Remember where the second context's data starts */
			if (j == 1)
				cluster->offset1 = *offset;

			/* Point the aperture at this cluster/context */
			ptr[qwords++] = (cluster->id << 8) | (j << 4) | j;
			ptr[qwords++] =
				((uint64_t)A6XX_CP_APERTURE_CNTL_CD << 44) |
				(1 << 21) | 1;

			/* One read command per register range */
			for (k = 0; k < cluster->num_sets; k++) {
				count = REG_PAIR_COUNT(cluster->regs, k);
				ptr[qwords++] =
				a6xx_crashdump_registers.gpuaddr + *offset;
				ptr[qwords++] =
				(((uint64_t)cluster->regs[2 * k]) << 44) |
						count;

				*offset += count * sizeof(unsigned int);
			}
		}
	}

	return qwords;
}
1525
/*
 * _a6xx_crashdump_init_shader() - Emit crash dump script entries for one
 * shader block (all of its banks).
 * @block: Shader block descriptor; block->offset is set to the data
 *         offset of bank 0 for later retrieval.
 * @ptr: Script buffer to append qword pairs to
 * @offset: In/out byte offset into a6xx_crashdump_registers
 *
 * Returns the number of qwords written to the script.
 */
static int _a6xx_crashdump_init_shader(struct a6xx_shader_block *block,
		uint64_t *ptr, uint64_t *offset)
{
	int qwords = 0;
	unsigned int j;

	/* Capture each bank in the block */
	for (j = 0; j < A6XX_NUM_SHADER_BANKS; j++) {
		/* Program the aperture */
		ptr[qwords++] =
			(block->statetype << A6XX_SHADER_STATETYPE_SHIFT) | j;
		ptr[qwords++] = (((uint64_t) A6XX_HLSQ_DBG_READ_SEL << 44)) |
			(1 << 21) | 1;

		/* Read all the data in one chunk */
		ptr[qwords++] = a6xx_crashdump_registers.gpuaddr + *offset;
		ptr[qwords++] =
			(((uint64_t) A6XX_HLSQ_DBG_AHB_READ_APERTURE << 44)) |
			block->sz;

		/* Remember the offset of the first bank for easy access */
		if (j == 0)
			block->offset = *offset;

		*offset += block->sz * sizeof(unsigned int);
	}

	return qwords;
}
1555
/*
 * _a6xx_crashdump_init_ctx_dbgahb() - Emit crash dump script entries for
 * the per-context debug AHB register clusters.
 * @ptr: Script buffer to append qword pairs to
 * @offset: In/out byte offset into a6xx_crashdump_registers; recorded in
 *          each cluster's offset0/offset1 so the snapshot side can locate
 *          the data for context 0 and 1.
 *
 * Returns the number of qwords written to the script.
 */
static int _a6xx_crashdump_init_ctx_dbgahb(uint64_t *ptr, uint64_t *offset)
{
	int qwords = 0;
	unsigned int i, j, k;
	unsigned int count;

	for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
		struct a6xx_cluster_dbgahb_registers *cluster =
				&a6xx_dbgahb_ctx_clusters[i];

		cluster->offset0 = *offset;

		for (j = 0; j < A6XX_NUM_CTXTS; j++) {
			/* Remember where the second context's data starts */
			if (j == 1)
				cluster->offset1 = *offset;

			/* Program the aperture */
			ptr[qwords++] =
				((cluster->statetype + j * 2) & 0xff) << 8;
			ptr[qwords++] =
				(((uint64_t)A6XX_HLSQ_DBG_READ_SEL << 44)) |
				(1 << 21) | 1;

			/* One read command per register range in the set */
			for (k = 0; k < cluster->num_sets; k++) {
				unsigned int start = cluster->regs[2 * k];

				count = REG_PAIR_COUNT(cluster->regs, k);
				ptr[qwords++] =
				a6xx_crashdump_registers.gpuaddr + *offset;
				ptr[qwords++] =
				(((uint64_t)(A6XX_HLSQ_DBG_AHB_READ_APERTURE +
					start - cluster->regbase / 4) << 44)) |
					count;

				*offset += count * sizeof(unsigned int);
			}
		}
	}
	return qwords;
}
1596
/*
 * a6xx_crashdump_init() - Allocate the capture buffers and build the
 * crashdumper script for an A6XX GPU.
 * @adreno_dev: Pointer to the adreno device
 *
 * The crashdumper executes a script of qword pairs (destination GPU
 * address, source register/count descriptor).  This function first sizes
 * the script and data buffers by walking every register list, shader
 * block and register cluster, then allocates the two global buffers and
 * emits the script.  The sizing loops here must stay in sync with the
 * emission loops and the _a6xx_crashdump_init_* helpers below.
 */
void a6xx_crashdump_init(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned int script_size = 0;
	unsigned int data_size = 0;
	unsigned int i, j, k;
	uint64_t *ptr;
	uint64_t offset = 0;

	/* The buffers are global; only allocate and build the script once */
	if (a6xx_capturescript.gpuaddr != 0 &&
		a6xx_crashdump_registers.gpuaddr != 0)
		return;

	/*
	 * We need to allocate two buffers:
	 * 1 - the buffer to hold the draw script
	 * 2 - the buffer to hold the data
	 */

	/*
	 * To save the registers, we need 16 bytes per register pair for the
	 * script and a dword for each register in the data
	 */
	for (i = 0; i < ARRAY_SIZE(a6xx_reg_list); i++) {
		struct reg_list *regs = &a6xx_reg_list[i];

		/* 16 bytes for programming the aperture */
		if (regs->sel)
			script_size += 16;

		/* Each pair needs 16 bytes (2 qwords) */
		script_size += regs->count * 16;

		/* Each register needs a dword in the data */
		for (j = 0; j < regs->count; j++)
			data_size += REG_PAIR_COUNT(regs->regs, j) *
				sizeof(unsigned int);

	}

	/*
	 * To save the shader blocks for each block in each type we need 32
	 * bytes for the script (16 bytes to program the aperture and 16 to
	 * read the data) and then a block specific number of bytes to hold
	 * the data
	 */
	for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
		script_size += 32 * A6XX_NUM_SHADER_BANKS;
		data_size += a6xx_shader_blocks[i].sz * sizeof(unsigned int) *
			A6XX_NUM_SHADER_BANKS;
	}

	/* Calculate the script and data size for MVC registers */
	for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
		struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];

		for (j = 0; j < A6XX_NUM_CTXTS; j++) {

			/* 16 bytes for programming the aperture */
			script_size += 16;

			/* Reading each pair of registers takes 16 bytes */
			script_size += 16 * cluster->num_sets;

			/* A dword per register read from the cluster list */
			for (k = 0; k < cluster->num_sets; k++)
				data_size += REG_PAIR_COUNT(cluster->regs, k) *
						sizeof(unsigned int);
		}
	}

	/* Calculate the script and data size for debug AHB registers */
	for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
		struct a6xx_cluster_dbgahb_registers *cluster =
				&a6xx_dbgahb_ctx_clusters[i];

		for (j = 0; j < A6XX_NUM_CTXTS; j++) {

			/* 16 bytes for programming the aperture */
			script_size += 16;

			/* Reading each pair of registers takes 16 bytes */
			script_size += 16 * cluster->num_sets;

			/* A dword per register read from the cluster list */
			for (k = 0; k < cluster->num_sets; k++)
				data_size += REG_PAIR_COUNT(cluster->regs, k) *
						sizeof(unsigned int);
		}
	}

	/* Now allocate the script and data buffers */

	/* The script buffer needs 2 extra qwords on the end (terminator) */
	if (kgsl_allocate_global(device, &a6xx_capturescript,
		script_size + 16, KGSL_MEMFLAGS_GPUREADONLY,
		KGSL_MEMDESC_PRIVILEGED, "capturescript"))
		return;

	/* On failure, free the script buffer so a later retry starts clean */
	if (kgsl_allocate_global(device, &a6xx_crashdump_registers, data_size,
		0, KGSL_MEMDESC_PRIVILEGED, "capturescript_regs")) {
		kgsl_free_global(KGSL_DEVICE(adreno_dev), &a6xx_capturescript);
		return;
	}

	/* Build the crash script */

	ptr = (uint64_t *)a6xx_capturescript.hostptr;

	/* For the registers, program a read command for each pair */
	for (i = 0; i < ARRAY_SIZE(a6xx_reg_list); i++) {
		struct reg_list *regs = &a6xx_reg_list[i];

		/* Remember where this list's data starts in the buffer */
		regs->offset = offset;

		/* Program the SEL_CNTL_CD register appropriately */
		if (regs->sel) {
			*ptr++ = regs->sel->val;
			*ptr++ = (((uint64_t)regs->sel->cd_reg << 44)) |
				(1 << 21) | 1;
		}

		for (j = 0; j < regs->count; j++) {
			/* r = number of registers in this contiguous range */
			unsigned int r = REG_PAIR_COUNT(regs->regs, j);
			*ptr++ = a6xx_crashdump_registers.gpuaddr + offset;
			*ptr++ = (((uint64_t) regs->regs[2 * j]) << 44) | r;
			offset += r * sizeof(unsigned int);
		}
	}

	/* Program each shader block */
	for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
		ptr += _a6xx_crashdump_init_shader(&a6xx_shader_blocks[i], ptr,
							&offset);
	}

	/* Program the capturescript for the MVC registers */
	ptr += _a6xx_crashdump_init_mvc(ptr, &offset);

	/* Program the capturescript for the context debug AHB clusters */
	ptr += _a6xx_crashdump_init_ctx_dbgahb(ptr, &offset);

	/* Terminate the script with a pair of zero qwords */
	*ptr++ = 0;
	*ptr++ = 0;
}