/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/io.h>
#include "kgsl.h"
#include "adreno.h"
#include "kgsl_snapshot.h"
#include "adreno_snapshot.h"
#include "a6xx_reg.h"
#include "adreno_a6xx.h"


#define A6XX_NUM_CTXTS 2

static const unsigned int a6xx_gras_cluster[] = {
        0x8000, 0x8006, 0x8010, 0x8092, 0x8094, 0x809D, 0x80A0, 0x80A6,
        0x80AF, 0x80F1, 0x8100, 0x8107, 0x8109, 0x8109, 0x8110, 0x8110,
        0x8400, 0x840B,
};

static const unsigned int a6xx_ps_cluster[] = {
        0x8800, 0x8806, 0x8809, 0x8811, 0x8818, 0x881E, 0x8820, 0x8865,
        0x8870, 0x8879, 0x8880, 0x8889, 0x8890, 0x8891, 0x8898, 0x8898,
        0x88C0, 0x88C1, 0x88D0, 0x88E3, 0x88F0, 0x88F3, 0x8900, 0x891A,
        0x8927, 0x8928, 0x8C00, 0x8C01, 0x8C17, 0x8C33, 0x9200, 0x9216,
        0x9218, 0x9236, 0x9300, 0x9306,
};

static const unsigned int a6xx_fe_cluster[] = {
        0x9300, 0x9306, 0x9800, 0x9806, 0x9B00, 0x9B07, 0xA000, 0xA009,
        0xA00E, 0xA0EF, 0xA0F8, 0xA0F8,
};

static const unsigned int a6xx_pc_vs_cluster[] = {
        0x9100, 0x9108, 0x9300, 0x9306, 0x9980, 0x9981, 0x9B00, 0x9B07,
};

static struct a6xx_cluster_registers {
        unsigned int id;
        const unsigned int *regs;
        unsigned int num_sets;
        unsigned int offset0;
        unsigned int offset1;
} a6xx_clusters[] = {
        { CP_CLUSTER_GRAS, a6xx_gras_cluster, ARRAY_SIZE(a6xx_gras_cluster)/2 },
        { CP_CLUSTER_PS, a6xx_ps_cluster, ARRAY_SIZE(a6xx_ps_cluster)/2 },
        { CP_CLUSTER_FE, a6xx_fe_cluster, ARRAY_SIZE(a6xx_fe_cluster)/2 },
        { CP_CLUSTER_PC_VS, a6xx_pc_vs_cluster,
                ARRAY_SIZE(a6xx_pc_vs_cluster)/2 },
};

struct a6xx_cluster_regs_info {
        struct a6xx_cluster_registers *cluster;
        unsigned int ctxt_id;
};

static const unsigned int a6xx_sp_vs_hlsq_cluster[] = {
        0xB800, 0xB803, 0xB820, 0xB822,
};

static const unsigned int a6xx_sp_vs_sp_cluster[] = {
        0xA800, 0xA824, 0xA830, 0xA83C, 0xA840, 0xA864, 0xA870, 0xA895,
        0xA8A0, 0xA8AF, 0xA8C0, 0xA8C3,
};

static const unsigned int a6xx_hlsq_duplicate_cluster[] = {
        0xBB10, 0xBB11, 0xBB20, 0xBB29,
};

static const unsigned int a6xx_hlsq_2d_duplicate_cluster[] = {
        0xBD80, 0xBD80,
};

static const unsigned int a6xx_sp_duplicate_cluster[] = {
        0xAB00, 0xAB00, 0xAB04, 0xAB05, 0xAB10, 0xAB1B, 0xAB20, 0xAB20,
};

static const unsigned int a6xx_tp_duplicate_cluster[] = {
        0xB300, 0xB307, 0xB309, 0xB309, 0xB380, 0xB382,
};

static const unsigned int a6xx_sp_ps_hlsq_cluster[] = {
        0xB980, 0xB980, 0xB982, 0xB987, 0xB990, 0xB99B, 0xB9A0, 0xB9A2,
        0xB9C0, 0xB9C9,
};

static const unsigned int a6xx_sp_ps_hlsq_2d_cluster[] = {
        0xBD80, 0xBD80,
};

static const unsigned int a6xx_sp_ps_sp_cluster[] = {
        0xA980, 0xA9A8, 0xA9B0, 0xA9BC, 0xA9D0, 0xA9D3, 0xA9E0, 0xA9F3,
        0xAA00, 0xAA00, 0xAA30, 0xAA31,
};

static const unsigned int a6xx_sp_ps_sp_2d_cluster[] = {
        0xACC0, 0xACC0,
};

static const unsigned int a6xx_sp_ps_tp_cluster[] = {
        0xB180, 0xB183, 0xB190, 0xB191,
};

static const unsigned int a6xx_sp_ps_tp_2d_cluster[] = {
        0xB4C0, 0xB4D1,
};

static struct a6xx_cluster_dbgahb_registers {
        unsigned int id;
        unsigned int regbase;
        unsigned int statetype;
        const unsigned int *regs;
        unsigned int num_sets;
} a6xx_dbgahb_ctx_clusters[] = {
        { CP_CLUSTER_SP_VS, 0x0002E000, 0x41, a6xx_sp_vs_hlsq_cluster,
                ARRAY_SIZE(a6xx_sp_vs_hlsq_cluster) / 2 },
        { CP_CLUSTER_SP_VS, 0x0002A000, 0x21, a6xx_sp_vs_sp_cluster,
                ARRAY_SIZE(a6xx_sp_vs_sp_cluster) / 2 },
        { CP_CLUSTER_SP_VS, 0x0002EC00, 0x41, a6xx_hlsq_duplicate_cluster,
                ARRAY_SIZE(a6xx_hlsq_duplicate_cluster) / 2 },
        { CP_CLUSTER_SP_VS, 0x0002F000, 0x45, a6xx_hlsq_2d_duplicate_cluster,
                ARRAY_SIZE(a6xx_hlsq_2d_duplicate_cluster) / 2 },
        { CP_CLUSTER_SP_VS, 0x0002AC00, 0x21, a6xx_sp_duplicate_cluster,
                ARRAY_SIZE(a6xx_sp_duplicate_cluster) / 2 },
        { CP_CLUSTER_SP_VS, 0x0002CC00, 0x1, a6xx_tp_duplicate_cluster,
                ARRAY_SIZE(a6xx_tp_duplicate_cluster) / 2 },
        { CP_CLUSTER_SP_PS, 0x0002E600, 0x42, a6xx_sp_ps_hlsq_cluster,
                ARRAY_SIZE(a6xx_sp_ps_hlsq_cluster) / 2 },
        { CP_CLUSTER_SP_PS, 0x0002F300, 0x46, a6xx_sp_ps_hlsq_2d_cluster,
                ARRAY_SIZE(a6xx_sp_ps_hlsq_2d_cluster) / 2 },
        { CP_CLUSTER_SP_PS, 0x0002A600, 0x22, a6xx_sp_ps_sp_cluster,
                ARRAY_SIZE(a6xx_sp_ps_sp_cluster) / 2 },
        { CP_CLUSTER_SP_PS, 0x0002B300, 0x26, a6xx_sp_ps_sp_2d_cluster,
                ARRAY_SIZE(a6xx_sp_ps_sp_2d_cluster) / 2 },
        { CP_CLUSTER_SP_PS, 0x0002C600, 0x2, a6xx_sp_ps_tp_cluster,
                ARRAY_SIZE(a6xx_sp_ps_tp_cluster) / 2 },
        { CP_CLUSTER_SP_PS, 0x0002D300, 0x6, a6xx_sp_ps_tp_2d_cluster,
                ARRAY_SIZE(a6xx_sp_ps_tp_2d_cluster) / 2 },
        { CP_CLUSTER_SP_PS, 0x0002EC00, 0x42, a6xx_hlsq_duplicate_cluster,
                ARRAY_SIZE(a6xx_hlsq_duplicate_cluster) / 2 },
        { CP_CLUSTER_SP_VS, 0x0002AC00, 0x22, a6xx_sp_duplicate_cluster,
                ARRAY_SIZE(a6xx_sp_duplicate_cluster) / 2 },
        { CP_CLUSTER_SP_VS, 0x0002CC00, 0x2, a6xx_tp_duplicate_cluster,
                ARRAY_SIZE(a6xx_tp_duplicate_cluster) / 2 },
};

struct a6xx_cluster_dbgahb_regs_info {
        struct a6xx_cluster_dbgahb_registers *cluster;
        unsigned int ctxt_id;
};

static const unsigned int a6xx_hlsq_non_ctx_registers[] = {
        0xBE00, 0xBE01, 0xBE04, 0xBE05, 0xBE08, 0xBE09, 0xBE10, 0xBE15,
        0xBE20, 0xBE23,
};

static const unsigned int a6xx_sp_non_ctx_registers[] = {
        0xAE00, 0xAE04, 0xAE0C, 0xAE0C, 0xAE0F, 0xAE2B, 0xAE30, 0xAE32,
        0xAE35, 0xAE35, 0xAE3A, 0xAE3F, 0xAE50, 0xAE52,
};

static const unsigned int a6xx_tp_non_ctx_registers[] = {
        0xB600, 0xB601, 0xB604, 0xB605, 0xB610, 0xB61B, 0xB620, 0xB623,
};

static struct a6xx_non_ctx_dbgahb_registers {
        unsigned int regbase;
        unsigned int statetype;
        const unsigned int *regs;
        unsigned int num_sets;
} a6xx_non_ctx_dbgahb[] = {
        { 0x0002F800, 0x40, a6xx_hlsq_non_ctx_registers,
                ARRAY_SIZE(a6xx_hlsq_non_ctx_registers) / 2 },
        { 0x0002B800, 0x20, a6xx_sp_non_ctx_registers,
                ARRAY_SIZE(a6xx_sp_non_ctx_registers) / 2 },
        { 0x0002D800, 0x0, a6xx_tp_non_ctx_registers,
                ARRAY_SIZE(a6xx_tp_non_ctx_registers) / 2 },
};

static const unsigned int a6xx_vbif_ver_20xxxxxx_registers[] = {
        /* VBIF */
        0x3000, 0x3007, 0x300C, 0x3014, 0x3018, 0x302D, 0x3030, 0x3031,
        0x3034, 0x3036, 0x303C, 0x303D, 0x3040, 0x3040, 0x3042, 0x3042,
        0x3049, 0x3049, 0x3058, 0x3058, 0x305A, 0x3061, 0x3064, 0x3068,
        0x306C, 0x306D, 0x3080, 0x3088, 0x308B, 0x308C, 0x3090, 0x3094,
        0x3098, 0x3098, 0x309C, 0x309C, 0x30C0, 0x30C0, 0x30C8, 0x30C8,
        0x30D0, 0x30D0, 0x30D8, 0x30D8, 0x30E0, 0x30E0, 0x3100, 0x3100,
        0x3108, 0x3108, 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120,
        0x3124, 0x3125, 0x3129, 0x3129, 0x3131, 0x3131, 0x3154, 0x3154,
        0x3156, 0x3156, 0x3158, 0x3158, 0x315A, 0x315A, 0x315C, 0x315C,
        0x315E, 0x315E, 0x3160, 0x3160, 0x3162, 0x3162, 0x340C, 0x340C,
        0x3410, 0x3410, 0x3800, 0x3801,
};

static const struct adreno_vbif_snapshot_registers
a6xx_vbif_snapshot_registers[] = {
        { 0x20040000, 0xFF000000, a6xx_vbif_ver_20xxxxxx_registers,
                ARRAY_SIZE(a6xx_vbif_ver_20xxxxxx_registers)/2},
};

/*
 * Set of registers to dump for A6XX on snapshot.
 * Registers in pairs - first value is the start offset, second
 * is the stop offset (inclusive)
 */

static const unsigned int a6xx_registers[] = {
        /* RBBM */
        0x0000, 0x0002, 0x0010, 0x0010, 0x0012, 0x0012, 0x0014, 0x0014,
        0x0018, 0x001B, 0x001E, 0x0032, 0x0038, 0x003C, 0x0042, 0x0042,
        0x0044, 0x0044, 0x0047, 0x0047, 0x0056, 0x0056, 0x00AD, 0x00AE,
        0x00B0, 0x00FB, 0x0100, 0x011D, 0x0200, 0x020D, 0x0210, 0x0213,
        0x0218, 0x023D, 0x0400, 0x04F9, 0x0500, 0x0500, 0x0505, 0x050B,
        0x050E, 0x0511, 0x0533, 0x0533, 0x0540, 0x0555,
        /* CP */
        0x0800, 0x0808, 0x0810, 0x0813, 0x0820, 0x0821, 0x0823, 0x0827,
        0x0830, 0x0833, 0x0840, 0x0843, 0x084F, 0x086F, 0x0880, 0x088A,
        0x08A0, 0x08AB, 0x08C0, 0x08C4, 0x08D0, 0x08DD, 0x08F0, 0x08F3,
        0x0900, 0x0903, 0x0908, 0x0911, 0x0928, 0x093E, 0x0942, 0x094D,
        0x0980, 0x0984, 0x098D, 0x0996, 0x0998, 0x099E, 0x09A0, 0x09A6,
        0x09A8, 0x09AE, 0x09B0, 0x09B1, 0x09C2, 0x09C8, 0x0A00, 0x0A03,
        /* VSC */
        0x0C00, 0x0C04, 0x0C06, 0x0C06, 0x0C10, 0x0CD9, 0x0E00, 0x0E0E,
        /* UCHE */
        0x0E10, 0x0E13, 0x0E17, 0x0E19, 0x0E1C, 0x0E2B, 0x0E30, 0x0E32,
        0x0E38, 0x0E39,
        /* GRAS */
        0x8600, 0x8601, 0x8604, 0x8605, 0x8610, 0x861B, 0x8620, 0x8620,
        0x8628, 0x862B, 0x8630, 0x8637,
        /* RB */
        0x8E01, 0x8E01, 0x8E04, 0x8E05, 0x8E07, 0x8E08, 0x8E0C, 0x8E0C,
        0x8E10, 0x8E1C, 0x8E20, 0x8E25, 0x8E28, 0x8E28, 0x8E2C, 0x8E2F,
        0x8E3B, 0x8E3E, 0x8E40, 0x8E43, 0x8E50, 0x8E5E, 0x8E70, 0x8E77,
        /* VPC */
        0x9600, 0x9604, 0x9624, 0x9637,
        /* PC */
        0x9E00, 0x9E01, 0x9E03, 0x9E0E, 0x9E11, 0x9E16, 0x9E19, 0x9E19,
        0x9E1C, 0x9E1C, 0x9E20, 0x9E23, 0x9E30, 0x9E31, 0x9E34, 0x9E34,
        0x9E70, 0x9E72, 0x9E78, 0x9E79, 0x9E80, 0x9FFF,
        /* VFD */
        0xA600, 0xA601, 0xA603, 0xA603, 0xA60A, 0xA60A, 0xA610, 0xA617,
        0xA630, 0xA630, 0xD200, 0xD263,
};


static struct kgsl_memdesc a6xx_capturescript;
static struct kgsl_memdesc a6xx_crashdump_registers;
static bool crash_dump_valid;

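/*
 * a6xx_legacy_snapshot_registers() - Dump the a6xx_registers ranges with
 * individual register reads, used when the crash dumper output is not
 * available.
 */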
static size_t a6xx_legacy_snapshot_registers(struct kgsl_device *device,
                u8 *buf, size_t remain)
{
        struct kgsl_snapshot_registers regs = {
                .regs = a6xx_registers,
                .count = ARRAY_SIZE(a6xx_registers) / 2,
        };

        return kgsl_snapshot_dump_registers(device, buf, remain, &regs);
}

static struct cdregs {
        const unsigned int *regs;
        unsigned int size;
} _a6xx_cd_registers[] = {
        { a6xx_registers, ARRAY_SIZE(a6xx_registers) },
};

/* Number of registers in pair _i of range list _a (both ends inclusive) */
#define REG_PAIR_COUNT(_a, _i) \
        (((_a)[(2 * (_i)) + 1] - (_a)[2 * (_i)]) + 1)

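/*
 * a6xx_snapshot_registers() - Write the REGS section from the crash dumper
 * output in a6xx_crashdump_registers, falling back to legacy register reads
 * if the dump is not valid. Each register is stored as an (offset, value)
 * pair.
 */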
static size_t a6xx_snapshot_registers(struct kgsl_device *device, u8 *buf,
                size_t remain, void *priv)
{
        struct kgsl_snapshot_regs *header = (struct kgsl_snapshot_regs *)buf;
        unsigned int *data = (unsigned int *)(buf + sizeof(*header));
        unsigned int *src = (unsigned int *)a6xx_crashdump_registers.hostptr;
        unsigned int i, j, k;
        unsigned int count = 0;

        if (!crash_dump_valid)
                return a6xx_legacy_snapshot_registers(device, buf, remain);

        if (remain < sizeof(*header)) {
                SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
                return 0;
        }

        remain -= sizeof(*header);

        for (i = 0; i < ARRAY_SIZE(_a6xx_cd_registers); i++) {
                struct cdregs *regs = &_a6xx_cd_registers[i];

                for (j = 0; j < regs->size / 2; j++) {
                        unsigned int start = regs->regs[2 * j];
                        unsigned int end = regs->regs[(2 * j) + 1];

                        if (remain < ((end - start) + 1) * 8) {
                                SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
                                goto out;
                        }

                        remain -= ((end - start) + 1) * 8;

                        for (k = start; k <= end; k++, count++) {
                                *data++ = k;
                                *data++ = *src++;
                        }
                }
        }

out:
        header->count = count;

        /* Return the size of the section */
        return (count * 8) + sizeof(*header);
}

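/*
 * a6xx_read_dbgahb() - Read a single register through the HLSQ debug AHB
 * aperture. regbase is a byte address, so regbase / 4 is the first dword
 * register of the block and (reg - regbase / 4) is the offset into the
 * aperture.
 */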
static inline unsigned int a6xx_read_dbgahb(struct kgsl_device *device,
                unsigned int regbase, unsigned int reg)
{
        unsigned int read_reg = A6XX_HLSQ_DBG_AHB_READ_APERTURE +
                                reg - regbase / 4;
        unsigned int val;

        kgsl_regread(device, read_reg, &val);
        return val;
}

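/*
 * a6xx_snapshot_cluster_dbgahb() - Dump one context of a debug AHB register
 * cluster as an MVC section. The registers are read through the HLSQ debug
 * aperture after selecting the cluster's statetype and context.
 */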
static size_t a6xx_snapshot_cluster_dbgahb(struct kgsl_device *device, u8 *buf,
                size_t remain, void *priv)
{
        struct kgsl_snapshot_mvc_regs *header =
                (struct kgsl_snapshot_mvc_regs *)buf;
        struct a6xx_cluster_dbgahb_regs_info *info =
                (struct a6xx_cluster_dbgahb_regs_info *)priv;
        struct a6xx_cluster_dbgahb_registers *cur_cluster = info->cluster;
        unsigned int read_sel;
        unsigned int data_size = 0;
        unsigned int *data = (unsigned int *)(buf + sizeof(*header));
        int i, j;

        if (remain < sizeof(*header)) {
                SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
                return 0;
        }

        remain -= sizeof(*header);

        header->ctxt_id = info->ctxt_id;
        header->cluster_id = cur_cluster->id;

        read_sel = ((cur_cluster->statetype + info->ctxt_id * 2) & 0xff) << 8;
        kgsl_regwrite(device, A6XX_HLSQ_DBG_READ_SEL, read_sel);

        for (i = 0; i < cur_cluster->num_sets; i++) {
                unsigned int start = cur_cluster->regs[2 * i];
                unsigned int end = cur_cluster->regs[2 * i + 1];

                if (remain < (end - start + 3) * 4) {
                        SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
                        goto out;
                }

                remain -= (end - start + 3) * 4;
                data_size += (end - start + 3) * 4;

                *data++ = start | (1 << 31);
                *data++ = end;

                for (j = start; j <= end; j++) {
                        unsigned int val;

                        val = a6xx_read_dbgahb(device, cur_cluster->regbase, j);
                        *data++ = val;
                }
        }

out:
        return data_size + sizeof(*header);
}

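/*
 * a6xx_snapshot_non_ctx_dbgahb() - Dump a non-context debug AHB register
 * block as a REGS section of (offset, value) pairs.
 */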
static size_t a6xx_snapshot_non_ctx_dbgahb(struct kgsl_device *device, u8 *buf,
                size_t remain, void *priv)
{
        struct kgsl_snapshot_regs *header =
                (struct kgsl_snapshot_regs *)buf;
        struct a6xx_non_ctx_dbgahb_registers *regs =
                (struct a6xx_non_ctx_dbgahb_registers *)priv;
        unsigned int *data = (unsigned int *)(buf + sizeof(*header));
        int count = 0;
        unsigned int read_sel;
        int i, j;

        /* Figure out how many registers we are going to dump */
        for (i = 0; i < regs->num_sets; i++) {
                int start = regs->regs[i * 2];
                int end = regs->regs[i * 2 + 1];

                count += (end - start + 1);
        }

        if (remain < (count * 8) + sizeof(*header)) {
                SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
                return 0;
        }

        header->count = count;

        read_sel = (regs->statetype & 0xff) << 8;
        kgsl_regwrite(device, A6XX_HLSQ_DBG_READ_SEL, read_sel);

        for (i = 0; i < regs->num_sets; i++) {
                unsigned int start = regs->regs[2 * i];
                unsigned int end = regs->regs[2 * i + 1];

                for (j = start; j <= end; j++) {
                        unsigned int val;

                        val = a6xx_read_dbgahb(device, regs->regbase, j);
                        *data++ = j;
                        *data++ = val;
                }
        }
        return (count * 8) + sizeof(*header);
}

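/*
 * a6xx_snapshot_dbgahb_regs() - Add snapshot sections for every debug AHB
 * cluster (one MVC section per context) and for the non-context blocks.
 */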
static void a6xx_snapshot_dbgahb_regs(struct kgsl_device *device,
                struct kgsl_snapshot *snapshot)
{
        int i, j;

        for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
                struct a6xx_cluster_dbgahb_registers *cluster =
                                &a6xx_dbgahb_ctx_clusters[i];
                struct a6xx_cluster_dbgahb_regs_info info;

                info.cluster = cluster;
                for (j = 0; j < A6XX_NUM_CTXTS; j++) {
                        info.ctxt_id = j;

                        kgsl_snapshot_add_section(device,
                                KGSL_SNAPSHOT_SECTION_MVC, snapshot,
                                a6xx_snapshot_cluster_dbgahb, &info);
                }
        }

        for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
                kgsl_snapshot_add_section(device,
                        KGSL_SNAPSHOT_SECTION_REGS, snapshot,
                        a6xx_snapshot_non_ctx_dbgahb, &a6xx_non_ctx_dbgahb[i]);
        }
}

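/*
 * a6xx_legacy_snapshot_mvc() - Dump one context of an MVC register cluster
 * with direct register reads through the CP aperture, used when the crash
 * dumper output is not available.
 */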
static size_t a6xx_legacy_snapshot_mvc(struct kgsl_device *device, u8 *buf,
                size_t remain, void *priv)
{
        struct kgsl_snapshot_mvc_regs *header =
                (struct kgsl_snapshot_mvc_regs *)buf;
        struct a6xx_cluster_regs_info *info =
                (struct a6xx_cluster_regs_info *)priv;
        struct a6xx_cluster_registers *cur_cluster = info->cluster;
        unsigned int *data = (unsigned int *)(buf + sizeof(*header));
        unsigned int ctxt = info->ctxt_id;
        unsigned int start, end, i, j, aperture_cntl = 0;
        unsigned int data_size = 0;

        if (remain < sizeof(*header)) {
                SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
                return 0;
        }

        remain -= sizeof(*header);

        header->ctxt_id = info->ctxt_id;
        header->cluster_id = cur_cluster->id;

        /*
         * Set the AHB control for the Host to read from the
         * cluster/context for this iteration.
         */
        aperture_cntl = ((cur_cluster->id & 0x7) << 8) | (ctxt << 4) | ctxt;
        kgsl_regwrite(device, A6XX_CP_APERTURE_CNTL_HOST, aperture_cntl);

        for (i = 0; i < cur_cluster->num_sets; i++) {
                start = cur_cluster->regs[2 * i];
                end = cur_cluster->regs[2 * i + 1];

                if (remain < (end - start + 3) * 4) {
                        SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
                        goto out;
                }

                remain -= (end - start + 3) * 4;
                data_size += (end - start + 3) * 4;

                *data++ = start | (1 << 31);
                *data++ = end;
                for (j = start; j <= end; j++) {
                        unsigned int val;

                        kgsl_regread(device, j, &val);
                        *data++ = val;
                }
        }
out:
        return data_size + sizeof(*header);
}

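/*
 * a6xx_snapshot_mvc() - Write one context of an MVC register cluster from
 * the crash dumper output, falling back to a6xx_legacy_snapshot_mvc() if
 * the dump is not valid.
 */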
static size_t a6xx_snapshot_mvc(struct kgsl_device *device, u8 *buf,
                size_t remain, void *priv)
{
        struct kgsl_snapshot_mvc_regs *header =
                (struct kgsl_snapshot_mvc_regs *)buf;
        struct a6xx_cluster_regs_info *info =
                (struct a6xx_cluster_regs_info *)priv;
        struct a6xx_cluster_registers *cluster = info->cluster;
        unsigned int *data = (unsigned int *)(buf + sizeof(*header));
        unsigned int *src;
        int i, j;
        unsigned int start, end;
        size_t data_size = 0;

        if (!crash_dump_valid)
                return a6xx_legacy_snapshot_mvc(device, buf, remain, info);

        if (remain < sizeof(*header)) {
                SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
                return 0;
        }

        remain -= sizeof(*header);

        header->ctxt_id = info->ctxt_id;
        header->cluster_id = cluster->id;

        src = (unsigned int *)(a6xx_crashdump_registers.hostptr +
                (header->ctxt_id ? cluster->offset1 : cluster->offset0));

        for (i = 0; i < cluster->num_sets; i++) {
                start = cluster->regs[2 * i];
                end = cluster->regs[2 * i + 1];

                if (remain < (end - start + 3) * 4) {
                        SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
                        goto out;
                }

                remain -= (end - start + 3) * 4;
                data_size += (end - start + 3) * 4;

                *data++ = start | (1 << 31);
                *data++ = end;
                for (j = start; j <= end; j++)
                        *data++ = *src++;
        }

out:
        return data_size + sizeof(*header);
}

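/*
 * a6xx_snapshot_mvc_regs() - Add an MVC snapshot section for each register
 * cluster and context.
 */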
static void a6xx_snapshot_mvc_regs(struct kgsl_device *device,
                struct kgsl_snapshot *snapshot)
{
        int i, j;
        struct a6xx_cluster_regs_info info;

        for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
                struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];

                info.cluster = cluster;
                for (j = 0; j < A6XX_NUM_CTXTS; j++) {
                        info.ctxt_id = j;

                        kgsl_snapshot_add_section(device,
                                KGSL_SNAPSHOT_SECTION_MVC, snapshot,
                                a6xx_snapshot_mvc, &info);
                }
        }
}

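/*
 * _a6xx_do_crashdump() - Run the CP crash dump script to capture register
 * contents into a6xx_crashdump_registers and set crash_dump_valid on
 * success. The dump is skipped if the buffers are not allocated or the
 * SMMU is stalled.
 */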
static void _a6xx_do_crashdump(struct kgsl_device *device)
{
        unsigned long wait_time;
        unsigned int reg = 0;
        unsigned int val;

        crash_dump_valid = false;

        if (a6xx_capturescript.gpuaddr == 0 ||
                        a6xx_crashdump_registers.gpuaddr == 0)
                return;

        /* If the SMMU is stalled we cannot do a crash dump */
        kgsl_regread(device, A6XX_RBBM_STATUS3, &val);
        if (val & BIT(24))
                return;

        /* Turn on APRIV so we can access the buffers */
        kgsl_regwrite(device, A6XX_CP_MISC_CNTL, 1);

        kgsl_regwrite(device, A6XX_CP_CRASH_SCRIPT_BASE_LO,
                        lower_32_bits(a6xx_capturescript.gpuaddr));
        kgsl_regwrite(device, A6XX_CP_CRASH_SCRIPT_BASE_HI,
                        upper_32_bits(a6xx_capturescript.gpuaddr));
        kgsl_regwrite(device, A6XX_CP_CRASH_DUMP_CNTL, 1);

        wait_time = jiffies + msecs_to_jiffies(CP_CRASH_DUMPER_TIMEOUT);
        while (!time_after(jiffies, wait_time)) {
                kgsl_regread(device, A6XX_CP_CRASH_DUMP_STATUS, &reg);
                if (reg & 0x2)
                        break;
                cpu_relax();
        }

        kgsl_regwrite(device, A6XX_CP_MISC_CNTL, 0);

        if (!(reg & 0x2)) {
                KGSL_CORE_ERR("Crash dump timed out: 0x%X\n", reg);
                return;
        }

        crash_dump_valid = true;
}

/*
 * a6xx_snapshot() - A6XX GPU snapshot function
 * @adreno_dev: Device being snapshotted
 * @snapshot: Pointer to the snapshot instance
 *
 * This is where all of the A6XX specific bits and pieces are grabbed
 * into the snapshot memory
 */
void a6xx_snapshot(struct adreno_device *adreno_dev,
                struct kgsl_snapshot *snapshot)
{
        struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
        struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
        struct adreno_snapshot_data *snap_data = gpudev->snapshot_data;

        /* Try to run the crash dumper */
        _a6xx_do_crashdump(device);

        kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
                snapshot, a6xx_snapshot_registers, NULL);

        adreno_snapshot_vbif_registers(device, snapshot,
                a6xx_vbif_snapshot_registers,
                ARRAY_SIZE(a6xx_vbif_snapshot_registers));

        /* CP_SQE indexed registers */
        kgsl_snapshot_indexed_registers(device, snapshot,
                A6XX_CP_SQE_STAT_ADDR, A6XX_CP_SQE_STAT_DATA,
                0, snap_data->sect_sizes->cp_pfp);

        /* CP_DRAW_STATE */
        kgsl_snapshot_indexed_registers(device, snapshot,
                A6XX_CP_DRAW_STATE_ADDR, A6XX_CP_DRAW_STATE_DATA,
                0, 0x100);

        /* SQE_UCODE Cache */
        kgsl_snapshot_indexed_registers(device, snapshot,
                A6XX_CP_SQE_UCODE_DBG_ADDR, A6XX_CP_SQE_UCODE_DBG_DATA,
                0, 0x6000);

        /* CP ROQ */
        kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG,
                snapshot, adreno_snapshot_cp_roq,
                &snap_data->sect_sizes->roq);

        /* MVC register section */
        a6xx_snapshot_mvc_regs(device, snapshot);

        /* Registers dumped through DBG AHB */
        a6xx_snapshot_dbgahb_regs(device, snapshot);
}

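/*
 * _a6xx_crashdump_init_mvc() - Emit crash dump script commands that program
 * the CP aperture for each cluster/context and read each register range into
 * the data buffer. Records the per-context data offsets in the cluster and
 * returns the number of qwords written to the script.
 */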
static int _a6xx_crashdump_init_mvc(uint64_t *ptr, uint64_t *offset)
{
        int qwords = 0;
        unsigned int i, j, k;
        unsigned int count;

        for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
                struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];

                cluster->offset0 = *offset;
                for (j = 0; j < A6XX_NUM_CTXTS; j++) {

                        if (j == 1)
                                cluster->offset1 = *offset;

                        ptr[qwords++] = (cluster->id << 8) | (j << 4) | j;
                        ptr[qwords++] =
                                ((uint64_t)A6XX_CP_APERTURE_CNTL_HOST << 44) |
                                (1 << 21) | 1;

                        for (k = 0; k < cluster->num_sets; k++) {
                                count = REG_PAIR_COUNT(cluster->regs, k);
                                ptr[qwords++] =
                                        a6xx_crashdump_registers.gpuaddr + *offset;
                                ptr[qwords++] =
                                        (((uint64_t)cluster->regs[2 * k]) << 44) |
                                        count;

                                *offset += count * sizeof(unsigned int);
                        }
                }
        }

        return qwords;
}

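/*
 * a6xx_crashdump_init() - Initialize the A6XX crash dumper
 * @adreno_dev: Pointer to the adreno device
 *
 * Allocate the capture script and register data buffers and build the
 * script that the CP crash dumper executes to save registers on a hang.
 */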
void a6xx_crashdump_init(struct adreno_device *adreno_dev)
{
        struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
        unsigned int script_size = 0;
        unsigned int data_size = 0;
        unsigned int i, j, k;
        uint64_t *ptr;
        uint64_t offset = 0;

        if (a6xx_capturescript.gpuaddr != 0 &&
                        a6xx_crashdump_registers.gpuaddr != 0)
                return;

        /*
         * We need to allocate two buffers:
         * 1 - the buffer to hold the capture script
         * 2 - the buffer to hold the data
         */

        /*
         * To save the registers, we need 16 bytes per register pair for the
         * script and a dword for each register in the data
         */
        for (i = 0; i < ARRAY_SIZE(_a6xx_cd_registers); i++) {
                struct cdregs *regs = &_a6xx_cd_registers[i];

                /* Each pair needs 16 bytes (2 qwords) */
                script_size += (regs->size / 2) * 16;

                /* Each register needs a dword in the data */
                for (j = 0; j < regs->size / 2; j++)
                        data_size += REG_PAIR_COUNT(regs->regs, j) *
                                sizeof(unsigned int);
        }

        /* Calculate the script and data size for MVC registers */
        for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
                struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];

                for (j = 0; j < A6XX_NUM_CTXTS; j++) {

                        /* 16 bytes for programming the aperture */
                        script_size += 16;

                        /* Reading each pair of registers takes 16 bytes */
                        script_size += 16 * cluster->num_sets;

                        /* A dword per register read from the cluster list */
                        for (k = 0; k < cluster->num_sets; k++)
                                data_size += REG_PAIR_COUNT(cluster->regs, k) *
                                        sizeof(unsigned int);
                }
        }

        /* Now allocate the script and data buffers */

        /* The script buffer needs 2 extra qwords on the end */
        if (kgsl_allocate_global(device, &a6xx_capturescript,
                script_size + 16, KGSL_MEMFLAGS_GPUREADONLY,
                KGSL_MEMDESC_PRIVILEGED, "capturescript"))
                return;

        if (kgsl_allocate_global(device, &a6xx_crashdump_registers, data_size,
                0, KGSL_MEMDESC_PRIVILEGED, "capturescript_regs")) {
                kgsl_free_global(KGSL_DEVICE(adreno_dev), &a6xx_capturescript);
                return;
        }

        /* Build the crash script */

        ptr = (uint64_t *)a6xx_capturescript.hostptr;

        /* For the registers, program a read command for each pair */
        for (i = 0; i < ARRAY_SIZE(_a6xx_cd_registers); i++) {
                struct cdregs *regs = &_a6xx_cd_registers[i];

                for (j = 0; j < regs->size / 2; j++) {
                        unsigned int r = REG_PAIR_COUNT(regs->regs, j);

                        *ptr++ = a6xx_crashdump_registers.gpuaddr + offset;
                        *ptr++ = (((uint64_t) regs->regs[2 * j]) << 44) | r;
                        offset += r * sizeof(unsigned int);
                }
        }

        /* Program the capturescript for the MVC registers */
        ptr += _a6xx_crashdump_init_mvc(ptr, &offset);

        /* Terminate the script with two zero qwords */
        *ptr++ = 0;
        *ptr++ = 0;
817}