/* Copyright (c) 2012-2020 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include "kgsl.h"
#include "kgsl_sharedmem.h"
#include "kgsl_snapshot.h"

#include "adreno.h"
#include "adreno_pm4types.h"
#include "a3xx_reg.h"
#include "adreno_cp_parser.h"
#include "adreno_snapshot.h"
#include "adreno_a5xx.h"

#define VPC_MEMORY_BANKS 4

/* Maintain a list of the objects we see during parsing */

#define SNAPSHOT_OBJ_BUFSIZE 64

/* Used to print an error message if an IB has too many objects in it */
static int ib_max_objs;

struct snapshot_rb_params {
	struct kgsl_snapshot *snapshot;
	struct adreno_ringbuffer *rb;
};

/* Keep track of how many bytes are frozen after a snapshot and tell the user */
static size_t snapshot_frozen_objsize;

static struct kgsl_snapshot_object objbuf[SNAPSHOT_OBJ_BUFSIZE];

/* Pointer to the next open entry in the object list */
static unsigned int objbufptr;

static inline int adreno_rb_ctxtswitch(struct adreno_device *adreno_dev,
			unsigned int *cmd)
{
	return cmd[0] == cp_packet(adreno_dev, CP_NOP, 1) &&
		cmd[1] == KGSL_CONTEXT_TO_MEM_IDENTIFIER;
}

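/*
 * For reference, the marker matched above is the two-dword sequence the
 * driver writes into the RB at each context switch (a sketch; the exact
 * NOP header encoding comes from cp_packet() for the target's PM4 dialect):
 *
 *	cmd[0]: cp_packet(adreno_dev, CP_NOP, 1)  <- NOP with one payload dword
 *	cmd[1]: KGSL_CONTEXT_TO_MEM_IDENTIFIER    <- context switch magic value
 */
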
/* Push a new buffer object onto the list */
void kgsl_snapshot_push_object(struct kgsl_process_private *process,
	uint64_t gpuaddr, uint64_t dwords)
{
	int index;
	struct kgsl_mem_entry *entry;

	if (process == NULL)
		return;

	/*
	 * Sometimes IBs can be reused in the same dump. Because we parse from
	 * oldest to newest, if we come across an IB that has already been used,
	 * assume that it has been reused and update the list with the newest
	 * size.
	 */

	for (index = 0; index < objbufptr; index++) {
		if (objbuf[index].gpuaddr == gpuaddr &&
			objbuf[index].entry->priv == process) {
			/*
			 * Check whether the newly requested size is within
			 * the allocated range; if not, keep the previous
			 * size.
			 */
			if (!kgsl_gpuaddr_in_memdesc(
					&objbuf[index].entry->memdesc,
					gpuaddr, dwords << 2)) {
				KGSL_CORE_ERR(
					"snapshot: IB 0x%016llx size is not within the memdesc range\n",
					gpuaddr);
				return;
			}

			objbuf[index].size = max_t(uint64_t,
					objbuf[index].size,
					dwords << 2);
			return;
		}
	}

	if (objbufptr == SNAPSHOT_OBJ_BUFSIZE) {
		KGSL_CORE_ERR("snapshot: too many snapshot objects\n");
		return;
	}

	entry = kgsl_sharedmem_find(process, gpuaddr);
	if (entry == NULL) {
		KGSL_CORE_ERR("snapshot: Can't find entry for 0x%016llX\n",
			gpuaddr);
		return;
	}

	if (!kgsl_gpuaddr_in_memdesc(&entry->memdesc, gpuaddr, dwords << 2)) {
		KGSL_CORE_ERR("snapshot: Mem entry 0x%016llX is too small\n",
			gpuaddr);
		kgsl_mem_entry_put(entry);
		return;
	}

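	/*
	 * Sizes are tracked in dwords (32-bit words) throughout this file,
	 * so dwords << 2 is the object size in bytes.
	 */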
113 /* Put it on the list of things to parse */
Shrenuj Bansala419c792016-10-20 14:05:11 -0700114 objbuf[objbufptr].gpuaddr = gpuaddr;
115 objbuf[objbufptr].size = dwords << 2;
116 objbuf[objbufptr++].entry = entry;
117}

/*
 * Returns the index of the specified object if it is already on the list
 * of buffers to be dumped
 */

static int find_object(uint64_t gpuaddr, struct kgsl_process_private *process)
{
	int index;

	for (index = 0; index < objbufptr; index++) {
		if (objbuf[index].gpuaddr == gpuaddr &&
			objbuf[index].entry->priv == process)
			return index;
	}
	return -ENOENT;
}

/*
 * snapshot_freeze_obj_list() - Take a list of ib objects and freeze their
 * memory for snapshot
 * @snapshot: The snapshot data.
 * @process: The process to which the IB belongs
 * @ib_obj_list: List of the IB objects
 *
 * Returns 0 on success else error code
 */
static int snapshot_freeze_obj_list(struct kgsl_snapshot *snapshot,
		struct kgsl_process_private *process,
		struct adreno_ib_object_list *ib_obj_list)
{
	int ret = 0;
	struct adreno_ib_object *ib_objs;
	int i;

	for (i = 0; i < ib_obj_list->num_objs; i++) {
		int temp_ret;
		int index;
		int freeze = 1;

		ib_objs = &(ib_obj_list->obj_list[i]);
		/* Make sure this object is not going to be saved statically */
		for (index = 0; index < objbufptr; index++) {
			if ((objbuf[index].gpuaddr <= ib_objs->gpuaddr) &&
				((objbuf[index].gpuaddr +
				(objbuf[index].size)) >=
				(ib_objs->gpuaddr + ib_objs->size)) &&
				(objbuf[index].entry->priv == process)) {
				freeze = 0;
				break;
			}
		}

		if (freeze) {
			temp_ret = kgsl_snapshot_get_object(snapshot,
					process, ib_objs->gpuaddr,
					ib_objs->size,
					ib_objs->snapshot_obj_type);
			if (temp_ret < 0) {
				if (ret >= 0)
					ret = temp_ret;
			} else {
				snapshot_frozen_objsize += temp_ret;
			}
		}
	}
	return ret;
}

/*
 * We want to store the last executed IB1 and IB2 in the static region to
 * ensure that we get at least some information out of the snapshot even
 * if we can't access the dynamic data from the sysfs file. Push all other
 * IBs on the dynamic list
 */
static inline void parse_ib(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot,
		struct kgsl_process_private *process,
		uint64_t gpuaddr, uint64_t dwords)
{
	struct adreno_ib_object_list *ib_obj_list;

	/*
	 * Check the IB address - if it is the last executed IB1, push it
	 * into the static blob; otherwise put it on the dynamic list
	 */
	if (gpuaddr == snapshot->ib1base) {
		kgsl_snapshot_push_object(process, gpuaddr, dwords);
		return;
	}

	if (kgsl_snapshot_have_object(snapshot, process,
			gpuaddr, dwords << 2))
		return;

	if (-E2BIG == adreno_ib_create_object_list(device, process,
			gpuaddr, dwords, snapshot->ib2base,
			&ib_obj_list))
		ib_max_objs = 1;

	if (ib_obj_list)
		kgsl_snapshot_add_ib_obj_list(snapshot, ib_obj_list);
}

static inline bool iommu_is_setstate_addr(struct kgsl_device *device,
		uint64_t gpuaddr, uint64_t size)
{
	struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);

	if (kgsl_mmu_get_mmutype(device) != KGSL_MMU_TYPE_IOMMU)
		return false;

	return kgsl_gpuaddr_in_memdesc(&iommu->setstate, gpuaddr,
			size);
}

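/*
 * For reference, the dword layout of an IB submission in the RB, as decoded
 * by the parsers below (a sketch):
 *
 *	legacy PM4:                     64-bit PM4:
 *	[n+0] IB packet header          [n+0] IB packet header
 *	[n+1] IB base address           [n+1] IB base, low 32 bits
 *	[n+2] IB size in dwords         [n+2] IB base, high 32 bits
 *	                                [n+3] IB size in dwords
 */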
static void dump_all_ibs(struct kgsl_device *device,
			struct adreno_ringbuffer *rb,
			struct kgsl_snapshot *snapshot)
{
	int index = 0;
	unsigned int *rbptr;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	rbptr = rb->buffer_desc.hostptr;

	for (index = 0; index < KGSL_RB_DWORDS;) {

		if (adreno_cmd_is_ib(adreno_dev, rbptr[index])) {
			uint64_t ibaddr;
			uint64_t ibsize;

			if (ADRENO_LEGACY_PM4(adreno_dev)) {
				ibaddr = rbptr[index + 1];
				ibsize = rbptr[index + 2];
				index += 3;
			} else {
				ibaddr = rbptr[index + 2];
				ibaddr = ibaddr << 32 | rbptr[index + 1];
				ibsize = rbptr[index + 3];
				index += 4;
			}

			/* Don't parse known global IBs */
			if (iommu_is_setstate_addr(device, ibaddr, ibsize))
				continue;

			if (kgsl_gpuaddr_in_memdesc(&adreno_dev->pwron_fixup,
					ibaddr, ibsize))
				continue;

			parse_ib(device, snapshot, snapshot->process, ibaddr,
				ibsize);
		} else
			index = index + 1;
	}
}

/**
 * snapshot_rb_ibs() - Dump RB data and capture the IBs in the RB as well
 * @device: Pointer to a KGSL device
 * @rb: The RB to dump
 * @snapshot: Pointer to information about the current snapshot being taken
 */
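/*
 * An overview of the walk below (matching the code as written): first walk
 * backwards from the rptr to the last submission of the active IB1, then
 * keep walking back to the context switch that precedes it, and finally
 * replay forward from the wptr, parsing only the IBs inside that window.
 */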
static void snapshot_rb_ibs(struct kgsl_device *device,
		struct adreno_ringbuffer *rb,
		struct kgsl_snapshot *snapshot)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	unsigned int rptr, *rbptr;
	int index, i;
	int parse_ibs = 0, ib_parse_start;

	/* Get the current read pointers for the RB */
	adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_RPTR, &rptr);

	/*
	 * Figure out the window of ringbuffer data to dump. First we need to
	 * find where the last processed IB was submitted. Start walking back
	 * from the rptr
	 */

	index = rptr;
	rbptr = rb->buffer_desc.hostptr;

	do {
		index--;

		if (index < 0) {
			if (ADRENO_LEGACY_PM4(adreno_dev))
				index = KGSL_RB_DWORDS - 3;
			else
				index = KGSL_RB_DWORDS - 4;

			/* We wrapped without finding what we wanted */
			if (index < rb->wptr) {
				index = rb->wptr;
				break;
			}
		}

		if (adreno_cmd_is_ib(adreno_dev, rbptr[index])) {
			if (ADRENO_LEGACY_PM4(adreno_dev)) {
				if (rbptr[index + 1] == snapshot->ib1base)
					break;
			} else {
				uint64_t ibaddr;

				ibaddr = rbptr[index + 2];
				ibaddr = ibaddr << 32 | rbptr[index + 1];
				if (ibaddr == snapshot->ib1base)
					break;
			}
		}
	} while (index != rb->wptr);

	/*
	 * If the ib1 was not found, for example, if ib1base was restored
	 * incorrectly after preemption, then simply dump the entire
	 * ringbuffer along with all the IBs in the ringbuffer.
	 */

	if (index == rb->wptr) {
		dump_all_ibs(device, rb, snapshot);
		return;
	}

	/*
	 * index points at the last submitted IB. We can only trust that the
	 * memory between the context switch and the hanging IB is valid, so
	 * the next step is to find the context switch before the submission
	 */

	while (index != rb->wptr) {
		index--;

		if (index < 0) {
			index = KGSL_RB_DWORDS - 2;

			/*
			 * Wrapped without finding the context switch. This is
			 * harmless - we should still have enough data to dump a
			 * valid state
			 */

			if (index < rb->wptr) {
				index = rb->wptr;
				break;
			}
		}

		/* Break if the current packet is a context switch identifier */
		if ((rbptr[index] == cp_packet(adreno_dev, CP_NOP, 1)) &&
			(rbptr[index + 1] == KGSL_CONTEXT_TO_MEM_IDENTIFIER))
			break;
	}

	/*
	 * Index represents the start of the window of interest. We will try
	 * to dump all buffers between here and the rptr
	 */

	ib_parse_start = index;

	/*
	 * Loop through the RB, looking for indirect buffers and MMU pagetable
	 * changes
	 */

	index = rb->wptr;
	for (i = 0; i < KGSL_RB_DWORDS; i++) {
		/*
		 * Only parse IBs between the start and the rptr or the next
		 * context switch, whichever comes first
		 */

		if (parse_ibs == 0 && index == ib_parse_start)
			parse_ibs = 1;
		else if (index == rptr || adreno_rb_ctxtswitch(adreno_dev,
				&rbptr[index]))
			parse_ibs = 0;

		if (parse_ibs && adreno_cmd_is_ib(adreno_dev, rbptr[index])) {
			uint64_t ibaddr;
			uint64_t ibsize;

			if (ADRENO_LEGACY_PM4(adreno_dev)) {
				ibaddr = rbptr[index + 1];
				ibsize = rbptr[index + 2];
			} else {
				ibaddr = rbptr[index + 2];
				ibaddr = ibaddr << 32 | rbptr[index + 1];
				ibsize = rbptr[index + 3];
			}

			index = (index + 1) % KGSL_RB_DWORDS;

			/* Don't parse known global IBs */
			if (iommu_is_setstate_addr(device, ibaddr, ibsize))
				continue;

			if (kgsl_gpuaddr_in_memdesc(&adreno_dev->pwron_fixup,
					ibaddr, ibsize))
				continue;

			parse_ib(device, snapshot, snapshot->process,
				ibaddr, ibsize);
		} else
			index = (index + 1) % KGSL_RB_DWORDS;
	}
}

/* Snapshot the ringbuffer memory */
static size_t snapshot_rb(struct kgsl_device *device, u8 *buf,
	size_t remain, void *priv)
{
	struct kgsl_snapshot_rb_v2 *header = (struct kgsl_snapshot_rb_v2 *)buf;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct snapshot_rb_params *snap_rb_params = priv;
	struct kgsl_snapshot *snapshot = snap_rb_params->snapshot;
	struct adreno_ringbuffer *rb = snap_rb_params->rb;

	/*
	 * Dump the entire ringbuffer - the parser can choose how much of it to
	 * process
	 */

	if (remain < KGSL_RB_SIZE + sizeof(*header)) {
		KGSL_CORE_ERR("snapshot: Not enough memory for the rb section");
		return 0;
	}

	/* Write the sub-header for the section */
	header->start = 0;
	header->end = KGSL_RB_DWORDS;
	header->wptr = rb->wptr;
	header->rptr = adreno_get_rptr(rb);
	header->rbsize = KGSL_RB_DWORDS;
	header->count = KGSL_RB_DWORDS;
	adreno_rb_readtimestamp(adreno_dev, rb, KGSL_TIMESTAMP_QUEUED,
		&header->timestamp_queued);
	adreno_rb_readtimestamp(adreno_dev, rb, KGSL_TIMESTAMP_RETIRED,
		&header->timestamp_retired);
	header->gpuaddr = rb->buffer_desc.gpuaddr;
	header->id = rb->id;

	if (rb == adreno_dev->cur_rb)
		snapshot_rb_ibs(device, rb, snapshot);

	/* Just copy the ringbuffer, there are no active IBs */
	memcpy(data, rb->buffer_desc.hostptr, KGSL_RB_SIZE);

	/* Return the size of the section */
	return KGSL_RB_SIZE + sizeof(*header);
}

static int _count_mem_entries(int id, void *ptr, void *data)
{
	int *count = data;
	*count = *count + 1;
	return 0;
}

struct mem_entry {
	uint64_t gpuaddr;
	uint64_t size;
	unsigned int type;
} __packed;

static int _save_mem_entries(int id, void *ptr, void *data)
{
	struct kgsl_mem_entry *entry = ptr;
	struct mem_entry *m = (struct mem_entry *) data;
	unsigned int index = id - 1;

	m[index].gpuaddr = entry->memdesc.gpuaddr;
	m[index].size = entry->memdesc.size;
	m[index].type = kgsl_memdesc_get_memtype(&entry->memdesc);

	return 0;
}
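
/*
 * The mem entry idr ids are assumed to start at 1 (hence the id - 1
 * indexing in _save_mem_entries() above), so entry N lands in slot N - 1
 * of the output array.
 */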

static size_t snapshot_capture_mem_list(struct kgsl_device *device,
		u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_mem_list_v2 *header =
		(struct kgsl_snapshot_mem_list_v2 *)buf;
	int num_mem = 0;
	int ret = 0;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	struct kgsl_process_private *process = priv;

	/* we need a process to search! */
	if (process == NULL)
		return 0;

	spin_lock(&process->mem_lock);

	/* We need to know the number of memory objects that the process has */
	idr_for_each(&process->mem_idr, _count_mem_entries, &num_mem);

	if (num_mem == 0)
		goto out;

	if (remain < ((num_mem * sizeof(struct mem_entry)) + sizeof(*header))) {
		KGSL_CORE_ERR("snapshot: Not enough memory for the mem list");
		goto out;
	}

	header->num_entries = num_mem;
	header->ptbase = kgsl_mmu_pagetable_get_ttbr0(process->pagetable);

	/*
	 * Walk through the memory list and store the
	 * tuples (gpuaddr, size, memtype) in the snapshot
	 */
	idr_for_each(&process->mem_idr, _save_mem_entries, data);

	ret = sizeof(*header) + (num_mem * sizeof(struct mem_entry));
out:
	spin_unlock(&process->mem_lock);
	return ret;
}

struct snapshot_ib_meta {
	struct kgsl_snapshot *snapshot;
	struct kgsl_snapshot_object *obj;
	uint64_t ib1base;
	uint64_t ib1size;
	uint64_t ib2base;
	uint64_t ib2size;
};

void kgsl_snapshot_add_active_ib_obj_list(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot)
{
	struct adreno_ib_object_list *ib_obj_list;
	int index = -ENOENT;

	if (!snapshot->ib1dumped)
		index = find_object(snapshot->ib1base, snapshot->process);

	/* only do this for IB1 because the IB2s are part of IB1 objects */
	if ((index != -ENOENT) &&
			(snapshot->ib1base == objbuf[index].gpuaddr)) {
		if (-E2BIG == adreno_ib_create_object_list(device,
				objbuf[index].entry->priv,
				objbuf[index].gpuaddr,
				objbuf[index].size >> 2,
				snapshot->ib2base,
				&ib_obj_list))
			ib_max_objs = 1;
		if (ib_obj_list) {
			/* freeze the IB objects in the IB */
			snapshot_freeze_obj_list(snapshot,
					objbuf[index].entry->priv,
					ib_obj_list);
			adreno_ib_destroy_obj_list(ib_obj_list);
		}
	} else {
		/* Get the IB2 index from parsed object */
		index = find_object(snapshot->ib2base, snapshot->process);

		if (index != -ENOENT)
			parse_ib(device, snapshot, snapshot->process,
				snapshot->ib2base, objbuf[index].size >> 2);
	}
}

/*
 * active_ib_is_parsed() - Checks if the active IB is already parsed
 * @gpuaddr: Active IB base address at the time of fault
 * @size: Active IB size
 * @process: The process to which the IB belongs
 *
 * Returns true if the active IB is already parsed, else false
 */
static bool active_ib_is_parsed(uint64_t gpuaddr, uint64_t size,
		struct kgsl_process_private *process)
{
	int index;
	/* Walk the static list to check whether gpuaddr is already in it */
	for (index = 0; index < objbufptr; index++) {
		if ((objbuf[index].gpuaddr <= gpuaddr) &&
			((objbuf[index].gpuaddr +
			(objbuf[index].size)) >=
			(gpuaddr + size)) &&
			(objbuf[index].entry->priv == process))
			return true;
	}
	return false;
}

/* Snapshot the memory for an indirect buffer */
static size_t snapshot_ib(struct kgsl_device *device, u8 *buf,
	size_t remain, void *priv)
{
	struct kgsl_snapshot_ib_v2 *header = (struct kgsl_snapshot_ib_v2 *)buf;
	struct snapshot_ib_meta *meta = priv;
	unsigned int *src;
	unsigned int *dst = (unsigned int *)(buf + sizeof(*header));
	struct adreno_ib_object_list *ib_obj_list;
	struct kgsl_snapshot *snapshot;
	struct kgsl_snapshot_object *obj;
	struct kgsl_memdesc *memdesc;

	if (meta == NULL || meta->snapshot == NULL || meta->obj == NULL) {
		KGSL_CORE_ERR("snapshot: bad metadata");
		return 0;
	}
	snapshot = meta->snapshot;
	obj = meta->obj;
	memdesc = &obj->entry->memdesc;

	/* If the size is zero, get it from the memdesc size */
	if (!obj->size)
		obj->size = (memdesc->size - (obj->gpuaddr - memdesc->gpuaddr));

	if (remain < (obj->size + sizeof(*header))) {
		KGSL_CORE_ERR("snapshot: Not enough memory for the ib\n");
		return 0;
	}

	src = kgsl_gpuaddr_to_vaddr(memdesc, obj->gpuaddr);
	if (src == NULL) {
		KGSL_DRV_ERR(device,
			"snapshot: Unable to map GPU memory object 0x%016llX into the kernel\n",
			obj->gpuaddr);
		return 0;
	}

	/* only do this for IB1 because the IB2s are part of IB1 objects */
	if (meta->ib1base == obj->gpuaddr) {

		snapshot->ib1dumped = active_ib_is_parsed(obj->gpuaddr,
					obj->size, obj->entry->priv);
		if (-E2BIG == adreno_ib_create_object_list(device,
				obj->entry->priv,
				obj->gpuaddr, obj->size >> 2,
				snapshot->ib2base,
				&ib_obj_list))
			ib_max_objs = 1;
		if (ib_obj_list) {
			/* freeze the IB objects in the IB */
			snapshot_freeze_obj_list(snapshot,
					obj->entry->priv,
					ib_obj_list);
			adreno_ib_destroy_obj_list(ib_obj_list);
		}
	}

	if (meta->ib2base == obj->gpuaddr)
		snapshot->ib2dumped = active_ib_is_parsed(obj->gpuaddr,
					obj->size, obj->entry->priv);

	/* Write the sub-header for the section */
	header->gpuaddr = obj->gpuaddr;
	header->ptbase =
		kgsl_mmu_pagetable_get_ttbr0(obj->entry->priv->pagetable);
	header->size = obj->size >> 2;

	/* Write the contents of the ib */
	memcpy((void *)dst, (void *)src, (size_t) obj->size);

	return obj->size + sizeof(*header);
}

/* Dump another item on the current pending list */
static void dump_object(struct kgsl_device *device, int obj,
		struct kgsl_snapshot *snapshot)
{
	struct snapshot_ib_meta meta;

	meta.snapshot = snapshot;
	meta.obj = &objbuf[obj];
	meta.ib1base = snapshot->ib1base;
	meta.ib1size = snapshot->ib1size;
	meta.ib2base = snapshot->ib2base;
	meta.ib2size = snapshot->ib2size;

	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_IB_V2,
		snapshot, snapshot_ib, &meta);
	if (objbuf[obj].entry) {
		kgsl_memdesc_unmap(&(objbuf[obj].entry->memdesc));
		kgsl_mem_entry_put(objbuf[obj].entry);
	}
}

/*
 * setup_fault_process() - Find the kgsl_process_private struct that caused
 * the fault
 *
 * Find the faulting process based on what the dispatcher thinks happened
 * and what the hardware is using for the current pagetable. The process
 * struct will be used to look up GPU addresses that are encountered while
 * parsing the GPU state.
 */
static void setup_fault_process(struct kgsl_device *device,
				struct kgsl_snapshot *snapshot,
				struct kgsl_process_private *process)
{
	u64 hw_ptbase, proc_ptbase;

	if (process != NULL && !kgsl_process_private_get(process))
		process = NULL;

	/* Get the physical address of the MMU pagetable */
	hw_ptbase = kgsl_mmu_get_current_ttbr0(&device->mmu);

	/* if we have an input process, make sure the ptbases match */
	if (process) {
		proc_ptbase = kgsl_mmu_pagetable_get_ttbr0(process->pagetable);
		/* agreement! No need to check further */
		if (hw_ptbase == proc_ptbase)
			goto done;

		kgsl_process_private_put(process);
		process = NULL;
		KGSL_CORE_ERR("snapshot: ptbase mismatch hw %llx sw %llx\n",
			hw_ptbase, proc_ptbase);
	}

	/* try to find the right pagetable by walking the process list */
	if (kgsl_mmu_is_perprocess(&device->mmu)) {
		struct kgsl_process_private *tmp;

		mutex_lock(&kgsl_driver.process_mutex);
		list_for_each_entry(tmp, &kgsl_driver.process_list, list) {
			u64 pt_ttbr0;

			pt_ttbr0 = kgsl_mmu_pagetable_get_ttbr0(tmp->pagetable);
			if ((pt_ttbr0 == hw_ptbase)
				&& kgsl_process_private_get(tmp)) {
				process = tmp;
				break;
			}
		}
		mutex_unlock(&kgsl_driver.process_mutex);
	}
done:
	snapshot->process = process;
}

/* Snapshot a global memory buffer */
static size_t snapshot_global(struct kgsl_device *device, u8 *buf,
	size_t remain, void *priv)
{
	struct kgsl_memdesc *memdesc = priv;

	struct kgsl_snapshot_gpu_object_v2 *header =
		(struct kgsl_snapshot_gpu_object_v2 *)buf;

	u8 *ptr = buf + sizeof(*header);

	if (memdesc->size == 0)
		return 0;

	if (remain < (memdesc->size + sizeof(*header))) {
		KGSL_CORE_ERR("snapshot: Not enough memory for the memdesc\n");
		return 0;
	}

	if (memdesc->hostptr == NULL) {
		KGSL_CORE_ERR(
			"snapshot: no kernel mapping for global object 0x%016llX\n",
			memdesc->gpuaddr);
		return 0;
	}

	header->size = memdesc->size >> 2;
	header->gpuaddr = memdesc->gpuaddr;
	header->ptbase = MMU_DEFAULT_TTBR0(device);
	header->type = SNAPSHOT_GPU_OBJECT_GLOBAL;

	memcpy(ptr, memdesc->hostptr, memdesc->size);

	return memdesc->size + sizeof(*header);
}

/* Snapshot IOMMU specific buffers */
static void adreno_snapshot_iommu(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);

	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_GPU_OBJECT_V2,
		snapshot, snapshot_global, &iommu->setstate);

	if (ADRENO_FEATURE(adreno_dev, ADRENO_PREEMPTION))
		kgsl_snapshot_add_section(device,
			KGSL_SNAPSHOT_SECTION_GPU_OBJECT_V2,
			snapshot, snapshot_global, &iommu->smmu_info);
}

static void adreno_snapshot_ringbuffer(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot, struct adreno_ringbuffer *rb)
{
	struct snapshot_rb_params params = {
		.snapshot = snapshot,
		.rb = rb,
	};

	if (rb == NULL)
		return;

	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_RB_V2, snapshot,
		snapshot_rb, &params);
}

/* adreno_snapshot - Snapshot the Adreno GPU state
 * @device - KGSL device to snapshot
 * @snapshot - Pointer to the snapshot instance
 * @context - context that caused the fault, if known by the driver
 * This is a hook function called by kgsl_snapshot to snapshot the
 * Adreno specific information for the GPU snapshot. In turn, this function
 * calls the GPU specific snapshot function to get core specific information.
 */
void adreno_snapshot(struct kgsl_device *device, struct kgsl_snapshot *snapshot,
			struct kgsl_context *context)
{
	unsigned int i;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);

	ib_max_objs = 0;
	/* Reset the list of objects */
	objbufptr = 0;

	snapshot_frozen_objsize = 0;

	setup_fault_process(device, snapshot,
			context ? context->proc_priv : NULL);

	/* Add GPU specific sections - registers mainly, but other stuff too */
	if (gpudev->snapshot)
		gpudev->snapshot(adreno_dev, snapshot);

	/* Dumping these buffers is useless if the GX is not on */
	if (gpudev->gx_is_on)
		if (!gpudev->gx_is_on(adreno_dev))
			return;

	adreno_readreg64(adreno_dev, ADRENO_REG_CP_IB1_BASE,
			ADRENO_REG_CP_IB1_BASE_HI, &snapshot->ib1base);
	adreno_readreg(adreno_dev, ADRENO_REG_CP_IB1_BUFSZ, &snapshot->ib1size);
	adreno_readreg64(adreno_dev, ADRENO_REG_CP_IB2_BASE,
			ADRENO_REG_CP_IB2_BASE_HI, &snapshot->ib2base);
	adreno_readreg(adreno_dev, ADRENO_REG_CP_IB2_BUFSZ, &snapshot->ib2size);

	snapshot->ib1dumped = false;
	snapshot->ib2dumped = false;

	adreno_snapshot_ringbuffer(device, snapshot, adreno_dev->cur_rb);

	/* Dump the prev ringbuffer */
	if (adreno_dev->prev_rb != adreno_dev->cur_rb)
		adreno_snapshot_ringbuffer(device, snapshot,
			adreno_dev->prev_rb);

	if ((adreno_dev->next_rb != adreno_dev->prev_rb) &&
		(adreno_dev->next_rb != adreno_dev->cur_rb))
		adreno_snapshot_ringbuffer(device, snapshot,
			adreno_dev->next_rb);

	/* Dump selected global buffers */
	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_GPU_OBJECT_V2,
			snapshot, snapshot_global, &device->memstore);

	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_GPU_OBJECT_V2,
			snapshot, snapshot_global,
			&adreno_dev->pwron_fixup);

	if (kgsl_mmu_get_mmutype(device) == KGSL_MMU_TYPE_IOMMU)
		adreno_snapshot_iommu(device, snapshot);

	/*
	 * Add a section that lists (gpuaddr, size, memtype) tuples of the
	 * hanging process
	 */
	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_MEMLIST_V2,
			snapshot, snapshot_capture_mem_list, snapshot->process);
	/*
	 * Make sure that the last IB1 that was being executed is dumped.
	 * Since this was the last IB1 that was processed, we should have
	 * already added it to the list during the ringbuffer parse but we
	 * want to be double plus sure.
	 * The problem is that IB size from the register is the unprocessed size
	 * of the buffer not the original size, so if we didn't catch this
	 * buffer being directly used in the RB, then we might not be able to
	 * dump the whole thing. Try to dump the maximum possible size from the
	 * IB1 base address till the end of memdesc size so that we don't miss
	 * what we are interested in. Print a warning message so we can try to
	 * figure out how often this really happens.
	 */

	if (-ENOENT == find_object(snapshot->ib1base, snapshot->process) &&
			snapshot->ib1size) {
		struct kgsl_mem_entry *entry;
		u64 ibsize;

		entry = kgsl_sharedmem_find(snapshot->process,
				snapshot->ib1base);
		if (entry == NULL) {
			KGSL_CORE_ERR(
				"Can't find a memory entry containing IB1BASE %16llx\n",
				snapshot->ib1base);
		} else {
			ibsize = entry->memdesc.size -
				(snapshot->ib1base - entry->memdesc.gpuaddr);
			kgsl_mem_entry_put(entry);

			kgsl_snapshot_push_object(snapshot->process,
				snapshot->ib1base, ibsize >> 2);
			KGSL_CORE_ERR(
				"CP_IB1_BASE is not found in the ringbuffer. Dumping %llx dwords of the buffer\n",
				ibsize >> 2);
		}
	}

	/*
	 * Add the last parsed IB2 to the list. The IB2 should be found as we
	 * parse the objects below, but we try to add it to the list first, so
	 * it too can be parsed. Don't print an error message in this case - if
	 * the IB2 is found during parsing, the list will be updated with the
	 * correct size.
	 */

	if (-ENOENT == find_object(snapshot->ib2base, snapshot->process)) {
		kgsl_snapshot_push_object(snapshot->process, snapshot->ib2base,
			snapshot->ib2size);
	}

	/*
	 * Go through the list of found objects and dump each one. As the IBs
	 * are parsed, more objects might be found, and objbufptr will increase
	 */
	for (i = 0; i < objbufptr; i++)
		dump_object(device, i, snapshot);

	/*
	 * In case the snapshot static blob is running out of memory, add the
	 * active IB1 and IB2 entries to obj_list so that the active IBs can
	 * be dumped to the snapshot dynamic blob.
	 */
	if (!snapshot->ib1dumped || !snapshot->ib2dumped)
		kgsl_snapshot_add_active_ib_obj_list(device, snapshot);

	if (ib_max_objs)
		KGSL_CORE_ERR("Max objects found in IB\n");
	if (snapshot_frozen_objsize)
		KGSL_CORE_ERR("GPU snapshot froze %zdKb of GPU buffers\n",
			snapshot_frozen_objsize / 1024);
}

/*
 * adreno_snapshot_cp_merciu() - Dump CP MERCIU data in snapshot
 * @device: Device being snapshotted
 * @buf: Snapshot memory
 * @remain: Bytes remaining in snapshot memory
 * @priv: Size of MERCIU data in dwords
 */
size_t adreno_snapshot_cp_merciu(struct kgsl_device *device, u8 *buf,
	size_t remain, void *priv)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct kgsl_snapshot_debug *header = (struct kgsl_snapshot_debug *)buf;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	int i, size = *((int *)priv);

	/* The MERCIU data is two dwords per entry */
	size = size << 1;

	if (remain < DEBUG_SECTION_SZ(size)) {
		SNAPSHOT_ERR_NOMEM(device, "CP MERCIU DEBUG");
		return 0;
	}

	header->type = SNAPSHOT_DEBUG_CP_MERCIU;
	header->size = size;

	adreno_writereg(adreno_dev, ADRENO_REG_CP_MERCIU_ADDR, 0x0);

	/*
	 * Each iteration reads one entry (two dwords), so walk size >> 1
	 * entries to fill exactly size dwords without overrunning the
	 * section buffer.
	 */
	for (i = 0; i < (size >> 1); i++) {
		adreno_readreg(adreno_dev, ADRENO_REG_CP_MERCIU_DATA,
			&data[(i * 2)]);
		adreno_readreg(adreno_dev, ADRENO_REG_CP_MERCIU_DATA2,
			&data[(i * 2) + 1]);
	}

	return DEBUG_SECTION_SZ(size);
}

/*
 * adreno_snapshot_cp_roq() - Dump ROQ data in snapshot
 * @device: Device being snapshotted
 * @buf: Snapshot memory
 * @remain: Bytes remaining in snapshot memory
 * @priv: Size of ROQ data in dwords
 */
size_t adreno_snapshot_cp_roq(struct kgsl_device *device, u8 *buf,
	size_t remain, void *priv)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct kgsl_snapshot_debug *header = (struct kgsl_snapshot_debug *)buf;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	int i, size = *((int *)priv);

	if (remain < DEBUG_SECTION_SZ(size)) {
		SNAPSHOT_ERR_NOMEM(device, "CP ROQ DEBUG");
		return 0;
	}

	header->type = SNAPSHOT_DEBUG_CP_ROQ;
	header->size = size;

	adreno_writereg(adreno_dev, ADRENO_REG_CP_ROQ_ADDR, 0x0);
	for (i = 0; i < size; i++)
		adreno_readreg(adreno_dev, ADRENO_REG_CP_ROQ_DATA, &data[i]);

	return DEBUG_SECTION_SZ(size);
}

/*
 * adreno_snapshot_cp_pm4_ram() - Dump PM4 data in snapshot
 * @device: Device being snapshotted
 * @buf: Snapshot memory
 * @remain: Number of bytes left in snapshot memory
 * @priv: Unused
 */
size_t adreno_snapshot_cp_pm4_ram(struct kgsl_device *device, u8 *buf,
	size_t remain, void *priv)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct kgsl_snapshot_debug *header = (struct kgsl_snapshot_debug *)buf;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	int i;
	struct adreno_firmware *fw = ADRENO_FW(adreno_dev, ADRENO_FW_PM4);
	size_t size = fw->size - 1;

	if (remain < DEBUG_SECTION_SZ(size)) {
		SNAPSHOT_ERR_NOMEM(device, "CP PM4 RAM DEBUG");
		return 0;
	}

	header->type = SNAPSHOT_DEBUG_CP_PM4_RAM;
	header->size = size;

	/*
	 * Read the firmware from the GPU rather than use our cache in order to
	 * try to catch mis-programming or corruption in the hardware. We do
	 * use the cached version of the size, however, instead of trying to
	 * maintain always changing hardcoded constants
	 */

	adreno_writereg(adreno_dev, ADRENO_REG_CP_ME_RAM_RADDR, 0x0);
	for (i = 0; i < size; i++)
		adreno_readreg(adreno_dev, ADRENO_REG_CP_ME_RAM_DATA, &data[i]);

	return DEBUG_SECTION_SZ(size);
}

/*
 * adreno_snapshot_cp_pfp_ram() - Dump the PFP data on snapshot
 * @device: Device being snapshotted
 * @buf: Snapshot memory
 * @remain: Number of bytes left in snapshot memory
 * @priv: Unused
 */
size_t adreno_snapshot_cp_pfp_ram(struct kgsl_device *device, u8 *buf,
		size_t remain, void *priv)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct kgsl_snapshot_debug *header = (struct kgsl_snapshot_debug *)buf;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	int i;
	struct adreno_firmware *fw = ADRENO_FW(adreno_dev, ADRENO_FW_PFP);
	int size = fw->size - 1;

	if (remain < DEBUG_SECTION_SZ(size)) {
		SNAPSHOT_ERR_NOMEM(device, "CP PFP RAM DEBUG");
		return 0;
	}

	header->type = SNAPSHOT_DEBUG_CP_PFP_RAM;
	header->size = size;

	/*
	 * Read the firmware from the GPU rather than use our cache in order to
	 * try to catch mis-programming or corruption in the hardware. We do
	 * use the cached version of the size, however, instead of trying to
	 * maintain always changing hardcoded constants
	 */
	adreno_writereg(adreno_dev, ADRENO_REG_CP_PFP_UCODE_ADDR, 0x0);
	for (i = 0; i < size; i++)
		adreno_readreg(adreno_dev, ADRENO_REG_CP_PFP_UCODE_DATA,
			&data[i]);

	return DEBUG_SECTION_SZ(size);
}

/*
 * adreno_snapshot_vpc_memory() - Save VPC data in snapshot
 * @device: Device being snapshotted
 * @buf: Snapshot memory
 * @remain: Number of bytes left in snapshot memory
 * @priv: Private data for VPC if any
 */
size_t adreno_snapshot_vpc_memory(struct kgsl_device *device, u8 *buf,
		size_t remain, void *priv)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct kgsl_snapshot_debug *header = (struct kgsl_snapshot_debug *)buf;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	int vpc_mem_size = *((int *)priv);
	size_t size = VPC_MEMORY_BANKS * vpc_mem_size;
	int bank, addr, i = 0;

	if (remain < DEBUG_SECTION_SZ(size)) {
		SNAPSHOT_ERR_NOMEM(device, "VPC MEMORY");
		return 0;
	}

	header->type = SNAPSHOT_DEBUG_VPC_MEMORY;
	header->size = size;

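	/*
	 * The RAM_SEL value packs the bank into the low bits and the word
	 * address from bit 4 up (an encoding taken from the write below);
	 * each RAM_READ that follows returns one dword of VPC memory.
	 */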
	for (bank = 0; bank < VPC_MEMORY_BANKS; bank++) {
		for (addr = 0; addr < vpc_mem_size; addr++) {
			unsigned int val = bank | (addr << 4);

			adreno_writereg(adreno_dev,
				ADRENO_REG_VPC_DEBUG_RAM_SEL, val);
			adreno_readreg(adreno_dev,
				ADRENO_REG_VPC_DEBUG_RAM_READ, &data[i++]);
		}
	}

	return DEBUG_SECTION_SZ(size);
}

/*
 * adreno_snapshot_cp_meq() - Save CP MEQ data in snapshot
 * @device: Device being snapshotted
 * @buf: Snapshot memory
 * @remain: Number of bytes left in snapshot memory
 * @priv: Contains the size of MEQ data
 */
size_t adreno_snapshot_cp_meq(struct kgsl_device *device, u8 *buf,
		size_t remain, void *priv)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct kgsl_snapshot_debug *header = (struct kgsl_snapshot_debug *)buf;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	int i;
	int cp_meq_sz = *((int *)priv);

	if (remain < DEBUG_SECTION_SZ(cp_meq_sz)) {
		SNAPSHOT_ERR_NOMEM(device, "CP MEQ DEBUG");
		return 0;
	}

	header->type = SNAPSHOT_DEBUG_CP_MEQ;
	header->size = cp_meq_sz;

	adreno_writereg(adreno_dev, ADRENO_REG_CP_MEQ_ADDR, 0x0);
	for (i = 0; i < cp_meq_sz; i++)
		adreno_readreg(adreno_dev, ADRENO_REG_CP_MEQ_DATA, &data[i]);

	return DEBUG_SECTION_SZ(cp_meq_sz);
}

static const struct adreno_vbif_snapshot_registers *vbif_registers(
		struct adreno_device *adreno_dev,
		const struct adreno_vbif_snapshot_registers *list,
		unsigned int count)
{
	unsigned int version;
	unsigned int i;

	adreno_readreg(adreno_dev, ADRENO_REG_VBIF_VERSION, &version);

	for (i = 0; i < count; i++) {
		if ((list[i].version & list[i].mask) ==
				(version & list[i].mask))
			return &list[i];
	}

	KGSL_CORE_ERR(
		"snapshot: Registers for VBIF version %X were not dumped\n",
		version);

	return NULL;
}

void adreno_snapshot_registers(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot,
		const unsigned int *regs, unsigned int count)
{
	struct kgsl_snapshot_registers r;

	r.regs = regs;
	r.count = count;

	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS, snapshot,
		kgsl_snapshot_dump_registers, &r);
}

void adreno_snapshot_vbif_registers(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot,
		const struct adreno_vbif_snapshot_registers *list,
		unsigned int count)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct kgsl_snapshot_registers regs;
	const struct adreno_vbif_snapshot_registers *vbif;

	vbif = vbif_registers(adreno_dev, list, count);

	if (vbif != NULL) {
		regs.regs = vbif->registers;
		regs.count = vbif->count;

		kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
			snapshot, kgsl_snapshot_dump_registers, &regs);
	}
}