/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/export.h>
#include <linux/time.h>
#include <linux/sysfs.h>
#include <linux/utsname.h>
#include <linux/sched.h>
#include <linux/idr.h>

#include "kgsl.h"
#include "kgsl_log.h"
#include "kgsl_device.h"
#include "kgsl_sharedmem.h"
#include "kgsl_snapshot.h"
#include "adreno_cp_parser.h"

/* Placeholder for list of ib objects that contain all objects in that IB */

struct kgsl_snapshot_cp_obj {
	struct adreno_ib_object_list *ib_obj_list;
	struct list_head node;
};

struct snapshot_obj_itr {
	u8 *buf;	/* Buffer pointer to write to */
	int pos;	/* Current position in the sequence */
	loff_t offset;	/* file offset to start writing from */
	size_t remain;	/* Bytes remaining in buffer */
	size_t write;	/* Bytes written so far */
};

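/*
 * A snapshot_obj_itr streams successive chunks of the snapshot into the
 * buffer supplied by a sysfs read: 'offset' is where the read starts within
 * the full dump, 'pos' tracks the logical position in that dump, and any
 * portion of a chunk that falls before 'offset' is skipped rather than
 * copied.
 */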
static void obj_itr_init(struct snapshot_obj_itr *itr, u8 *buf,
	loff_t offset, size_t remain)
{
	itr->buf = buf;
	itr->offset = offset;
	itr->remain = remain;
	itr->pos = 0;
	itr->write = 0;
}

static int obj_itr_out(struct snapshot_obj_itr *itr, void *src, int size)
{
	if (itr->remain == 0)
		return 0;

	if ((itr->pos + size) <= itr->offset)
		goto done;

	/* Handle the case that offset is in the middle of the buffer */

	if (itr->offset > itr->pos) {
		src += (itr->offset - itr->pos);
		size -= (itr->offset - itr->pos);

		/* Advance pos to the offset start */
		itr->pos = itr->offset;
	}

	if (size > itr->remain)
		size = itr->remain;

	memcpy(itr->buf, src, size);

	itr->buf += size;
	itr->write += size;
	itr->remain -= size;

done:
	itr->pos += size;
	return size;
}

/* idr_for_each function to count the number of contexts */

static int snapshot_context_count(int id, void *ptr, void *data)
{
	int *count = data;
	*count = *count + 1;

	return 0;
}

/*
 * To simplify the iterator loop use a global pointer instead of trying
 * to pass around double star references to the snapshot data
 */

static u8 *_ctxtptr;

static int snapshot_context_info(int id, void *ptr, void *data)
{
	struct kgsl_snapshot_linux_context_v2 *header =
		(struct kgsl_snapshot_linux_context_v2 *)_ctxtptr;
	struct kgsl_context *context = ptr;
	struct kgsl_device *device;

	device = context->device;

	header->id = id;

	/* Future-proof for per-context timestamps - for now, just
	 * return the global timestamp for all contexts
	 */

	kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_QUEUED,
		&header->timestamp_queued);
	kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_CONSUMED,
		&header->timestamp_consumed);
	kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED,
		&header->timestamp_retired);

	_ctxtptr += sizeof(struct kgsl_snapshot_linux_context_v2);

	return 0;
}

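/*
 * The OS section below is a kgsl_snapshot_linux_v2 header followed
 * immediately by one kgsl_snapshot_linux_context_v2 record per active
 * context; snapshot_context_info() above fills those records in place
 * through _ctxtptr.
 */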
/* Snapshot the Linux specific information */
static size_t snapshot_os(struct kgsl_device *device,
	u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_linux_v2 *header =
		(struct kgsl_snapshot_linux_v2 *)buf;
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int ctxtcount = 0;
	size_t size = sizeof(*header);
	struct kgsl_context *context;

	/*
	 * Figure out how many active contexts there are - these will
	 * be appended on the end of the structure
	 */

	read_lock(&device->context_lock);
	idr_for_each(&device->context_idr, snapshot_context_count, &ctxtcount);
	read_unlock(&device->context_lock);

	size += ctxtcount * sizeof(struct kgsl_snapshot_linux_context_v2);

	/* Make sure there is enough room for the data */
	if (remain < size) {
		SNAPSHOT_ERR_NOMEM(device, "OS");
		return 0;
	}

	memset(header, 0, sizeof(*header));

	header->osid = KGSL_SNAPSHOT_OS_LINUX_V3;

	/* Get the kernel build information */
	strlcpy(header->release, init_utsname()->release,
			sizeof(header->release));
	strlcpy(header->version, init_utsname()->version,
			sizeof(header->version));

	/* Get the Unix time for the timestamp */
	header->seconds = get_seconds();

	/* Remember the power information */
	header->power_flags = pwr->power_flags;
	header->power_level = pwr->active_pwrlevel;
	header->power_interval_timeout = pwr->interval_timeout;
	header->grpclk = kgsl_get_clkrate(pwr->grp_clks[0]);

	/*
	 * Save the last active context from the global index since it is more
	 * reliable than the current RB index
	 */
	kgsl_sharedmem_readl(&device->memstore, &header->current_context,
		KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context));

	context = kgsl_context_get(device, header->current_context);

	/* Get the current PT base */
	if (!IS_ERR(priv))
		header->ptbase = kgsl_mmu_get_current_ttbr0(&device->mmu);

	/* And the PID for the task leader */
	if (context) {
		header->pid = context->tid;
		strlcpy(header->comm, context->proc_priv->comm,
				sizeof(header->comm));
		kgsl_context_put(context);
		context = NULL;
	}

	header->ctxtcount = ctxtcount;

	_ctxtptr = buf + sizeof(*header);
	/* append information for each context */

	read_lock(&device->context_lock);
	idr_for_each(&device->context_idr, snapshot_context_info, NULL);
	read_unlock(&device->context_lock);

	/* Return the size of the data segment */
	return size;
}

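/*
 * Release a frozen object: KGSL_MEMDESC_FROZEN is set in
 * kgsl_snapshot_get_object() when a buffer is pinned for the dump and is
 * cleared here so the memory entry can be freed normally again.
 */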
static void kgsl_snapshot_put_object(struct kgsl_snapshot_object *obj)
{
	list_del(&obj->node);

	obj->entry->memdesc.priv &= ~KGSL_MEMDESC_FROZEN;
	kgsl_mem_entry_put(obj->entry);

	kfree(obj);
}

/**
 * kgsl_snapshot_have_object() - return 1 if the object has been processed
 * @snapshot: the snapshot data
 * @process: The process that owns the object to freeze
 * @gpuaddr: The gpu address of the object to freeze
 * @size: the size of the object (may not always be the size of the region)
 *
 * Return 1 if the object is already in the list - this can save us from
 * having to parse the same thing over again. There are 2 lists that are
 * tracking objects so check for the object in both lists
 */
int kgsl_snapshot_have_object(struct kgsl_snapshot *snapshot,
			struct kgsl_process_private *process,
			uint64_t gpuaddr, uint64_t size)
{
	struct kgsl_snapshot_object *obj;
	struct kgsl_snapshot_cp_obj *obj_cp;
	struct adreno_ib_object *ib_obj;
	int i;

	/* Check whether the object is tracked already in ib list */
	list_for_each_entry(obj_cp, &snapshot->cp_list, node) {
		if (obj_cp->ib_obj_list == NULL
				|| obj_cp->ib_obj_list->num_objs == 0)
			continue;

		ib_obj = &(obj_cp->ib_obj_list->obj_list[0]);
		if (ib_obj->entry == NULL || ib_obj->entry->priv != process)
			continue;

		for (i = 0; i < obj_cp->ib_obj_list->num_objs; i++) {
			ib_obj = &(obj_cp->ib_obj_list->obj_list[i]);
			if ((gpuaddr >= ib_obj->gpuaddr) &&
				((gpuaddr + size) <=
				(ib_obj->gpuaddr + ib_obj->size)))
				return 1;
		}
	}

	list_for_each_entry(obj, &snapshot->obj_list, node) {
		if (obj->entry == NULL || obj->entry->priv != process)
			continue;

		if ((gpuaddr >= obj->gpuaddr) &&
			((gpuaddr + size) <= (obj->gpuaddr + obj->size)))
			return 1;
	}

	return 0;
}
EXPORT_SYMBOL(kgsl_snapshot_have_object);

/**
 * kgsl_snapshot_get_object() - Mark a GPU buffer to be frozen
 * @snapshot: The snapshot data
 * @process: The process that owns the object we want to freeze
 * @gpuaddr: The gpu address of the object to freeze
 * @size: the size of the object (may not always be the size of the region)
 * @type: the type of object being saved (shader, vbo, etc)
 *
 * Mark and freeze a GPU buffer object. This will prevent it from being
 * freed until it can be copied out as part of the snapshot dump. Returns the
 * size of the object being frozen
 */
int kgsl_snapshot_get_object(struct kgsl_snapshot *snapshot,
	struct kgsl_process_private *process, uint64_t gpuaddr,
	uint64_t size, unsigned int type)
{
	struct kgsl_mem_entry *entry;
	struct kgsl_snapshot_object *obj;
	uint64_t offset;
	int ret = -EINVAL;
	unsigned int mem_type;

	if (!gpuaddr)
		return 0;

	entry = kgsl_sharedmem_find(process, gpuaddr);

	if (entry == NULL) {
		KGSL_CORE_ERR("Unable to find GPU buffer 0x%016llX\n", gpuaddr);
		return -EINVAL;
	}

	/* We can't freeze external memory, because we don't own it */
	if (entry->memdesc.flags & KGSL_MEMFLAGS_USERMEM_MASK)
		goto err_put;
	/*
	 * Do not save texture and render targets in snapshot,
	 * they can be just too big
	 */

	mem_type = kgsl_memdesc_get_memtype(&entry->memdesc);
	if (mem_type == KGSL_MEMTYPE_TEXTURE ||
			mem_type == KGSL_MEMTYPE_EGL_SURFACE ||
			mem_type == KGSL_MEMTYPE_EGL_IMAGE) {
		ret = 0;
		goto err_put;
	}

	/* Do not save sparse memory */
	if (entry->memdesc.flags & KGSL_MEMFLAGS_SPARSE_VIRT ||
			entry->memdesc.flags & KGSL_MEMFLAGS_SPARSE_PHYS) {
		ret = 0;
		goto err_put;
	}

	/*
	 * size indicates the number of bytes in the region to save. This might
	 * not always be the entire size of the region because some buffers are
	 * sub-allocated from a larger region. However, if size 0 was passed
	 * that's a flag that the caller wants to capture the entire buffer
	 */

	if (size == 0) {
		size = entry->memdesc.size;
		offset = 0;

		/* Adjust the gpuaddr to the start of the object */
		gpuaddr = entry->memdesc.gpuaddr;
	} else {
		offset = gpuaddr - entry->memdesc.gpuaddr;
	}

	if (size + offset > entry->memdesc.size) {
		KGSL_CORE_ERR("Invalid size for GPU buffer 0x%016llX\n",
			gpuaddr);
		goto err_put;
	}

	/* If the buffer is already on the list, skip it */
	list_for_each_entry(obj, &snapshot->obj_list, node) {
		/* combine the range with existing object if they overlap */
		if (obj->entry->priv == process && obj->type == type &&
			kgsl_addr_range_overlap(obj->gpuaddr, obj->size,
				gpuaddr, size)) {
			uint64_t end1 = obj->gpuaddr + obj->size;
			uint64_t end2 = gpuaddr + size;

			if (obj->gpuaddr > gpuaddr)
				obj->gpuaddr = gpuaddr;
			if (end1 > end2)
				obj->size = end1 - obj->gpuaddr;
			else
				obj->size = end2 - obj->gpuaddr;
			obj->offset = obj->gpuaddr - entry->memdesc.gpuaddr;
			ret = 0;
			goto err_put;
		}
	}

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (obj == NULL)
		goto err_put;

	obj->type = type;
	obj->entry = entry;
	obj->gpuaddr = gpuaddr;
	obj->size = size;
	obj->offset = offset;

	list_add(&obj->node, &snapshot->obj_list);

	/*
	 * Return the size of the entire mem entry that was frozen - this gets
	 * used for tracking how much memory is frozen for a hang. Also, mark
	 * the memory entry as frozen. If the entry was already marked as
	 * frozen, then another buffer already got to it. In that case, return
	 * 0 so it doesn't get counted twice
	 */

	ret = (entry->memdesc.priv & KGSL_MEMDESC_FROZEN) ? 0
		: entry->memdesc.size;

	entry->memdesc.priv |= KGSL_MEMDESC_FROZEN;

	return ret;
err_put:
	kgsl_mem_entry_put(entry);
	return ret;
}
EXPORT_SYMBOL(kgsl_snapshot_get_object);

/**
 * kgsl_snapshot_dump_registers - helper function to dump device registers
 * @device: the device to dump registers from
 * @buf: pointer to the start of the region of memory for the section
 * @remain: the number of bytes remaining in the snapshot region
 * @priv: pointer to the kgsl_snapshot_registers data
 *
 * Given an array of register range pairs (start, end [inclusive]), dump the
 * registers into a snapshot register section. The snapshot region stores a
 * pair of dwords for each register - the word address of the register, and
 * the value.
 */
size_t kgsl_snapshot_dump_registers(struct kgsl_device *device, u8 *buf,
		size_t remain, void *priv)
{
	struct kgsl_snapshot_regs *header = (struct kgsl_snapshot_regs *)buf;
	struct kgsl_snapshot_registers *regs = priv;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	int count = 0, j, k;

	/* Figure out how many registers we are going to dump */

	for (j = 0; j < regs->count; j++) {
		int start = regs->regs[j * 2];
		int end = regs->regs[j * 2 + 1];

		count += (end - start + 1);
	}

	if (remain < (count * 8) + sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	for (j = 0; j < regs->count; j++) {
		unsigned int start = regs->regs[j * 2];
		unsigned int end = regs->regs[j * 2 + 1];

		for (k = start; k <= end; k++) {
			unsigned int val;

			kgsl_regread(device, k, &val);
			*data++ = k;
			*data++ = val;
		}
	}

	header->count = count;

	/* Return the size of the section */
	return (count * 8) + sizeof(*header);
}
EXPORT_SYMBOL(kgsl_snapshot_dump_registers);
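
/*
 * For example (hypothetical ranges): with regs->count = 2 and
 * regs->regs = { 0x0, 0x3, 0x10, 0x11 }, the section above would contain
 * six (address, value) dword pairs covering 0x0-0x3 and 0x10-0x11, i.e.
 * a payload of 6 * 8 = 48 bytes after the section header.
 */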

struct kgsl_snapshot_indexed_registers {
	unsigned int index;
	unsigned int data;
	unsigned int start;
	unsigned int count;
};

static size_t kgsl_snapshot_dump_indexed_regs(struct kgsl_device *device,
	u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_indexed_registers *iregs = priv;
	struct kgsl_snapshot_indexed_regs *header =
		(struct kgsl_snapshot_indexed_regs *)buf;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	int i;

	if (remain < (iregs->count * 4) + sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "INDEXED REGS");
		return 0;
	}

	header->index_reg = iregs->index;
	header->data_reg = iregs->data;
	header->count = iregs->count;
	header->start = iregs->start;

	for (i = 0; i < iregs->count; i++) {
		kgsl_regwrite(device, iregs->index, iregs->start + i);
		kgsl_regread(device, iregs->data, &data[i]);
	}

	return (iregs->count * 4) + sizeof(*header);
}

/**
 * kgsl_snapshot_indexed_registers - Add a set of indexed registers to the
 * snapshot
 * @device: Pointer to the KGSL device being snapshotted
 * @snapshot: Snapshot instance
 * @index: Offset for the index register
 * @data: Offset for the data register
 * @start: Index to start reading
 * @count: Number of entries to read
 *
 * Dump the values from an indexed register group into the snapshot
 */
void kgsl_snapshot_indexed_registers(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot,
		unsigned int index, unsigned int data,
		unsigned int start,
		unsigned int count)
{
	struct kgsl_snapshot_indexed_registers iregs;

	iregs.index = index;
	iregs.data = data;
	iregs.start = start;
	iregs.count = count;

	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_INDEXED_REGS,
		snapshot, kgsl_snapshot_dump_indexed_regs, &iregs);
}
EXPORT_SYMBOL(kgsl_snapshot_indexed_registers);
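
/*
 * Typical use from a GPU-specific snapshot routine (the register names here
 * are hypothetical): dump 64 entries of an indexed debug array starting at
 * index 0:
 *
 *	kgsl_snapshot_indexed_registers(device, snapshot,
 *		CP_DEBUG_INDEX_REG, CP_DEBUG_DATA_REG, 0, 64);
 */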

/**
 * kgsl_snapshot_add_section() - Add a new section to the GPU snapshot
 * @device: the KGSL device being snapshotted
 * @id: the section id
 * @snapshot: pointer to the snapshot instance
 * @func: Function pointer to fill the section
 * @priv: Private pointer to pass to the function
 *
 * Fill in the section data by calling the callback function and then set up
 * the standard section header in front of it
 */
void kgsl_snapshot_add_section(struct kgsl_device *device, u16 id,
	struct kgsl_snapshot *snapshot,
	size_t (*func)(struct kgsl_device *, u8 *, size_t, void *),
	void *priv)
{
	struct kgsl_snapshot_section_header *header =
		(struct kgsl_snapshot_section_header *)snapshot->ptr;
	u8 *data = snapshot->ptr + sizeof(*header);
	size_t ret = 0;

	/*
	 * Sanity check to make sure there is enough for the header. The
	 * callback will check to make sure there is enough for the rest
	 * of the data. If there isn't enough room then don't advance the
	 * pointer.
	 */

	if (snapshot->remain < sizeof(*header))
		return;

	/* It is legal to have no function (i.e. - make an empty section) */
	if (func) {
		ret = func(device, data, snapshot->remain - sizeof(*header),
			priv);

		/*
		 * If there wasn't enough room for the data then don't bother
		 * setting up the header.
		 */

		if (ret == 0)
			return;
	}

	header->magic = SNAPSHOT_SECTION_MAGIC;
	header->id = id;
	header->size = ret + sizeof(*header);

	snapshot->ptr += header->size;
	snapshot->remain -= header->size;
	snapshot->size += header->size;
}
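
/*
 * Layout of a completed dump, as read back through the sysfs "dump" file:
 * a kgsl_snapshot_header followed by a series of sections, each preceded by
 * a kgsl_snapshot_section_header (magic, id, size), and terminated by a
 * KGSL_SNAPSHOT_SECTION_END section that snapshot_show() appends.
 */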

/**
 * kgsl_device_snapshot() - construct a device snapshot
 * @device: device to snapshot
 * @context: the context that is hung, might be NULL if unknown.
 *
 * Given a device, construct a binary snapshot dump of the current device state
 * and store it in the device snapshot memory.
 */
void kgsl_device_snapshot(struct kgsl_device *device,
		struct kgsl_context *context)
{
	struct kgsl_snapshot_header *header = device->snapshot_memory.ptr;
	struct kgsl_snapshot *snapshot;
	struct timespec boot;
	phys_addr_t pa;

	if (device->snapshot_memory.ptr == NULL) {
		KGSL_DRV_ERR(device,
			"snapshot: no snapshot memory available\n");
		return;
	}

	if (WARN(!kgsl_state_is_awake(device),
		"snapshot: device is powered off\n"))
		return;

	/* increment the hang count for good bookkeeping */
	device->snapshot_faultcount++;

	/*
	 * The first hang is always the one we are interested in. Don't capture
	 * a new snapshot instance if the old one hasn't been grabbed yet
	 */
	if (device->snapshot != NULL)
		return;

	/* Allocate memory for the snapshot instance */
	snapshot = kzalloc(sizeof(*snapshot), GFP_KERNEL);
	if (snapshot == NULL)
		return;

	init_completion(&snapshot->dump_gate);
	INIT_LIST_HEAD(&snapshot->obj_list);
	INIT_LIST_HEAD(&snapshot->cp_list);
	INIT_WORK(&snapshot->work, kgsl_snapshot_save_frozen_objs);

	snapshot->start = device->snapshot_memory.ptr;
	snapshot->ptr = device->snapshot_memory.ptr;
	snapshot->remain = device->snapshot_memory.size;
	atomic_set(&snapshot->sysfs_read, 0);

	header = (struct kgsl_snapshot_header *) snapshot->ptr;

	header->magic = SNAPSHOT_MAGIC;
	header->gpuid = kgsl_gpuid(device, &header->chipid);

	snapshot->ptr += sizeof(*header);
	snapshot->remain -= sizeof(*header);
	snapshot->size += sizeof(*header);

	/* Build the Linux specific header */
	/* A context error implies a GMU fault, so limit the dump */
	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_OS,
			snapshot, snapshot_os,
			IS_ERR(context) ? context : NULL);

	/* Get the device specific sections */
	if (device->ftbl->snapshot)
		device->ftbl->snapshot(device, snapshot, context);

	/*
	 * The timestamp is the seconds since boot so it is easier to match to
	 * the kernel log
	 */

	getboottime(&boot);
	snapshot->timestamp = get_seconds() - boot.tv_sec;

	/* Store the instance in the device until it gets dumped */
	device->snapshot = snapshot;

	/* log buffer info to aid in ramdump fault tolerance */
	pa = __pa(device->snapshot_memory.ptr);
	KGSL_DRV_ERR(device, "snapshot created at pa %pa size %zd\n",
			&pa, snapshot->size);

	sysfs_notify(&device->snapshot_kobj, NULL, "timestamp");

	/*
	 * Queue a work item that will save the IB data in the snapshot into
	 * static memory to prevent loss of data due to overwriting of
	 * memory.
	 */
	kgsl_schedule_work(&snapshot->work);
}
EXPORT_SYMBOL(kgsl_device_snapshot);
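
/*
 * The snapshot instance created above stays attached to the device until
 * userspace reads the whole "dump" file; snapshot_show() frees the frozen
 * objects and the instance itself once a read reaches the end of the dump
 * and no other readers are pending.
 */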

/* An attribute for showing snapshot details */
struct kgsl_snapshot_attribute {
	struct attribute attr;
	ssize_t (*show)(struct kgsl_device *device, char *buf);
	ssize_t (*store)(struct kgsl_device *device, const char *buf,
		size_t count);
};

/**
 * kgsl_snapshot_process_ib_obj_list() - Go through the list of IBs which need
 * to be dumped for snapshot and move them to the global snapshot list so
 * they will get dumped when the global list is dumped
 * @snapshot: the snapshot being processed
 */
static void kgsl_snapshot_process_ib_obj_list(struct kgsl_snapshot *snapshot)
{
	struct kgsl_snapshot_cp_obj *obj, *obj_temp;
	struct adreno_ib_object *ib_obj;
	int i;

	list_for_each_entry_safe(obj, obj_temp, &snapshot->cp_list,
			node) {
		for (i = 0; i < obj->ib_obj_list->num_objs; i++) {
			ib_obj = &(obj->ib_obj_list->obj_list[i]);
			kgsl_snapshot_get_object(snapshot, ib_obj->entry->priv,
				ib_obj->gpuaddr, ib_obj->size,
				ib_obj->snapshot_obj_type);
		}
		list_del(&obj->node);
		adreno_ib_destroy_obj_list(obj->ib_obj_list);
		kfree(obj);
	}
}

#define to_snapshot_attr(a) \
container_of(a, struct kgsl_snapshot_attribute, attr)

#define kobj_to_device(a) \
container_of(a, struct kgsl_device, snapshot_kobj)

/* Dump the sysfs binary data to the user */
static ssize_t snapshot_show(struct file *filep, struct kobject *kobj,
	struct bin_attribute *attr, char *buf, loff_t off,
	size_t count)
{
	struct kgsl_device *device = kobj_to_device(kobj);
	struct kgsl_snapshot *snapshot;
	struct kgsl_snapshot_object *obj, *tmp;
	struct kgsl_snapshot_section_header head;
	struct snapshot_obj_itr itr;
	int ret;

	if (device == NULL)
		return 0;

	mutex_lock(&device->mutex);
	snapshot = device->snapshot;
	if (snapshot != NULL)
		atomic_inc(&snapshot->sysfs_read);
	mutex_unlock(&device->mutex);

	/* Return nothing if we haven't taken a snapshot yet */
	if (snapshot == NULL)
		return 0;

	/*
	 * Wait for the dump worker to finish. This is interruptible
	 * to allow userspace to bail if things go horribly wrong.
	 */
	ret = wait_for_completion_interruptible(&snapshot->dump_gate);
	if (ret) {
		atomic_dec(&snapshot->sysfs_read);
		return ret;
	}

	obj_itr_init(&itr, buf, off, count);

	ret = obj_itr_out(&itr, snapshot->start, snapshot->size);
	if (ret == 0)
		goto done;

	/* Dump the memory pool if it exists */
	if (snapshot->mempool) {
		ret = obj_itr_out(&itr, snapshot->mempool,
				snapshot->mempool_size);
		if (ret == 0)
			goto done;
	}

	{
		head.magic = SNAPSHOT_SECTION_MAGIC;
		head.id = KGSL_SNAPSHOT_SECTION_END;
		head.size = sizeof(head);

		obj_itr_out(&itr, &head, sizeof(head));
	}

	/*
	 * Make sure everything has been written out before destroying things.
	 * The best way to confirm this is to go all the way through without
	 * writing any bytes - so only release if we get this far and
	 * itr->write is 0 and there are no concurrent reads pending
	 */

	if (itr.write == 0) {
		bool snapshot_free = false;

		mutex_lock(&device->mutex);
		if (atomic_dec_and_test(&snapshot->sysfs_read)) {
			device->snapshot = NULL;
			snapshot_free = true;
		}
		mutex_unlock(&device->mutex);

		if (snapshot_free) {
			list_for_each_entry_safe(obj, tmp,
					&snapshot->obj_list, node)
				kgsl_snapshot_put_object(obj);

			if (snapshot->mempool)
				vfree(snapshot->mempool);

			kfree(snapshot);
			KGSL_CORE_ERR("snapshot: objects released\n");
		}
		return 0;
	}

done:
	atomic_dec(&snapshot->sysfs_read);
	return itr.write;
}

/* Show the total number of hangs since device boot */
static ssize_t faultcount_show(struct kgsl_device *device, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", device->snapshot_faultcount);
}

/* Reset the total number of hangs since device boot */
static ssize_t faultcount_store(struct kgsl_device *device, const char *buf,
	size_t count)
{
	if (device && count > 0)
		device->snapshot_faultcount = 0;

	return count;
}

/* Show the force_panic request status */
static ssize_t force_panic_show(struct kgsl_device *device, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", device->force_panic);
}

/* Store the panic request value to force_panic */
static ssize_t force_panic_store(struct kgsl_device *device, const char *buf,
	size_t count)
{
	unsigned int val = 0;
	int ret;

	if (device && count > 0)
		device->force_panic = 0;

	ret = kgsl_sysfs_store(buf, &val);

	if (!ret && device)
		device->force_panic = (bool)val;

	return (ssize_t) ret < 0 ? ret : count;
}

/* Show the snapshot_crashdumper request status */
static ssize_t snapshot_crashdumper_show(struct kgsl_device *device, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", device->snapshot_crashdumper);
}

/* Store the value to snapshot_crashdumper */
static ssize_t snapshot_crashdumper_store(struct kgsl_device *device,
	const char *buf, size_t count)
{
	unsigned int val = 0;
	int ret;

	if (device && count > 0)
		device->snapshot_crashdumper = 1;

	ret = kgsl_sysfs_store(buf, &val);

	if (!ret && device)
		device->snapshot_crashdumper = (bool)val;

	return (ssize_t) ret < 0 ? ret : count;
}

/* Show the timestamp of the last collected snapshot */
static ssize_t timestamp_show(struct kgsl_device *device, char *buf)
{
	unsigned long timestamp =
		device->snapshot ? device->snapshot->timestamp : 0;

	return snprintf(buf, PAGE_SIZE, "%lu\n", timestamp);
}

static ssize_t snapshot_legacy_show(struct kgsl_device *device, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", device->snapshot_legacy);
}

static ssize_t snapshot_legacy_store(struct kgsl_device *device,
	const char *buf, size_t count)
{
	unsigned int val = 0;
	int ret;

	ret = kgsl_sysfs_store(buf, &val);

	if (!ret && device)
		device->snapshot_legacy = (bool)val;

	return (ssize_t) ret < 0 ? ret : count;
}

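/*
 * sysfs interface, created in kgsl_device_snapshot_init() and typically
 * found under /sys/class/kgsl/kgsl-3d0/snapshot/: the binary "dump" file
 * streams the snapshot itself, while timestamp, faultcount, force_panic,
 * snapshot_crashdumper and snapshot_legacy are plain attributes handled by
 * the show/store callbacks above.
 */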
static struct bin_attribute snapshot_attr = {
	.attr.name = "dump",
	.attr.mode = 0444,
	.size = 0,
	.read = snapshot_show
};

#define SNAPSHOT_ATTR(_name, _mode, _show, _store) \
struct kgsl_snapshot_attribute attr_##_name = { \
	.attr = { .name = __stringify(_name), .mode = _mode }, \
	.show = _show, \
	.store = _store, \
}

static SNAPSHOT_ATTR(timestamp, 0444, timestamp_show, NULL);
static SNAPSHOT_ATTR(faultcount, 0644, faultcount_show, faultcount_store);
static SNAPSHOT_ATTR(force_panic, 0644, force_panic_show, force_panic_store);
static SNAPSHOT_ATTR(snapshot_crashdumper, 0644, snapshot_crashdumper_show,
	snapshot_crashdumper_store);
static SNAPSHOT_ATTR(snapshot_legacy, 0644, snapshot_legacy_show,
	snapshot_legacy_store);

static ssize_t snapshot_sysfs_show(struct kobject *kobj,
	struct attribute *attr, char *buf)
{
	struct kgsl_snapshot_attribute *pattr = to_snapshot_attr(attr);
	struct kgsl_device *device = kobj_to_device(kobj);
	ssize_t ret;

	if (device && pattr->show)
		ret = pattr->show(device, buf);
	else
		ret = -EIO;

	return ret;
}

static ssize_t snapshot_sysfs_store(struct kobject *kobj,
	struct attribute *attr, const char *buf, size_t count)
{
	struct kgsl_snapshot_attribute *pattr = to_snapshot_attr(attr);
	struct kgsl_device *device = kobj_to_device(kobj);
	ssize_t ret;

	if (device && pattr->store)
		ret = pattr->store(device, buf, count);
	else
		ret = -EIO;

	return ret;
}

static const struct sysfs_ops snapshot_sysfs_ops = {
	.show = snapshot_sysfs_show,
	.store = snapshot_sysfs_store,
};

static struct kobj_type ktype_snapshot = {
	.sysfs_ops = &snapshot_sysfs_ops,
};

/**
 * kgsl_device_snapshot_init() - add resources for the device GPU snapshot
 * @device: The device to initialize
 *
 * Allocate memory for a GPU snapshot for the specified device,
 * and create the sysfs files to manage it
 */
int kgsl_device_snapshot_init(struct kgsl_device *device)
{
	int ret;

	if (kgsl_property_read_u32(device, "qcom,snapshot-size",
		(unsigned int *) &(device->snapshot_memory.size)))
		device->snapshot_memory.size = KGSL_SNAPSHOT_MEMSIZE;

	/*
	 * Choosing a memory size of 0 is essentially the same as disabling
	 * snapshotting
	 */
	if (device->snapshot_memory.size == 0)
		return 0;

	/*
	 * I'm not sure why anybody would choose to do so but make sure
	 * that we can at least fit the snapshot header in the requested
	 * region
	 */
	if (device->snapshot_memory.size < sizeof(struct kgsl_snapshot_header))
		device->snapshot_memory.size =
			sizeof(struct kgsl_snapshot_header);

	device->snapshot_memory.ptr = kzalloc(device->snapshot_memory.size,
		GFP_KERNEL);

	if (device->snapshot_memory.ptr == NULL)
		return -ENOMEM;

	device->snapshot = NULL;
	device->snapshot_faultcount = 0;
	device->force_panic = 0;
	device->snapshot_crashdumper = 1;
	device->snapshot_legacy = 0;

	ret = kobject_init_and_add(&device->snapshot_kobj, &ktype_snapshot,
		&device->dev->kobj, "snapshot");
	if (ret)
		goto done;

	ret = sysfs_create_bin_file(&device->snapshot_kobj, &snapshot_attr);
	if (ret)
		goto done;

	ret = sysfs_create_file(&device->snapshot_kobj, &attr_timestamp.attr);
	if (ret)
		goto done;

	ret = sysfs_create_file(&device->snapshot_kobj, &attr_faultcount.attr);
	if (ret)
		goto done;

	ret = sysfs_create_file(&device->snapshot_kobj,
			&attr_force_panic.attr);
	if (ret)
		goto done;

	ret = sysfs_create_file(&device->snapshot_kobj,
			&attr_snapshot_crashdumper.attr);
	if (ret)
		goto done;

	ret = sysfs_create_file(&device->snapshot_kobj,
			&attr_snapshot_legacy.attr);

done:
	return ret;
}
EXPORT_SYMBOL(kgsl_device_snapshot_init);

/**
 * kgsl_device_snapshot_close() - take down snapshot memory for a device
 * @device: Pointer to the kgsl_device
 *
 * Remove the sysfs files and free the memory allocated for the GPU
 * snapshot
 */
void kgsl_device_snapshot_close(struct kgsl_device *device)
{
	sysfs_remove_bin_file(&device->snapshot_kobj, &snapshot_attr);
	sysfs_remove_file(&device->snapshot_kobj, &attr_timestamp.attr);

	kobject_put(&device->snapshot_kobj);

	kfree(device->snapshot_memory.ptr);

	device->snapshot_memory.ptr = NULL;
	device->snapshot_memory.size = 0;
	device->snapshot_faultcount = 0;
	device->force_panic = 0;
	device->snapshot_crashdumper = 1;
}
EXPORT_SYMBOL(kgsl_device_snapshot_close);

/**
 * kgsl_snapshot_add_ib_obj_list() - Add an IB object list to the snapshot
 * object list
 * @snapshot: the snapshot being constructed
 * @ib_obj_list: The IB object list that has the objects required to execute
 * an IB
 *
 * Adds a new IB to the list of IB objects maintained while taking a snapshot.
 * Returns 0 on success else -ENOMEM on error
 */
int kgsl_snapshot_add_ib_obj_list(struct kgsl_snapshot *snapshot,
	struct adreno_ib_object_list *ib_obj_list)
{
	struct kgsl_snapshot_cp_obj *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;
	obj->ib_obj_list = ib_obj_list;
	list_add(&obj->node, &snapshot->cp_list);
	return 0;
}

static size_t _mempool_add_object(struct kgsl_snapshot *snapshot, u8 *data,
		struct kgsl_snapshot_object *obj)
{
	struct kgsl_snapshot_section_header *section =
		(struct kgsl_snapshot_section_header *)data;
	struct kgsl_snapshot_gpu_object_v2 *header =
		(struct kgsl_snapshot_gpu_object_v2 *)(data + sizeof(*section));
	u8 *dest = data + sizeof(*section) + sizeof(*header);
	uint64_t size;

	size = obj->size;

	if (!kgsl_memdesc_map(&obj->entry->memdesc)) {
		KGSL_CORE_ERR("snapshot: failed to map GPU object\n");
		return 0;
	}

	section->magic = SNAPSHOT_SECTION_MAGIC;
	section->id = KGSL_SNAPSHOT_SECTION_GPU_OBJECT_V2;
	section->size = size + sizeof(*header) + sizeof(*section);

	header->size = size >> 2;
	header->gpuaddr = obj->gpuaddr;
	header->ptbase =
		kgsl_mmu_pagetable_get_ttbr0(obj->entry->priv->pagetable);
	header->type = obj->type;

	if (kgsl_addr_range_overlap(obj->gpuaddr, obj->size,
				snapshot->ib1base, snapshot->ib1size))
		snapshot->ib1dumped = true;

	if (kgsl_addr_range_overlap(obj->gpuaddr, obj->size,
				snapshot->ib2base, snapshot->ib2size))
		snapshot->ib2dumped = true;

	memcpy(dest, obj->entry->memdesc.hostptr + obj->offset, size);
	kgsl_memdesc_unmap(&obj->entry->memdesc);

	return section->size;
}
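
/*
 * Note on units in the GPU object section built above: section->size counts
 * bytes (headers plus payload) while header->size is the payload length in
 * dwords, hence the "size >> 2".
 */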

/**
 * kgsl_snapshot_save_frozen_objs() - Save the objects frozen in the snapshot
 * into memory so that the data they contain still reflects the state at the
 * time the snapshot was taken
 * @work: The work item that scheduled this work
 */
void kgsl_snapshot_save_frozen_objs(struct work_struct *work)
{
	struct kgsl_snapshot *snapshot = container_of(work,
				struct kgsl_snapshot, work);
	struct kgsl_device *device = kgsl_get_device(KGSL_DEVICE_3D0);
	struct kgsl_snapshot_object *obj, *tmp;
	size_t size = 0;
	void *ptr;

	if (IS_ERR_OR_NULL(device))
		return;

	kgsl_snapshot_process_ib_obj_list(snapshot);

	list_for_each_entry(obj, &snapshot->obj_list, node) {
		obj->size = ALIGN(obj->size, 4);

		size += ((size_t) obj->size +
			sizeof(struct kgsl_snapshot_gpu_object_v2) +
			sizeof(struct kgsl_snapshot_section_header));
	}

	if (size == 0)
		goto done;

	snapshot->mempool = vmalloc(size);

	ptr = snapshot->mempool;
	snapshot->mempool_size = 0;

	/* even if vmalloc fails, make sure we clean up the obj_list */
	list_for_each_entry_safe(obj, tmp, &snapshot->obj_list, node) {
		if (snapshot->mempool) {
			size_t ret = _mempool_add_object(snapshot, ptr, obj);

			ptr += ret;
			snapshot->mempool_size += ret;
		}

		kgsl_snapshot_put_object(obj);
	}
done:
	/*
	 * Get rid of the process struct here, so that it doesn't sit
	 * around until someone bothers to read the snapshot file.
	 */
	kgsl_process_private_put(snapshot->process);
	snapshot->process = NULL;

	if (snapshot->ib1base && !snapshot->ib1dumped)
		KGSL_DRV_ERR(device,
			"snapshot: Active IB1:%016llx not dumped\n",
			snapshot->ib1base);
	else if (snapshot->ib2base && !snapshot->ib2dumped)
		KGSL_DRV_ERR(device,
			"snapshot: Active IB2:%016llx not dumped\n",
			snapshot->ib2base);

	complete_all(&snapshot->dump_gate);
	BUG_ON(device->force_panic);
}
1194}