/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/export.h>
#include <linux/time.h>
#include <linux/sysfs.h>
#include <linux/utsname.h>
#include <linux/sched.h>
#include <linux/idr.h>

#include "kgsl.h"
#include "kgsl_log.h"
#include "kgsl_device.h"
#include "kgsl_sharedmem.h"
#include "kgsl_snapshot.h"
#include "adreno_cp_parser.h"

/* List entry for an IB object list containing all objects found in that IB */

struct kgsl_snapshot_cp_obj {
	struct adreno_ib_object_list *ib_obj_list;
	struct list_head node;
};

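/*
 * Iterator state used to stream the snapshot out through sysfs reads that
 * may arrive in multiple chunks at arbitrary offsets
 */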
struct snapshot_obj_itr {
	u8 *buf;	/* Buffer pointer to write to */
	int pos;	/* Current position in the sequence */
	loff_t offset;	/* file offset to start writing from */
	size_t remain;	/* Bytes remaining in buffer */
	size_t write;	/* Bytes written so far */
};

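/*
 * Helpers that stream snapshot data into a sysfs read buffer:
 * obj_itr_init() seeds the iterator with the caller's buffer, offset and
 * length, and obj_itr_out() copies source data while skipping bytes that
 * fall before the requested offset and clamping to the space remaining.
 * obj_itr_out() returns 0 once the read buffer is full, which callers use
 * to stop emitting further data.
 */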
static void obj_itr_init(struct snapshot_obj_itr *itr, u8 *buf,
	loff_t offset, size_t remain)
{
	itr->buf = buf;
	itr->offset = offset;
	itr->remain = remain;
	itr->pos = 0;
	itr->write = 0;
}

static int obj_itr_out(struct snapshot_obj_itr *itr, void *src, int size)
{
	if (itr->remain == 0)
		return 0;

	if ((itr->pos + size) <= itr->offset)
		goto done;

	/* Handle the case that offset is in the middle of the buffer */

	if (itr->offset > itr->pos) {
		src += (itr->offset - itr->pos);
		size -= (itr->offset - itr->pos);

		/* Advance pos to the offset start */
		itr->pos = itr->offset;
	}

	if (size > itr->remain)
		size = itr->remain;

	memcpy(itr->buf, src, size);

	itr->buf += size;
	itr->write += size;
	itr->remain -= size;

done:
	itr->pos += size;
	return size;
}

/* idr_for_each function to count the number of contexts */

static int snapshot_context_count(int id, void *ptr, void *data)
{
	int *count = data;
	*count = *count + 1;

	return 0;
}

/*
 * To simplify the iterator loop use a global pointer instead of trying
 * to pass around double star references to the snapshot data
 */

static u8 *_ctxtptr;

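/*
 * idr_for_each callback that writes one kgsl_snapshot_linux_context_v2
 * record at _ctxtptr for each active context and advances the pointer
 */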
static int snapshot_context_info(int id, void *ptr, void *data)
{
	struct kgsl_snapshot_linux_context_v2 *header =
		(struct kgsl_snapshot_linux_context_v2 *)_ctxtptr;
	struct kgsl_context *context = ptr;
	struct kgsl_device *device;

	device = context->device;

	header->id = id;

	/* Future-proof for per-context timestamps - for now, just
	 * return the global timestamp for all contexts
	 */

	kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_QUEUED,
		&header->timestamp_queued);
	kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_CONSUMED,
		&header->timestamp_consumed);
	kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED,
		&header->timestamp_retired);

	_ctxtptr += sizeof(struct kgsl_snapshot_linux_context_v2);

	return 0;
}

/* Snapshot the Linux specific information */
static size_t snapshot_os(struct kgsl_device *device,
	u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_linux_v2 *header =
		(struct kgsl_snapshot_linux_v2 *)buf;
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int ctxtcount = 0;
	size_t size = sizeof(*header);
	struct kgsl_context *context;

	/*
	 * Figure out how many active contexts there are - these will
	 * be appended on the end of the structure
	 */

	read_lock(&device->context_lock);
	idr_for_each(&device->context_idr, snapshot_context_count, &ctxtcount);
	read_unlock(&device->context_lock);

	size += ctxtcount * sizeof(struct kgsl_snapshot_linux_context_v2);

	/* Make sure there is enough room for the data */
	if (remain < size) {
		SNAPSHOT_ERR_NOMEM(device, "OS");
		return 0;
	}

	memset(header, 0, sizeof(*header));

	header->osid = KGSL_SNAPSHOT_OS_LINUX_V3;

	/* Get the kernel build information */
	strlcpy(header->release, init_utsname()->release,
			sizeof(header->release));
	strlcpy(header->version, init_utsname()->version,
			sizeof(header->version));

	/* Get the Unix time for the timestamp */
	header->seconds = get_seconds();

	/* Remember the power information */
	header->power_flags = pwr->power_flags;
	header->power_level = pwr->active_pwrlevel;
	header->power_interval_timeout = pwr->interval_timeout;
	header->grpclk = kgsl_get_clkrate(pwr->grp_clks[0]);

	/*
	 * Save the last active context from global index since it's more
	 * reliable than the current RB index
	 */
	kgsl_sharedmem_readl(&device->memstore, &header->current_context,
		KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context));

	context = kgsl_context_get(device, header->current_context);

	/* Get the current PT base */
	header->ptbase = kgsl_mmu_get_current_ttbr0(&device->mmu);

	/* And the PID for the task leader */
	if (context) {
		header->pid = context->tid;
		strlcpy(header->comm, context->proc_priv->comm,
				sizeof(header->comm));
		kgsl_context_put(context);
		context = NULL;
	}

	header->ctxtcount = ctxtcount;

	_ctxtptr = buf + sizeof(*header);
	/* append information for each context */

	read_lock(&device->context_lock);
	idr_for_each(&device->context_idr, snapshot_context_info, NULL);
	read_unlock(&device->context_lock);

	/* Return the size of the data segment */
	return size;
}

208
209static void kgsl_snapshot_put_object(struct kgsl_snapshot_object *obj)
210{
211 list_del(&obj->node);
212
213 obj->entry->memdesc.priv &= ~KGSL_MEMDESC_FROZEN;
214 kgsl_mem_entry_put(obj->entry);
215
216 kfree(obj);
217}
218
/**
 * kgsl_snapshot_have_object() - return 1 if the object has been processed
 * @snapshot: the snapshot data
 * @process: The process that owns the object to freeze
 * @gpuaddr: The gpu address of the object to freeze
 * @size: the size of the object (may not always be the size of the region)
 *
 * Return 1 if the object is already in the list - this can save us from
 * having to parse the same thing over again. There are two lists tracking
 * objects, so check for the object in both of them.
 */
int kgsl_snapshot_have_object(struct kgsl_snapshot *snapshot,
	struct kgsl_process_private *process,
	uint64_t gpuaddr, uint64_t size)
{
	struct kgsl_snapshot_object *obj;
	struct kgsl_snapshot_cp_obj *obj_cp;
	struct adreno_ib_object *ib_obj;
	int i;

	/* Check whether the object is tracked already in ib list */
	list_for_each_entry(obj_cp, &snapshot->cp_list, node) {
		if (obj_cp->ib_obj_list == NULL
			|| obj_cp->ib_obj_list->num_objs == 0)
			continue;

		ib_obj = &(obj_cp->ib_obj_list->obj_list[0]);
		if (ib_obj->entry == NULL || ib_obj->entry->priv != process)
			continue;

		for (i = 0; i < obj_cp->ib_obj_list->num_objs; i++) {
			ib_obj = &(obj_cp->ib_obj_list->obj_list[i]);
			if ((gpuaddr >= ib_obj->gpuaddr) &&
				((gpuaddr + size) <=
				(ib_obj->gpuaddr + ib_obj->size)))
				return 1;
		}
	}

	list_for_each_entry(obj, &snapshot->obj_list, node) {
		if (obj->entry == NULL || obj->entry->priv != process)
			continue;

		if ((gpuaddr >= obj->gpuaddr) &&
			((gpuaddr + size) <= (obj->gpuaddr + obj->size)))
			return 1;
	}

	return 0;
}
EXPORT_SYMBOL(kgsl_snapshot_have_object);

/**
 * kgsl_snapshot_get_object() - Mark a GPU buffer to be frozen
 * @snapshot: The snapshot data
 * @process: The process that owns the object we want to freeze
 * @gpuaddr: The gpu address of the object to freeze
 * @size: the size of the object (may not always be the size of the region)
 * @type: the type of object being saved (shader, vbo, etc)
 *
 * Mark and freeze a GPU buffer object. This will prevent it from being
 * freed until it can be copied out as part of the snapshot dump. Returns the
 * size of the object being frozen
 */
int kgsl_snapshot_get_object(struct kgsl_snapshot *snapshot,
	struct kgsl_process_private *process, uint64_t gpuaddr,
	uint64_t size, unsigned int type)
{
	struct kgsl_mem_entry *entry;
	struct kgsl_snapshot_object *obj;
	uint64_t offset;
	int ret = -EINVAL;
	unsigned int mem_type;

	if (!gpuaddr)
		return 0;

	entry = kgsl_sharedmem_find(process, gpuaddr);

	if (entry == NULL) {
		KGSL_CORE_ERR("Unable to find GPU buffer 0x%016llX\n", gpuaddr);
		return -EINVAL;
	}

	/* We can't freeze external memory, because we don't own it */
	if (entry->memdesc.flags & KGSL_MEMFLAGS_USERMEM_MASK)
		goto err_put;
	/*
	 * Do not save texture and render targets in snapshot,
	 * they can be just too big
	 */

	mem_type = kgsl_memdesc_get_memtype(&entry->memdesc);
	if (mem_type == KGSL_MEMTYPE_TEXTURE ||
		mem_type == KGSL_MEMTYPE_EGL_SURFACE ||
		mem_type == KGSL_MEMTYPE_EGL_IMAGE) {
		ret = 0;
		goto err_put;
	}

	/* Do not save sparse memory */
	if (entry->memdesc.flags & KGSL_MEMFLAGS_SPARSE_VIRT ||
		entry->memdesc.flags & KGSL_MEMFLAGS_SPARSE_PHYS) {
		ret = 0;
		goto err_put;
	}

	/*
	 * size indicates the number of bytes in the region to save. This might
	 * not always be the entire size of the region because some buffers are
	 * sub-allocated from a larger region. However, if size 0 was passed,
	 * that's a flag that the caller wants to capture the entire buffer
	 */

	if (size == 0) {
		size = entry->memdesc.size;
		offset = 0;

		/* Adjust the gpuaddr to the start of the object */
		gpuaddr = entry->memdesc.gpuaddr;
	} else {
		offset = gpuaddr - entry->memdesc.gpuaddr;
	}

	if (size + offset > entry->memdesc.size) {
		KGSL_CORE_ERR("Invalid size for GPU buffer 0x%016llX\n",
			gpuaddr);
		goto err_put;
	}

	/* If the buffer is already on the list, skip it */
	list_for_each_entry(obj, &snapshot->obj_list, node) {
		/* combine the range with existing object if they overlap */
		if (obj->entry->priv == process && obj->type == type &&
			kgsl_addr_range_overlap(obj->gpuaddr, obj->size,
				gpuaddr, size)) {
			uint64_t end1 = obj->gpuaddr + obj->size;
			uint64_t end2 = gpuaddr + size;

			if (obj->gpuaddr > gpuaddr)
				obj->gpuaddr = gpuaddr;
			if (end1 > end2)
				obj->size = end1 - obj->gpuaddr;
			else
				obj->size = end2 - obj->gpuaddr;
			obj->offset = obj->gpuaddr - entry->memdesc.gpuaddr;
			ret = 0;
			goto err_put;
		}
	}

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (obj == NULL)
		goto err_put;

	obj->type = type;
	obj->entry = entry;
	obj->gpuaddr = gpuaddr;
	obj->size = size;
	obj->offset = offset;

	list_add(&obj->node, &snapshot->obj_list);

	/*
	 * Return the size of the entire mem entry that was frozen - this gets
	 * used for tracking how much memory is frozen for a hang. Also, mark
	 * the memory entry as frozen. If the entry was already marked as
	 * frozen, then another buffer already got to it. In that case, return
	 * 0 so it doesn't get counted twice
	 */

	ret = (entry->memdesc.priv & KGSL_MEMDESC_FROZEN) ? 0
		: entry->memdesc.size;

	entry->memdesc.priv |= KGSL_MEMDESC_FROZEN;

	return ret;
err_put:
	kgsl_mem_entry_put(entry);
	return ret;
}
EXPORT_SYMBOL(kgsl_snapshot_get_object);

/**
 * kgsl_snapshot_dump_registers - helper function to dump device registers
 * @device: the device to dump registers from
 * @buf: pointer to the start of the region of memory for the snapshot
 * @remain: number of bytes remaining in the snapshot region
 * @priv: pointer to the kgsl_snapshot_registers data
 *
 * Given an array of register range pairs (start, end [inclusive]), dump the
 * registers into a snapshot register section. The snapshot region stores a
 * pair of dwords for each register - the word address of the register and
 * the value.
 */
size_t kgsl_snapshot_dump_registers(struct kgsl_device *device, u8 *buf,
	size_t remain, void *priv)
{
	struct kgsl_snapshot_regs *header = (struct kgsl_snapshot_regs *)buf;
	struct kgsl_snapshot_registers *regs = priv;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	int count = 0, j, k;

	/* Figure out how many registers we are going to dump */

	for (j = 0; j < regs->count; j++) {
		int start = regs->regs[j * 2];
		int end = regs->regs[j * 2 + 1];

		count += (end - start + 1);
	}

	if (remain < (count * 8) + sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	for (j = 0; j < regs->count; j++) {
		unsigned int start = regs->regs[j * 2];
		unsigned int end = regs->regs[j * 2 + 1];

		for (k = start; k <= end; k++) {
			unsigned int val;

			kgsl_regread(device, k, &val);
			*data++ = k;
			*data++ = val;
		}
	}

	header->count = count;

	/* Return the size of the section */
	return (count * 8) + sizeof(*header);
}
EXPORT_SYMBOL(kgsl_snapshot_dump_registers);

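/*
 * Describes one indexed register block: the index and data register offsets
 * plus the range of index values to read
 */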
struct kgsl_snapshot_indexed_registers {
	unsigned int index;
	unsigned int data;
	unsigned int start;
	unsigned int count;
};

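/*
 * Section callback that walks an indexed register block by writing each
 * index to the index register and recording the value read back from the
 * data register
 */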
static size_t kgsl_snapshot_dump_indexed_regs(struct kgsl_device *device,
	u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_indexed_registers *iregs = priv;
	struct kgsl_snapshot_indexed_regs *header =
		(struct kgsl_snapshot_indexed_regs *)buf;
	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
	int i;

	if (remain < (iregs->count * 4) + sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "INDEXED REGS");
		return 0;
	}

	header->index_reg = iregs->index;
	header->data_reg = iregs->data;
	header->count = iregs->count;
	header->start = iregs->start;

	for (i = 0; i < iregs->count; i++) {
		kgsl_regwrite(device, iregs->index, iregs->start + i);
		kgsl_regread(device, iregs->data, &data[i]);
	}

	return (iregs->count * 4) + sizeof(*header);
}

/**
 * kgsl_snapshot_indexed_registers - Add a set of indexed registers to the
 * snapshot
 * @device: Pointer to the KGSL device being snapshotted
 * @snapshot: Snapshot instance
 * @index: Offset for the index register
 * @data: Offset for the data register
 * @start: Index to start reading
 * @count: Number of entries to read
 *
 * Dump the values from an indexed register group into the snapshot
 */
void kgsl_snapshot_indexed_registers(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot,
		unsigned int index, unsigned int data,
		unsigned int start,
		unsigned int count)
{
	struct kgsl_snapshot_indexed_registers iregs;

	iregs.index = index;
	iregs.data = data;
	iregs.start = start;
	iregs.count = count;

	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_INDEXED_REGS,
		snapshot, kgsl_snapshot_dump_indexed_regs, &iregs);
}
EXPORT_SYMBOL(kgsl_snapshot_indexed_registers);

/**
 * kgsl_snapshot_add_section() - Add a new section to the GPU snapshot
 * @device: the KGSL device being snapshotted
 * @id: the section id
 * @snapshot: pointer to the snapshot instance
 * @func: Function pointer to fill the section
 * @priv: Private pointer to pass to the function
 *
 * Set up a KGSL snapshot section by calling the callback function to fill
 * the section body and then adding the standard section header
 */
void kgsl_snapshot_add_section(struct kgsl_device *device, u16 id,
	struct kgsl_snapshot *snapshot,
	size_t (*func)(struct kgsl_device *, u8 *, size_t, void *),
	void *priv)
{
	struct kgsl_snapshot_section_header *header =
		(struct kgsl_snapshot_section_header *)snapshot->ptr;
	u8 *data = snapshot->ptr + sizeof(*header);
	size_t ret = 0;

	/*
	 * Sanity check to make sure there is enough for the header. The
	 * callback will check to make sure there is enough for the rest
	 * of the data. If there isn't enough room then don't advance the
	 * pointer.
	 */

	if (snapshot->remain < sizeof(*header))
		return;

	/* It is legal to have no function (i.e. - make an empty section) */
	if (func) {
		ret = func(device, data, snapshot->remain - sizeof(*header),
			priv);

		/*
		 * If there wasn't enough room for the data then don't bother
		 * setting up the header.
		 */

		if (ret == 0)
			return;
	}

	header->magic = SNAPSHOT_SECTION_MAGIC;
	header->id = id;
	header->size = ret + sizeof(*header);

	snapshot->ptr += header->size;
	snapshot->remain -= header->size;
	snapshot->size += header->size;
}

/**
 * kgsl_device_snapshot() - construct a device snapshot
 * @device: device to snapshot
 * @context: the context that is hung, might be NULL if unknown.
 *
 * Given a device, construct a binary snapshot dump of the current device state
 * and store it in the device snapshot memory.
 */
void kgsl_device_snapshot(struct kgsl_device *device,
	struct kgsl_context *context)
{
	struct kgsl_snapshot_header *header = device->snapshot_memory.ptr;
	struct kgsl_snapshot *snapshot;
	struct timespec boot;
	phys_addr_t pa;

	if (device->snapshot_memory.ptr == NULL) {
		KGSL_DRV_ERR(device,
			"snapshot: no snapshot memory available\n");
		return;
	}

	if (WARN(!kgsl_state_is_awake(device),
		"snapshot: device is powered off\n"))
		return;

	/* increment the hang count for good bookkeeping */
	device->snapshot_faultcount++;

	/*
	 * The first hang is always the one we are interested in. Don't capture
	 * a new snapshot instance if the old one hasn't been grabbed yet
	 */
	if (device->snapshot != NULL)
		return;

	/* Allocate memory for the snapshot instance */
	snapshot = kzalloc(sizeof(*snapshot), GFP_KERNEL);
	if (snapshot == NULL)
		return;

	init_completion(&snapshot->dump_gate);
	INIT_LIST_HEAD(&snapshot->obj_list);
	INIT_LIST_HEAD(&snapshot->cp_list);
	INIT_WORK(&snapshot->work, kgsl_snapshot_save_frozen_objs);

	snapshot->start = device->snapshot_memory.ptr;
	snapshot->ptr = device->snapshot_memory.ptr;
	snapshot->remain = device->snapshot_memory.size;
	atomic_set(&snapshot->sysfs_read, 0);

	header = (struct kgsl_snapshot_header *) snapshot->ptr;

	header->magic = SNAPSHOT_MAGIC;
	header->gpuid = kgsl_gpuid(device, &header->chipid);

	snapshot->ptr += sizeof(*header);
	snapshot->remain -= sizeof(*header);
	snapshot->size += sizeof(*header);

	/* Build the Linux specific header */
	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_OS,
		snapshot, snapshot_os, NULL);

	/* Get the device specific sections */
	if (device->ftbl->snapshot)
		device->ftbl->snapshot(device, snapshot, context);

	/*
	 * The timestamp is the seconds since boot so it is easier to match to
	 * the kernel log
	 */

	getboottime(&boot);
	snapshot->timestamp = get_seconds() - boot.tv_sec;

	/* Store the instance in the device until it gets dumped */
	device->snapshot = snapshot;

	/* log buffer info to aid in ramdump fault tolerance */
	pa = __pa(device->snapshot_memory.ptr);
	KGSL_DRV_ERR(device, "snapshot created at pa %pa size %zd\n",
			&pa, snapshot->size);

	sysfs_notify(&device->snapshot_kobj, NULL, "timestamp");

	/*
	 * Queue a work item that will save the IB data in snapshot into
	 * static memory to prevent loss of data due to overwriting of
	 * memory.
	 */
	kgsl_schedule_work(&snapshot->work);
}
EXPORT_SYMBOL(kgsl_device_snapshot);

/* An attribute for showing snapshot details */
struct kgsl_snapshot_attribute {
	struct attribute attr;
	ssize_t (*show)(struct kgsl_device *device, char *buf);
	ssize_t (*store)(struct kgsl_device *device, const char *buf,
		size_t count);
};

/**
 * kgsl_snapshot_process_ib_obj_list() - Go through the list of IBs which need
 * to be dumped for snapshot and move them to the global snapshot list so
 * they will get dumped when the global list is dumped
 * @snapshot: the snapshot being processed
 */
static void kgsl_snapshot_process_ib_obj_list(struct kgsl_snapshot *snapshot)
{
	struct kgsl_snapshot_cp_obj *obj, *obj_temp;
	struct adreno_ib_object *ib_obj;
	int i;

	list_for_each_entry_safe(obj, obj_temp, &snapshot->cp_list,
			node) {
		for (i = 0; i < obj->ib_obj_list->num_objs; i++) {
			ib_obj = &(obj->ib_obj_list->obj_list[i]);
			kgsl_snapshot_get_object(snapshot, ib_obj->entry->priv,
				ib_obj->gpuaddr, ib_obj->size,
				ib_obj->snapshot_obj_type);
		}
		list_del(&obj->node);
		adreno_ib_destroy_obj_list(obj->ib_obj_list);
		kfree(obj);
	}
}

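/* Map generic sysfs attribute/kobject pointers back to the KGSL wrappers */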
#define to_snapshot_attr(a) \
container_of(a, struct kgsl_snapshot_attribute, attr)

#define kobj_to_device(a) \
container_of(a, struct kgsl_device, snapshot_kobj)

/* Dump the sysfs binary data to the user */
static ssize_t snapshot_show(struct file *filep, struct kobject *kobj,
	struct bin_attribute *attr, char *buf, loff_t off,
	size_t count)
{
	struct kgsl_device *device = kobj_to_device(kobj);
	struct kgsl_snapshot *snapshot;
	struct kgsl_snapshot_object *obj, *tmp;
	struct kgsl_snapshot_section_header head;
	struct snapshot_obj_itr itr;
	int ret;

	if (device == NULL)
		return 0;

	mutex_lock(&device->mutex);
	snapshot = device->snapshot;
	if (snapshot != NULL)
		atomic_inc(&snapshot->sysfs_read);
	mutex_unlock(&device->mutex);

	/* Return nothing if we haven't taken a snapshot yet */
	if (snapshot == NULL)
		return 0;

	/*
	 * Wait for the dump worker to finish. This is interruptible
	 * to allow userspace to bail if things go horribly wrong.
	 */
	ret = wait_for_completion_interruptible(&snapshot->dump_gate);
	if (ret) {
		atomic_dec(&snapshot->sysfs_read);
		return ret;
	}

	obj_itr_init(&itr, buf, off, count);

	ret = obj_itr_out(&itr, snapshot->start, snapshot->size);
	if (ret == 0)
		goto done;

	/* Dump the memory pool if it exists */
	if (snapshot->mempool) {
		ret = obj_itr_out(&itr, snapshot->mempool,
				snapshot->mempool_size);
		if (ret == 0)
			goto done;
	}

	{
		head.magic = SNAPSHOT_SECTION_MAGIC;
		head.id = KGSL_SNAPSHOT_SECTION_END;
		head.size = sizeof(head);

		obj_itr_out(&itr, &head, sizeof(head));
	}

	/*
	 * Make sure everything has been written out before destroying things.
	 * The best way to confirm this is to go all the way through without
	 * writing any bytes - so only release if we get this far and
	 * itr->write is 0 and there are no concurrent reads pending
	 */

	if (itr.write == 0) {
		bool snapshot_free = false;

		mutex_lock(&device->mutex);
		if (atomic_dec_and_test(&snapshot->sysfs_read)) {
			device->snapshot = NULL;
			snapshot_free = true;
		}
		mutex_unlock(&device->mutex);

		if (snapshot_free) {
			list_for_each_entry_safe(obj, tmp,
					&snapshot->obj_list, node)
				kgsl_snapshot_put_object(obj);

			if (snapshot->mempool)
				vfree(snapshot->mempool);

			kfree(snapshot);
			KGSL_CORE_ERR("snapshot: objects released\n");
		}
		return 0;
	}

done:
	atomic_dec(&snapshot->sysfs_read);
	return itr.write;
}

/* Show the total number of hangs since device boot */
static ssize_t faultcount_show(struct kgsl_device *device, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", device->snapshot_faultcount);
}

/* Reset the total number of hangs since device boot */
static ssize_t faultcount_store(struct kgsl_device *device, const char *buf,
	size_t count)
{
	if (device && count > 0)
		device->snapshot_faultcount = 0;

	return count;
}

/* Show the force_panic request status */
static ssize_t force_panic_show(struct kgsl_device *device, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", device->force_panic);
}

/* Store the panic request value to force_panic */
static ssize_t force_panic_store(struct kgsl_device *device, const char *buf,
	size_t count)
{
	unsigned int val = 0;
	int ret;

	if (device && count > 0)
		device->force_panic = 0;

	ret = kgsl_sysfs_store(buf, &val);

	if (!ret && device)
		device->force_panic = (bool)val;

	return (ssize_t) ret < 0 ? ret : count;
}

/* Show the snapshot_crashdumper request status */
static ssize_t snapshot_crashdumper_show(struct kgsl_device *device, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", device->snapshot_crashdumper);
}

/* Store the value to snapshot_crashdumper */
static ssize_t snapshot_crashdumper_store(struct kgsl_device *device,
	const char *buf, size_t count)
{
	unsigned int val = 0;
	int ret;

	if (device && count > 0)
		device->snapshot_crashdumper = 1;

	ret = kgsl_sysfs_store(buf, &val);

	if (!ret && device)
		device->snapshot_crashdumper = (bool)val;

	return (ssize_t) ret < 0 ? ret : count;
}

/* Show the timestamp of the last collected snapshot */
static ssize_t timestamp_show(struct kgsl_device *device, char *buf)
{
	unsigned long timestamp =
		device->snapshot ? device->snapshot->timestamp : 0;

	return snprintf(buf, PAGE_SIZE, "%lu\n", timestamp);
}

static struct bin_attribute snapshot_attr = {
	.attr.name = "dump",
	.attr.mode = 0444,
	.size = 0,
	.read = snapshot_show
};

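/* Define a snapshot sysfs attribute along with its show/store handlers */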
#define SNAPSHOT_ATTR(_name, _mode, _show, _store) \
struct kgsl_snapshot_attribute attr_##_name = { \
	.attr = { .name = __stringify(_name), .mode = _mode }, \
	.show = _show, \
	.store = _store, \
}

static SNAPSHOT_ATTR(timestamp, 0444, timestamp_show, NULL);
static SNAPSHOT_ATTR(faultcount, 0644, faultcount_show, faultcount_store);
static SNAPSHOT_ATTR(force_panic, 0644, force_panic_show, force_panic_store);
static SNAPSHOT_ATTR(snapshot_crashdumper, 0644, snapshot_crashdumper_show,
	snapshot_crashdumper_store);

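/* Route sysfs show/store callbacks to the attribute-specific handlers */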
static ssize_t snapshot_sysfs_show(struct kobject *kobj,
	struct attribute *attr, char *buf)
{
	struct kgsl_snapshot_attribute *pattr = to_snapshot_attr(attr);
	struct kgsl_device *device = kobj_to_device(kobj);
	ssize_t ret;

	if (device && pattr->show)
		ret = pattr->show(device, buf);
	else
		ret = -EIO;

	return ret;
}

static ssize_t snapshot_sysfs_store(struct kobject *kobj,
	struct attribute *attr, const char *buf, size_t count)
{
	struct kgsl_snapshot_attribute *pattr = to_snapshot_attr(attr);
	struct kgsl_device *device = kobj_to_device(kobj);
	ssize_t ret;

	if (device && pattr->store)
		ret = pattr->store(device, buf, count);
	else
		ret = -EIO;

	return ret;
}

static const struct sysfs_ops snapshot_sysfs_ops = {
	.show = snapshot_sysfs_show,
	.store = snapshot_sysfs_store,
};

static struct kobj_type ktype_snapshot = {
	.sysfs_ops = &snapshot_sysfs_ops,
};

/**
 * kgsl_device_snapshot_init() - add resources for the device GPU snapshot
 * @device: The device to initialize
 *
 * Allocate memory for a GPU snapshot for the specified device,
 * and create the sysfs files to manage it
 */
int kgsl_device_snapshot_init(struct kgsl_device *device)
{
	int ret;

	if (kgsl_property_read_u32(device, "qcom,snapshot-size",
		(unsigned int *) &(device->snapshot_memory.size)))
		device->snapshot_memory.size = KGSL_SNAPSHOT_MEMSIZE;

	/*
	 * Choosing a memory size of 0 is essentially the same as disabling
	 * snapshotting
	 */
	if (device->snapshot_memory.size == 0)
		return 0;

	/*
	 * I'm not sure why anybody would choose to do so but make sure
	 * that we can at least fit the snapshot header in the requested
	 * region
	 */
	if (device->snapshot_memory.size < sizeof(struct kgsl_snapshot_header))
		device->snapshot_memory.size =
			sizeof(struct kgsl_snapshot_header);

	device->snapshot_memory.ptr = kzalloc(device->snapshot_memory.size,
		GFP_KERNEL);

	if (device->snapshot_memory.ptr == NULL)
		return -ENOMEM;

	device->snapshot = NULL;
	device->snapshot_faultcount = 0;
	device->force_panic = 0;
	device->snapshot_crashdumper = 1;

	ret = kobject_init_and_add(&device->snapshot_kobj, &ktype_snapshot,
		&device->dev->kobj, "snapshot");
	if (ret)
		goto done;

	ret = sysfs_create_bin_file(&device->snapshot_kobj, &snapshot_attr);
	if (ret)
		goto done;

	ret = sysfs_create_file(&device->snapshot_kobj, &attr_timestamp.attr);
	if (ret)
		goto done;

	ret = sysfs_create_file(&device->snapshot_kobj, &attr_faultcount.attr);
	if (ret)
		goto done;

	ret = sysfs_create_file(&device->snapshot_kobj,
			&attr_force_panic.attr);
	if (ret)
		goto done;

	ret = sysfs_create_file(&device->snapshot_kobj,
			&attr_snapshot_crashdumper.attr);
done:
	return ret;
}
EXPORT_SYMBOL(kgsl_device_snapshot_init);

/**
 * kgsl_device_snapshot_close() - take down snapshot memory for a device
 * @device: Pointer to the kgsl_device
 *
 * Remove the sysfs files and free the memory allocated for the GPU
 * snapshot
 */
void kgsl_device_snapshot_close(struct kgsl_device *device)
{
	sysfs_remove_bin_file(&device->snapshot_kobj, &snapshot_attr);
	sysfs_remove_file(&device->snapshot_kobj, &attr_timestamp.attr);

	kobject_put(&device->snapshot_kobj);

	kfree(device->snapshot_memory.ptr);

	device->snapshot_memory.ptr = NULL;
	device->snapshot_memory.size = 0;
	device->snapshot_faultcount = 0;
	device->force_panic = 0;
	device->snapshot_crashdumper = 1;
}
EXPORT_SYMBOL(kgsl_device_snapshot_close);

/**
 * kgsl_snapshot_add_ib_obj_list() - Add an IB object list to the snapshot
 * object list
 * @snapshot: the snapshot that the IB object list is added to
 * @ib_obj_list: The IB list that has objects required to execute an IB
 *
 * Adds a new IB to the list of IB objects maintained when getting snapshot
 * Returns 0 on success else -ENOMEM on error
 */
int kgsl_snapshot_add_ib_obj_list(struct kgsl_snapshot *snapshot,
	struct adreno_ib_object_list *ib_obj_list)
{
	struct kgsl_snapshot_cp_obj *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;
	obj->ib_obj_list = ib_obj_list;
	list_add(&obj->node, &snapshot->cp_list);
	return 0;
}

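/*
 * Copy one frozen GPU object into the snapshot mempool, wrapping it in a
 * GPU_OBJECT_V2 section header so it can be streamed out with the rest of
 * the dump. Returns the number of bytes added, or 0 if the object could
 * not be mapped.
 */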
static size_t _mempool_add_object(struct kgsl_snapshot *snapshot, u8 *data,
	struct kgsl_snapshot_object *obj)
{
	struct kgsl_snapshot_section_header *section =
		(struct kgsl_snapshot_section_header *)data;
	struct kgsl_snapshot_gpu_object_v2 *header =
		(struct kgsl_snapshot_gpu_object_v2 *)(data + sizeof(*section));
	u8 *dest = data + sizeof(*section) + sizeof(*header);
	uint64_t size;

	size = obj->size;

	if (!kgsl_memdesc_map(&obj->entry->memdesc)) {
		KGSL_CORE_ERR("snapshot: failed to map GPU object\n");
		return 0;
	}

	section->magic = SNAPSHOT_SECTION_MAGIC;
	section->id = KGSL_SNAPSHOT_SECTION_GPU_OBJECT_V2;
	section->size = size + sizeof(*header) + sizeof(*section);

	header->size = size >> 2;
	header->gpuaddr = obj->gpuaddr;
	header->ptbase =
		kgsl_mmu_pagetable_get_ttbr0(obj->entry->priv->pagetable);
	header->type = obj->type;

	if (kgsl_addr_range_overlap(obj->gpuaddr, obj->size,
		snapshot->ib1base, snapshot->ib1size))
		snapshot->ib1dumped = true;

	if (kgsl_addr_range_overlap(obj->gpuaddr, obj->size,
		snapshot->ib2base, snapshot->ib2size))
		snapshot->ib2dumped = true;

	memcpy(dest, obj->entry->memdesc.hostptr + obj->offset, size);
	kgsl_memdesc_unmap(&obj->entry->memdesc);

	return section->size;
}

/**
 * kgsl_snapshot_save_frozen_objs() - Save the objects frozen in snapshot into
 * memory so that the data reported in these objects is correct when snapshot
 * is taken
 * @work: The work item that scheduled this work
 */
void kgsl_snapshot_save_frozen_objs(struct work_struct *work)
{
	struct kgsl_snapshot *snapshot = container_of(work,
		struct kgsl_snapshot, work);
	struct kgsl_device *device = kgsl_get_device(KGSL_DEVICE_3D0);
	struct kgsl_snapshot_object *obj, *tmp;
	size_t size = 0;
	void *ptr;

	if (IS_ERR_OR_NULL(device))
		return;

	kgsl_snapshot_process_ib_obj_list(snapshot);

	list_for_each_entry(obj, &snapshot->obj_list, node) {
		obj->size = ALIGN(obj->size, 4);

		size += ((size_t) obj->size +
			sizeof(struct kgsl_snapshot_gpu_object_v2) +
			sizeof(struct kgsl_snapshot_section_header));
	}

	if (size == 0)
		goto done;

	snapshot->mempool = vmalloc(size);

	ptr = snapshot->mempool;
	snapshot->mempool_size = 0;

	/* even if vmalloc fails, make sure we clean up the obj_list */
	list_for_each_entry_safe(obj, tmp, &snapshot->obj_list, node) {
		if (snapshot->mempool) {
			size_t ret = _mempool_add_object(snapshot, ptr, obj);

			ptr += ret;
			snapshot->mempool_size += ret;
		}

		kgsl_snapshot_put_object(obj);
	}
done:
	/*
	 * Get rid of the process struct here, so that it doesn't sit
	 * around until someone bothers to read the snapshot file.
	 */
	kgsl_process_private_put(snapshot->process);
	snapshot->process = NULL;

	if (snapshot->ib1base && !snapshot->ib1dumped)
		KGSL_DRV_ERR(device,
			"snapshot: Active IB1:%016llx not dumped\n",
			snapshot->ib1base);
	else if (snapshot->ib2base && !snapshot->ib2dumped)
		KGSL_DRV_ERR(device,
			"snapshot: Active IB2:%016llx not dumped\n",
			snapshot->ib2base);

	complete_all(&snapshot->dump_gate);
	BUG_ON(device->force_panic);
}