blob: 9d839b4fd8f78678535373ecab81603697248639 [file] [log] [blame]
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */
5
6#include <linux/devcoredump.h>
Lucas Stachea1f5722017-01-16 16:09:51 +01007#include "etnaviv_cmdbuf.h"
The etnaviv authorsa8c21a52015-12-03 18:21:29 +01008#include "etnaviv_dump.h"
9#include "etnaviv_gem.h"
10#include "etnaviv_gpu.h"
11#include "etnaviv_mmu.h"
Lucas Stach6d7a20c2017-12-06 10:53:27 +010012#include "etnaviv_sched.h"
The etnaviv authorsa8c21a52015-12-03 18:21:29 +010013#include "state.xml.h"
14#include "state_hi.xml.h"
15
/*
 * Arm the core-dump mechanism. etnaviv_core_dump() clears this on the first
 * hang it handles, so only one dump is taken until user space re-arms it by
 * writing 1 to the dump_core module parameter (mode 0600).
 */
static bool etnaviv_dump_core = true;
module_param_named(dump_core, etnaviv_dump_core, bool, 0600);
18
/*
 * Write cursor for building the devcoredump file: the header table is filled
 * from the front of the buffer while object payloads are appended at the
 * current data position. etnaviv_core_dump_header() advances both.
 */
struct core_dump_iterator {
	void *start;	/* base of the vmalloc'ed dump buffer */
	struct etnaviv_dump_object_header *hdr;	/* next header slot to fill */
	void *data;	/* next payload write position */
};
24
/*
 * MMIO registers snapshotted into the ETDUMP_BUF_REG section of the dump,
 * in the order they are written out.
 */
static const unsigned short etnaviv_dump_registers[] = {
	VIVS_HI_AXI_STATUS,
	VIVS_HI_CLOCK_CONTROL,
	VIVS_HI_IDLE_STATE,
	VIVS_HI_AXI_CONFIG,
	VIVS_HI_INTR_ENBL,
	VIVS_HI_CHIP_IDENTITY,
	VIVS_HI_CHIP_FEATURE,
	VIVS_HI_CHIP_MODEL,
	VIVS_HI_CHIP_REV,
	VIVS_HI_CHIP_DATE,
	VIVS_HI_CHIP_TIME,
	VIVS_HI_CHIP_MINOR_FEATURE_0,
	VIVS_HI_CACHE_CONTROL,
	VIVS_HI_AXI_CONTROL,
	VIVS_PM_POWER_CONTROLS,
	VIVS_PM_MODULE_CONTROLS,
	VIVS_PM_MODULE_STATUS,
	VIVS_PM_PULSE_EATER,
	VIVS_MC_MMU_FE_PAGE_TABLE,
	VIVS_MC_MMU_TX_PAGE_TABLE,
	VIVS_MC_MMU_PE_PAGE_TABLE,
	VIVS_MC_MMU_PEZ_PAGE_TABLE,
	VIVS_MC_MMU_RA_PAGE_TABLE,
	VIVS_MC_DEBUG_MEMORY,
	VIVS_MC_MEMORY_BASE_ADDR_RA,
	VIVS_MC_MEMORY_BASE_ADDR_FE,
	VIVS_MC_MEMORY_BASE_ADDR_TX,
	VIVS_MC_MEMORY_BASE_ADDR_PEZ,
	VIVS_MC_MEMORY_BASE_ADDR_PE,
	VIVS_MC_MEMORY_TIMING_CONTROL,
	VIVS_MC_BUS_CONFIG,
	VIVS_FE_DMA_STATUS,
	VIVS_FE_DMA_DEBUG_STATE,
	VIVS_FE_DMA_ADDRESS,
	VIVS_FE_DMA_LOW,
	VIVS_FE_DMA_HIGH,
	VIVS_FE_AUTO_FLUSH,
};
64
65static void etnaviv_core_dump_header(struct core_dump_iterator *iter,
66 u32 type, void *data_end)
67{
68 struct etnaviv_dump_object_header *hdr = iter->hdr;
69
70 hdr->magic = cpu_to_le32(ETDUMP_MAGIC);
71 hdr->type = cpu_to_le32(type);
72 hdr->file_offset = cpu_to_le32(iter->data - iter->start);
73 hdr->file_size = cpu_to_le32(data_end - iter->data);
74
75 iter->hdr++;
76 iter->data += hdr->file_size;
77}
78
79static void etnaviv_core_dump_registers(struct core_dump_iterator *iter,
80 struct etnaviv_gpu *gpu)
81{
82 struct etnaviv_dump_registers *reg = iter->data;
83 unsigned int i;
84
85 for (i = 0; i < ARRAY_SIZE(etnaviv_dump_registers); i++, reg++) {
86 reg->reg = etnaviv_dump_registers[i];
87 reg->value = gpu_read(gpu, etnaviv_dump_registers[i]);
88 }
89
90 etnaviv_core_dump_header(iter, ETDUMP_BUF_REG, reg);
91}
92
93static void etnaviv_core_dump_mmu(struct core_dump_iterator *iter,
94 struct etnaviv_gpu *gpu, size_t mmu_size)
95{
96 etnaviv_iommu_dump(gpu->mmu, iter->data);
97
98 etnaviv_core_dump_header(iter, ETDUMP_BUF_MMU, iter->data + mmu_size);
99}
100
101static void etnaviv_core_dump_mem(struct core_dump_iterator *iter, u32 type,
102 void *ptr, size_t size, u64 iova)
103{
104 memcpy(iter->data, ptr, size);
105
106 iter->hdr->iova = cpu_to_le64(iova);
107
108 etnaviv_core_dump_header(iter, type, iter->data + size);
109}
110
/*
 * Build a devcoredump file for a hung GPU: registers, MMU page tables, the
 * kernel ring buffer, every command buffer still on the scheduler's mirror
 * list, a page map (bomap) and the contents of all active buffer objects.
 * The whole dump is assembled under gpu->mmu->lock and handed to
 * dev_coredumpv(), which takes ownership of the vmalloc'ed buffer.
 */
void etnaviv_core_dump(struct etnaviv_gpu *gpu)
{
	struct core_dump_iterator iter;
	struct etnaviv_vram_mapping *vram;
	struct etnaviv_gem_object *obj;
	struct etnaviv_gem_submit *submit;
	struct drm_sched_job *s_job;
	unsigned int n_obj, n_bomap_pages;
	size_t file_size, mmu_size;
	__le64 *bomap, *bomap_start;

	/* Only catch the first event, or when manually re-armed */
	if (!etnaviv_dump_core)
		return;
	etnaviv_dump_core = false;

	/* Holds off MMU map/unmap so the mappings list and sizes stay stable. */
	mutex_lock(&gpu->mmu->lock);

	mmu_size = etnaviv_iommu_dump_size(gpu->mmu);

	/* We always dump registers, mmu, ring and end marker */
	n_obj = 4;
	n_bomap_pages = 0;
	file_size = ARRAY_SIZE(etnaviv_dump_registers) *
			sizeof(struct etnaviv_dump_registers) +
		    mmu_size + gpu->buffer.size;

	/*
	 * Add in the active command buffers.
	 * NOTE(review): this list is walked twice under two separate
	 * spin_lock sections (sizing here, dumping below); the code assumes
	 * the set of jobs does not shrink/grow in between — confirm the
	 * scheduler is stopped at this point.
	 */
	spin_lock(&gpu->sched.job_list_lock);
	list_for_each_entry(s_job, &gpu->sched.ring_mirror_list, node) {
		submit = to_etnaviv_submit(s_job);
		file_size += submit->cmdbuf.size;
		n_obj++;
	}
	spin_unlock(&gpu->sched.job_list_lock);

	/* Add in the active buffer objects */
	list_for_each_entry(vram, &gpu->mmu->mappings, mmu_node) {
		if (!vram->use)
			continue;

		obj = vram->object;
		file_size += obj->base.size;
		n_bomap_pages += obj->base.size >> PAGE_SHIFT;
		n_obj++;
	}

	/* If we have any buffer objects, add a bomap object */
	if (n_bomap_pages) {
		file_size += n_bomap_pages * sizeof(__le64);
		n_obj++;
	}

	/* Add the size of the headers */
	file_size += sizeof(*iter.hdr) * n_obj;

	/* Allocate the file in vmalloc memory, it's likely to be big */
	iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
			       PAGE_KERNEL);
	if (!iter.start) {
		mutex_unlock(&gpu->mmu->lock);
		dev_warn(gpu->dev, "failed to allocate devcoredump file\n");
		return;
	}

	/* Point the data member after the headers */
	iter.hdr = iter.start;
	iter.data = &iter.hdr[n_obj];

	/* Zero only the header table; payload bytes are all written below. */
	memset(iter.hdr, 0, iter.data - iter.start);

	etnaviv_core_dump_registers(&iter, gpu);
	etnaviv_core_dump_mmu(&iter, gpu, mmu_size);
	etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer.vaddr,
			      gpu->buffer.size,
			      etnaviv_cmdbuf_get_va(&gpu->buffer));

	/* Second pass over the mirror list: dump each command buffer. */
	spin_lock(&gpu->sched.job_list_lock);
	list_for_each_entry(s_job, &gpu->sched.ring_mirror_list, node) {
		submit = to_etnaviv_submit(s_job);
		etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD,
				      submit->cmdbuf.vaddr, submit->cmdbuf.size,
				      etnaviv_cmdbuf_get_va(&submit->cmdbuf));
	}
	spin_unlock(&gpu->sched.job_list_lock);

	/* Reserve space for the bomap */
	if (n_bomap_pages) {
		bomap_start = bomap = iter.data;
		memset(bomap, 0, sizeof(*bomap) * n_bomap_pages);
		etnaviv_core_dump_header(&iter, ETDUMP_BUF_BOMAP,
					 bomap + n_bomap_pages);
	} else {
		/* Silence warning */
		bomap_start = bomap = NULL;
	}

	list_for_each_entry(vram, &gpu->mmu->mappings, mmu_node) {
		struct page **pages;
		void *vaddr;

		if (vram->use == 0)
			continue;

		obj = vram->object;

		mutex_lock(&obj->lock);
		pages = etnaviv_gem_get_pages(obj);
		mutex_unlock(&obj->lock);
		if (!IS_ERR(pages)) {
			int j;

			/*
			 * Index of this object's first entry in the bomap.
			 * NOTE(review): stored without cpu_to_le64() while
			 * the page addresses below are converted — confirm
			 * whether hdr->data[] is meant to be little-endian.
			 */
			iter.hdr->data[0] = bomap - bomap_start;

			for (j = 0; j < obj->base.size >> PAGE_SHIFT; j++)
				*bomap++ = cpu_to_le64(page_to_phys(*pages++));
		}

		iter.hdr->iova = cpu_to_le64(vram->iova);

		/* Best effort: if vmap fails the BO section stays zeroed. */
		vaddr = etnaviv_gem_vmap(&obj->base);
		if (vaddr)
			memcpy(iter.data, vaddr, obj->base.size);

		etnaviv_core_dump_header(&iter, ETDUMP_BUF_BO, iter.data +
					 obj->base.size);
	}

	mutex_unlock(&gpu->mmu->lock);

	etnaviv_core_dump_header(&iter, ETDUMP_BUF_END, iter.data);

	/* dev_coredumpv() takes ownership of iter.start and frees it later. */
	dev_coredumpv(gpu->dev, iter.start, iter.data - iter.start, GFP_KERNEL);
}