Lucas Stach | f6ffbd4 | 2018-05-08 16:20:54 +0200 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0 |
The etnaviv authors | a8c21a5 | 2015-12-03 18:21:29 +0100 | [diff] [blame] | 2 | /* |
Lucas Stach | f6ffbd4 | 2018-05-08 16:20:54 +0200 | [diff] [blame] | 3 | * Copyright (C) 2015-2018 Etnaviv Project |
The etnaviv authors | a8c21a5 | 2015-12-03 18:21:29 +0100 | [diff] [blame] | 4 | */ |
| 5 | |
| 6 | #include <linux/devcoredump.h> |
Lucas Stach | ea1f572 | 2017-01-16 16:09:51 +0100 | [diff] [blame] | 7 | #include "etnaviv_cmdbuf.h" |
The etnaviv authors | a8c21a5 | 2015-12-03 18:21:29 +0100 | [diff] [blame] | 8 | #include "etnaviv_dump.h" |
| 9 | #include "etnaviv_gem.h" |
| 10 | #include "etnaviv_gpu.h" |
| 11 | #include "etnaviv_mmu.h" |
Lucas Stach | 6d7a20c | 2017-12-06 10:53:27 +0100 | [diff] [blame] | 12 | #include "etnaviv_sched.h" |
The etnaviv authors | a8c21a5 | 2015-12-03 18:21:29 +0100 | [diff] [blame] | 13 | #include "state.xml.h" |
| 14 | #include "state_hi.xml.h" |
| 15 | |
/*
 * Gate for core dumping: true means the next dump request is honored,
 * after which the flag self-clears so only the first event is captured.
 * Writable at runtime (mode 0600) via the dump_core module parameter
 * to re-arm dumping.
 */
static bool etnaviv_dump_core = true;
module_param_named(dump_core, etnaviv_dump_core, bool, 0600);
| 18 | |
/*
 * Write cursor over the devcoredump file buffer. Object headers are
 * filled sequentially at the front while their payload data is appended
 * behind the header area; both pointers advance as objects are emitted.
 */
struct core_dump_iterator {
	void *start;		/* base of the vmalloc'ed dump buffer */
	struct etnaviv_dump_object_header *hdr;	/* next header slot */
	void *data;		/* next payload write position */
};
| 24 | |
/*
 * MMIO register offsets snapshotted into the ETDUMP_BUF_REG section of
 * the core dump: host interface, power management, memory controller
 * and front-end DMA state.
 */
static const unsigned short etnaviv_dump_registers[] = {
	VIVS_HI_AXI_STATUS,
	VIVS_HI_CLOCK_CONTROL,
	VIVS_HI_IDLE_STATE,
	VIVS_HI_AXI_CONFIG,
	VIVS_HI_INTR_ENBL,
	VIVS_HI_CHIP_IDENTITY,
	VIVS_HI_CHIP_FEATURE,
	VIVS_HI_CHIP_MODEL,
	VIVS_HI_CHIP_REV,
	VIVS_HI_CHIP_DATE,
	VIVS_HI_CHIP_TIME,
	VIVS_HI_CHIP_MINOR_FEATURE_0,
	VIVS_HI_CACHE_CONTROL,
	VIVS_HI_AXI_CONTROL,
	VIVS_PM_POWER_CONTROLS,
	VIVS_PM_MODULE_CONTROLS,
	VIVS_PM_MODULE_STATUS,
	VIVS_PM_PULSE_EATER,
	VIVS_MC_MMU_FE_PAGE_TABLE,
	VIVS_MC_MMU_TX_PAGE_TABLE,
	VIVS_MC_MMU_PE_PAGE_TABLE,
	VIVS_MC_MMU_PEZ_PAGE_TABLE,
	VIVS_MC_MMU_RA_PAGE_TABLE,
	VIVS_MC_DEBUG_MEMORY,
	VIVS_MC_MEMORY_BASE_ADDR_RA,
	VIVS_MC_MEMORY_BASE_ADDR_FE,
	VIVS_MC_MEMORY_BASE_ADDR_TX,
	VIVS_MC_MEMORY_BASE_ADDR_PEZ,
	VIVS_MC_MEMORY_BASE_ADDR_PE,
	VIVS_MC_MEMORY_TIMING_CONTROL,
	VIVS_MC_BUS_CONFIG,
	VIVS_FE_DMA_STATUS,
	VIVS_FE_DMA_DEBUG_STATE,
	VIVS_FE_DMA_ADDRESS,
	VIVS_FE_DMA_LOW,
	VIVS_FE_DMA_HIGH,
	VIVS_FE_AUTO_FLUSH,
};
| 64 | |
| 65 | static void etnaviv_core_dump_header(struct core_dump_iterator *iter, |
| 66 | u32 type, void *data_end) |
| 67 | { |
| 68 | struct etnaviv_dump_object_header *hdr = iter->hdr; |
| 69 | |
| 70 | hdr->magic = cpu_to_le32(ETDUMP_MAGIC); |
| 71 | hdr->type = cpu_to_le32(type); |
| 72 | hdr->file_offset = cpu_to_le32(iter->data - iter->start); |
| 73 | hdr->file_size = cpu_to_le32(data_end - iter->data); |
| 74 | |
| 75 | iter->hdr++; |
| 76 | iter->data += hdr->file_size; |
| 77 | } |
| 78 | |
| 79 | static void etnaviv_core_dump_registers(struct core_dump_iterator *iter, |
| 80 | struct etnaviv_gpu *gpu) |
| 81 | { |
| 82 | struct etnaviv_dump_registers *reg = iter->data; |
| 83 | unsigned int i; |
| 84 | |
| 85 | for (i = 0; i < ARRAY_SIZE(etnaviv_dump_registers); i++, reg++) { |
| 86 | reg->reg = etnaviv_dump_registers[i]; |
| 87 | reg->value = gpu_read(gpu, etnaviv_dump_registers[i]); |
| 88 | } |
| 89 | |
| 90 | etnaviv_core_dump_header(iter, ETDUMP_BUF_REG, reg); |
| 91 | } |
| 92 | |
| 93 | static void etnaviv_core_dump_mmu(struct core_dump_iterator *iter, |
| 94 | struct etnaviv_gpu *gpu, size_t mmu_size) |
| 95 | { |
| 96 | etnaviv_iommu_dump(gpu->mmu, iter->data); |
| 97 | |
| 98 | etnaviv_core_dump_header(iter, ETDUMP_BUF_MMU, iter->data + mmu_size); |
| 99 | } |
| 100 | |
| 101 | static void etnaviv_core_dump_mem(struct core_dump_iterator *iter, u32 type, |
| 102 | void *ptr, size_t size, u64 iova) |
| 103 | { |
| 104 | memcpy(iter->data, ptr, size); |
| 105 | |
| 106 | iter->hdr->iova = cpu_to_le64(iova); |
| 107 | |
| 108 | etnaviv_core_dump_header(iter, type, iter->data + size); |
| 109 | } |
| 110 | |
| 111 | void etnaviv_core_dump(struct etnaviv_gpu *gpu) |
| 112 | { |
| 113 | struct core_dump_iterator iter; |
| 114 | struct etnaviv_vram_mapping *vram; |
| 115 | struct etnaviv_gem_object *obj; |
Lucas Stach | 2f9225d | 2017-11-24 16:56:37 +0100 | [diff] [blame] | 116 | struct etnaviv_gem_submit *submit; |
Lucas Stach | 6d7a20c | 2017-12-06 10:53:27 +0100 | [diff] [blame] | 117 | struct drm_sched_job *s_job; |
The etnaviv authors | a8c21a5 | 2015-12-03 18:21:29 +0100 | [diff] [blame] | 118 | unsigned int n_obj, n_bomap_pages; |
| 119 | size_t file_size, mmu_size; |
| 120 | __le64 *bomap, *bomap_start; |
| 121 | |
Lucas Stach | 6d7a20c | 2017-12-06 10:53:27 +0100 | [diff] [blame] | 122 | /* Only catch the first event, or when manually re-armed */ |
| 123 | if (!etnaviv_dump_core) |
| 124 | return; |
| 125 | etnaviv_dump_core = false; |
| 126 | |
Lucas Stach | b7ca3f3 | 2019-05-21 14:53:40 +0200 | [diff] [blame] | 127 | mutex_lock(&gpu->mmu->lock); |
| 128 | |
The etnaviv authors | a8c21a5 | 2015-12-03 18:21:29 +0100 | [diff] [blame] | 129 | mmu_size = etnaviv_iommu_dump_size(gpu->mmu); |
| 130 | |
| 131 | /* We always dump registers, mmu, ring and end marker */ |
| 132 | n_obj = 4; |
| 133 | n_bomap_pages = 0; |
| 134 | file_size = ARRAY_SIZE(etnaviv_dump_registers) * |
| 135 | sizeof(struct etnaviv_dump_registers) + |
Lucas Stach | 2f9225d | 2017-11-24 16:56:37 +0100 | [diff] [blame] | 136 | mmu_size + gpu->buffer.size; |
The etnaviv authors | a8c21a5 | 2015-12-03 18:21:29 +0100 | [diff] [blame] | 137 | |
| 138 | /* Add in the active command buffers */ |
Lucas Stach | 6d7a20c | 2017-12-06 10:53:27 +0100 | [diff] [blame] | 139 | spin_lock(&gpu->sched.job_list_lock); |
| 140 | list_for_each_entry(s_job, &gpu->sched.ring_mirror_list, node) { |
| 141 | submit = to_etnaviv_submit(s_job); |
Lucas Stach | 2f9225d | 2017-11-24 16:56:37 +0100 | [diff] [blame] | 142 | file_size += submit->cmdbuf.size; |
The etnaviv authors | a8c21a5 | 2015-12-03 18:21:29 +0100 | [diff] [blame] | 143 | n_obj++; |
| 144 | } |
Lucas Stach | 6d7a20c | 2017-12-06 10:53:27 +0100 | [diff] [blame] | 145 | spin_unlock(&gpu->sched.job_list_lock); |
The etnaviv authors | a8c21a5 | 2015-12-03 18:21:29 +0100 | [diff] [blame] | 146 | |
| 147 | /* Add in the active buffer objects */ |
| 148 | list_for_each_entry(vram, &gpu->mmu->mappings, mmu_node) { |
| 149 | if (!vram->use) |
| 150 | continue; |
| 151 | |
| 152 | obj = vram->object; |
| 153 | file_size += obj->base.size; |
| 154 | n_bomap_pages += obj->base.size >> PAGE_SHIFT; |
| 155 | n_obj++; |
| 156 | } |
| 157 | |
| 158 | /* If we have any buffer objects, add a bomap object */ |
| 159 | if (n_bomap_pages) { |
| 160 | file_size += n_bomap_pages * sizeof(__le64); |
| 161 | n_obj++; |
| 162 | } |
| 163 | |
| 164 | /* Add the size of the headers */ |
| 165 | file_size += sizeof(*iter.hdr) * n_obj; |
| 166 | |
| 167 | /* Allocate the file in vmalloc memory, it's likely to be big */ |
Michal Hocko | 19809c2 | 2017-05-08 15:57:44 -0700 | [diff] [blame] | 168 | iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY, |
| 169 | PAGE_KERNEL); |
The etnaviv authors | a8c21a5 | 2015-12-03 18:21:29 +0100 | [diff] [blame] | 170 | if (!iter.start) { |
Lucas Stach | b7ca3f3 | 2019-05-21 14:53:40 +0200 | [diff] [blame] | 171 | mutex_unlock(&gpu->mmu->lock); |
The etnaviv authors | a8c21a5 | 2015-12-03 18:21:29 +0100 | [diff] [blame] | 172 | dev_warn(gpu->dev, "failed to allocate devcoredump file\n"); |
| 173 | return; |
| 174 | } |
| 175 | |
| 176 | /* Point the data member after the headers */ |
| 177 | iter.hdr = iter.start; |
| 178 | iter.data = &iter.hdr[n_obj]; |
| 179 | |
| 180 | memset(iter.hdr, 0, iter.data - iter.start); |
| 181 | |
| 182 | etnaviv_core_dump_registers(&iter, gpu); |
| 183 | etnaviv_core_dump_mmu(&iter, gpu, mmu_size); |
Lucas Stach | 2f9225d | 2017-11-24 16:56:37 +0100 | [diff] [blame] | 184 | etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer.vaddr, |
| 185 | gpu->buffer.size, |
| 186 | etnaviv_cmdbuf_get_va(&gpu->buffer)); |
The etnaviv authors | a8c21a5 | 2015-12-03 18:21:29 +0100 | [diff] [blame] | 187 | |
Lucas Stach | 6d7a20c | 2017-12-06 10:53:27 +0100 | [diff] [blame] | 188 | spin_lock(&gpu->sched.job_list_lock); |
| 189 | list_for_each_entry(s_job, &gpu->sched.ring_mirror_list, node) { |
| 190 | submit = to_etnaviv_submit(s_job); |
Lucas Stach | 2f9225d | 2017-11-24 16:56:37 +0100 | [diff] [blame] | 191 | etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD, |
| 192 | submit->cmdbuf.vaddr, submit->cmdbuf.size, |
| 193 | etnaviv_cmdbuf_get_va(&submit->cmdbuf)); |
Lucas Stach | 6d7a20c | 2017-12-06 10:53:27 +0100 | [diff] [blame] | 194 | } |
| 195 | spin_unlock(&gpu->sched.job_list_lock); |
The etnaviv authors | a8c21a5 | 2015-12-03 18:21:29 +0100 | [diff] [blame] | 196 | |
| 197 | /* Reserve space for the bomap */ |
| 198 | if (n_bomap_pages) { |
| 199 | bomap_start = bomap = iter.data; |
| 200 | memset(bomap, 0, sizeof(*bomap) * n_bomap_pages); |
| 201 | etnaviv_core_dump_header(&iter, ETDUMP_BUF_BOMAP, |
| 202 | bomap + n_bomap_pages); |
| 203 | } else { |
| 204 | /* Silence warning */ |
| 205 | bomap_start = bomap = NULL; |
| 206 | } |
| 207 | |
| 208 | list_for_each_entry(vram, &gpu->mmu->mappings, mmu_node) { |
| 209 | struct page **pages; |
| 210 | void *vaddr; |
| 211 | |
| 212 | if (vram->use == 0) |
| 213 | continue; |
| 214 | |
| 215 | obj = vram->object; |
| 216 | |
Lucas Stach | 339073e | 2016-01-22 12:03:03 +0100 | [diff] [blame] | 217 | mutex_lock(&obj->lock); |
The etnaviv authors | a8c21a5 | 2015-12-03 18:21:29 +0100 | [diff] [blame] | 218 | pages = etnaviv_gem_get_pages(obj); |
Lucas Stach | 339073e | 2016-01-22 12:03:03 +0100 | [diff] [blame] | 219 | mutex_unlock(&obj->lock); |
Dan Carpenter | b6c6eac | 2019-01-14 13:49:46 +0300 | [diff] [blame] | 220 | if (!IS_ERR(pages)) { |
The etnaviv authors | a8c21a5 | 2015-12-03 18:21:29 +0100 | [diff] [blame] | 221 | int j; |
| 222 | |
| 223 | iter.hdr->data[0] = bomap - bomap_start; |
| 224 | |
| 225 | for (j = 0; j < obj->base.size >> PAGE_SHIFT; j++) |
| 226 | *bomap++ = cpu_to_le64(page_to_phys(*pages++)); |
| 227 | } |
| 228 | |
| 229 | iter.hdr->iova = cpu_to_le64(vram->iova); |
| 230 | |
Lucas Stach | ce3088f | 2016-01-26 18:10:32 +0100 | [diff] [blame] | 231 | vaddr = etnaviv_gem_vmap(&obj->base); |
Lucas Stach | 9f07bb0 | 2016-01-25 15:37:28 +0100 | [diff] [blame] | 232 | if (vaddr) |
The etnaviv authors | a8c21a5 | 2015-12-03 18:21:29 +0100 | [diff] [blame] | 233 | memcpy(iter.data, vaddr, obj->base.size); |
| 234 | |
| 235 | etnaviv_core_dump_header(&iter, ETDUMP_BUF_BO, iter.data + |
| 236 | obj->base.size); |
| 237 | } |
| 238 | |
Lucas Stach | b7ca3f3 | 2019-05-21 14:53:40 +0200 | [diff] [blame] | 239 | mutex_unlock(&gpu->mmu->lock); |
| 240 | |
The etnaviv authors | a8c21a5 | 2015-12-03 18:21:29 +0100 | [diff] [blame] | 241 | etnaviv_core_dump_header(&iter, ETDUMP_BUF_END, iter.data); |
| 242 | |
| 243 | dev_coredumpv(gpu->dev, iter.start, iter.data - iter.start, GFP_KERNEL); |
| 244 | } |