/*
 * fs/proc/vmcore.c - Interface for accessing the crash
 *                    dump from the system's previous life.
 * Heavily borrowed from fs/proc/kcore.c
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 * Copyright (C) IBM Corporation, 2004. All rights reserved
 *
 */

#include <linux/mm.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include "internal.h"

/* List representing chunks of contiguous memory areas and their offsets in
 * vmcore file.
 */
static LIST_HEAD(vmcore_list);

/* Stores the pointer to the buffer containing kernel elf core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;
static size_t elfcorebuf_sz_orig;

static char *elfnotes_buf;
static size_t elfnotes_sz;

/* Total size of vmcore file. */
static u64 vmcore_size;

static struct proc_dir_entry *proc_vmcore;

/*
 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error.
 * The called function has to take care of module refcounting.
 */
static int (*oldmem_pfn_is_ram)(unsigned long pfn);

int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
	if (oldmem_pfn_is_ram)
		return -EBUSY;
	oldmem_pfn_is_ram = fn;
	return 0;
}
EXPORT_SYMBOL_GPL(register_oldmem_pfn_is_ram);

void unregister_oldmem_pfn_is_ram(void)
{
	oldmem_pfn_is_ram = NULL;
	wmb();
}
EXPORT_SYMBOL_GPL(unregister_oldmem_pfn_is_ram);

static int pfn_is_ram(unsigned long pfn)
{
	int (*fn)(unsigned long pfn);
	/* pfn is ram unless fn() checks pagetype */
	int ret = 1;

	/*
	 * Ask hypervisor if the pfn is really ram.
	 * A ballooned page contains no data and reading from such a page
	 * will cause high load in the hypervisor.
	 */
	fn = oldmem_pfn_is_ram;
	if (fn)
		ret = fn(pfn);

	return ret;
}

/* Reads a page from the oldmem device from given offset. */
static ssize_t read_from_oldmem(char *buf, size_t count,
				u64 *ppos, int userbuf)
{
	unsigned long pfn, offset;
	size_t nr_bytes;
	ssize_t read = 0, tmp;

	if (!count)
		return 0;

	offset = (unsigned long)(*ppos % PAGE_SIZE);
	pfn = (unsigned long)(*ppos / PAGE_SIZE);

	do {
		if (count > (PAGE_SIZE - offset))
			nr_bytes = PAGE_SIZE - offset;
		else
			nr_bytes = count;

		/* If pfn is not ram, return zeros for sparse dump files */
		if (pfn_is_ram(pfn) == 0)
			memset(buf, 0, nr_bytes);
		else {
			tmp = copy_oldmem_page(pfn, buf, nr_bytes,
					       offset, userbuf);
			if (tmp < 0)
				return tmp;
		}
		*ppos += nr_bytes;
		count -= nr_bytes;
		buf += nr_bytes;
		read += nr_bytes;
		++pfn;
		offset = 0;
	} while (count);

	return read;
}

/*
 * Architectures may override this function to allocate the ELF header
 * in the 2nd kernel.
 */
int __weak elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
{
	return 0;
}

/*
 * Architectures may override this function to free the header.
 */
void __weak elfcorehdr_free(unsigned long long addr)
{}

/*
 * Architectures may override this function to read from the ELF header.
 */
ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
	return read_from_oldmem(buf, count, ppos, 0);
}

/*
 * Architectures may override this function to read from notes sections.
 */
ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
{
	return read_from_oldmem(buf, count, ppos, 0);
}

/*
 * Architectures may override this function to map oldmem.
 */
int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
				  unsigned long from, unsigned long pfn,
				  unsigned long size, pgprot_t prot)
{
	return remap_pfn_range(vma, from, pfn, size, prot);
}

/*
 * Copy to either kernel or user space.
 */
static int copy_to(void *target, void *src, size_t size, int userbuf)
{
	if (userbuf) {
		if (copy_to_user((char __user *) target, src, size))
			return -EFAULT;
	} else {
		memcpy(target, src, size);
	}
	return 0;
}

/* Read from the ELF header and then the crash dump. On error, a negative
 * value is returned; otherwise the number of bytes read is returned.
 */
static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
			     int userbuf)
{
	ssize_t acc = 0, tmp;
	size_t tsz;
	u64 start;
	struct vmcore *m = NULL;

	if (buflen == 0 || *fpos >= vmcore_size)
		return 0;

	/* trim buflen to not go beyond EOF */
	if (buflen > vmcore_size - *fpos)
		buflen = vmcore_size - *fpos;

	/* Read ELF core header */
	if (*fpos < elfcorebuf_sz) {
		tsz = min(elfcorebuf_sz - (size_t)*fpos, buflen);
		if (copy_to(buffer, elfcorebuf + *fpos, tsz, userbuf))
			return -EFAULT;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	}

	/* Read Elf note segment */
	if (*fpos < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, buflen);
		kaddr = elfnotes_buf + *fpos - elfcorebuf_sz;
		if (copy_to(buffer, kaddr, tsz, userbuf))
			return -EFAULT;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (*fpos < m->offset + m->size) {
			tsz = (size_t)min_t(unsigned long long,
					    m->offset + m->size - *fpos,
					    buflen);
			start = m->paddr + *fpos - m->offset;
			tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
			if (tmp < 0)
				return tmp;
			buflen -= tsz;
			*fpos += tsz;
			buffer += tsz;
			acc += tsz;

			/* leave now if filled buffer already */
			if (buflen == 0)
				return acc;
		}
	}

	return acc;
}

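/* read() handler for /proc/vmcore: hand the user buffer to __read_vmcore(). */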
static ssize_t read_vmcore(struct file *file, char __user *buffer,
			   size_t buflen, loff_t *fpos)
{
	return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
}

/*
 * The vmcore fault handler uses the page cache and fills data using the
 * standard __read_vmcore() function.
 *
 * On s390 the fault handler is used for memory regions that can't be mapped
 * directly with remap_pfn_range().
 */
static int mmap_vmcore_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
#ifdef CONFIG_S390
	struct address_space *mapping = vma->vm_file->f_mapping;
	pgoff_t index = vmf->pgoff;
	struct page *page;
	loff_t offset;
	char *buf;
	int rc;

	page = find_or_create_page(mapping, index, GFP_KERNEL);
	if (!page)
		return VM_FAULT_OOM;
	if (!PageUptodate(page)) {
		offset = (loff_t) index << PAGE_SHIFT;
		buf = __va((page_to_pfn(page) << PAGE_SHIFT));
		rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0);
		if (rc < 0) {
			unlock_page(page);
			put_page(page);
			return (rc == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
		}
		SetPageUptodate(page);
	}
	unlock_page(page);
	vmf->page = page;
	return 0;
#else
	return VM_FAULT_SIGBUS;
#endif
}

static const struct vm_operations_struct vmcore_mmap_ops = {
	.fault = mmap_vmcore_fault,
};

/**
 * alloc_elfnotes_buf - allocate buffer for ELF note segment in
 *                      vmalloc memory
 *
 * @notes_sz: size of buffer
 *
 * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
 * the buffer to user-space by means of remap_vmalloc_range().
 *
 * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
 * disabled and there's no need to allow users to mmap the buffer.
 */
static inline char *alloc_elfnotes_buf(size_t notes_sz)
{
#ifdef CONFIG_MMU
	return vmalloc_user(notes_sz);
#else
	return vzalloc(notes_sz);
#endif
}

/*
 * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
 * essential for mmap_vmcore() in order to map physically
 * non-contiguous objects (ELF header, ELF note segment and memory
 * regions in the 1st kernel pointed to by PT_LOAD entries) into
 * virtually contiguous user-space in ELF layout.
 */
#ifdef CONFIG_MMU
/*
 * remap_oldmem_pfn_checked - do remap_oldmem_pfn_range replacing all pages
 * reported as not being ram with the zero page.
 *
 * @vma: vm_area_struct describing requested mapping
 * @from: start remapping from
 * @pfn: page frame number to start remapping to
 * @size: remapping size
 * @prot: protection bits
 *
 * Returns zero on success, -EAGAIN on failure.
 */
static int remap_oldmem_pfn_checked(struct vm_area_struct *vma,
				    unsigned long from, unsigned long pfn,
				    unsigned long size, pgprot_t prot)
{
	unsigned long map_size;
	unsigned long pos_start, pos_end, pos;
	unsigned long zeropage_pfn = my_zero_pfn(0);
	size_t len = 0;

	pos_start = pfn;
	pos_end = pfn + (size >> PAGE_SHIFT);

	for (pos = pos_start; pos < pos_end; ++pos) {
		if (!pfn_is_ram(pos)) {
			/*
			 * We hit a page which is not ram. Remap the continuous
			 * region between pos_start and pos-1 and replace
			 * the non-ram page at pos with the zero page.
			 */
			if (pos > pos_start) {
				/* Remap continuous region */
				map_size = (pos - pos_start) << PAGE_SHIFT;
				if (remap_oldmem_pfn_range(vma, from + len,
							   pos_start, map_size,
							   prot))
					goto fail;
				len += map_size;
			}
			/* Remap the zero page */
			if (remap_oldmem_pfn_range(vma, from + len,
						   zeropage_pfn,
						   PAGE_SIZE, prot))
				goto fail;
			len += PAGE_SIZE;
			pos_start = pos + 1;
		}
	}
	if (pos > pos_start) {
		/* Remap the rest */
		map_size = (pos - pos_start) << PAGE_SHIFT;
		if (remap_oldmem_pfn_range(vma, from + len, pos_start,
					   map_size, prot))
			goto fail;
	}
	return 0;
fail:
	do_munmap(vma->vm_mm, from, len);
	return -EAGAIN;
}

static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma,
				   unsigned long from, unsigned long pfn,
				   unsigned long size, pgprot_t prot)
{
	/*
	 * Check if oldmem_pfn_is_ram was registered to avoid
	 * looping over all pages without a reason.
	 */
	if (oldmem_pfn_is_ram)
		return remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
	else
		return remap_oldmem_pfn_range(vma, from, pfn, size, prot);
}

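/*
 * mmap() handler for /proc/vmcore: map the ELF headers, the merged note
 * segment and the old-memory regions described by the vmcore list into one
 * virtually contiguous, read-only user mapping laid out like the ELF file.
 */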
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;
	u64 start, end, len, tsz;
	struct vmcore *m;

	start = (u64)vma->vm_pgoff << PAGE_SHIFT;
	end = start + size;

	if (size > vmcore_size || end > vmcore_size)
		return -EINVAL;

	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
		return -EPERM;

	vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_ops = &vmcore_mmap_ops;

	len = 0;

	if (start < elfcorebuf_sz) {
		u64 pfn;

		tsz = min(elfcorebuf_sz - (size_t)start, size);
		pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
		if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
				    vma->vm_page_prot))
			return -EAGAIN;
		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	if (start < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
		kaddr = elfnotes_buf + start - elfcorebuf_sz;
		if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
						kaddr, tsz))
			goto fail;
		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (start < m->offset + m->size) {
			u64 paddr = 0;

			tsz = (size_t)min_t(unsigned long long,
					    m->offset + m->size - start, size);
			paddr = m->paddr + start - m->offset;
			if (vmcore_remap_oldmem_pfn(vma, vma->vm_start + len,
						    paddr >> PAGE_SHIFT, tsz,
						    vma->vm_page_prot))
				goto fail;
			size -= tsz;
			start += tsz;
			len += tsz;

			if (size == 0)
				return 0;
		}
	}

	return 0;
fail:
	do_munmap(vma->vm_mm, vma->vm_start, len);
	return -EAGAIN;
}
#else
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	return -ENOSYS;
}
#endif

static const struct file_operations proc_vmcore_operations = {
	.read		= read_vmcore,
	.llseek		= default_llseek,
	.mmap		= mmap_vmcore,
};

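/* Allocate a zero-initialized element for the vmcore region list. */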
static struct vmcore* __init get_new_element(void)
{
	return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
}

static u64 __init get_vmcore_size(size_t elfsz, size_t elfnotesegsz,
				  struct list_head *vc_list)
{
	u64 size;
	struct vmcore *m;

	size = elfsz + elfnotesegsz;
	list_for_each_entry(m, vc_list, list) {
		size += m->size;
	}
	return size;
}

/**
 * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates the p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to the real size of the ELF
 * note segment.
 */
static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
{
	int i, rc = 0;
	Elf64_Phdr *phdr_ptr;
	Elf64_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf64_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf64_Nhdr *)((char *)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0)
			pr_warn("Warning: Zero PT_NOTE entries found\n");
	}

	return 0;
}

/**
 * get_note_number_and_size_elf64 - get the number of PT_NOTE program
 * headers and the sum of the real sizes of their ELF note segment
 * headers and data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a single unique one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz member.
 *
 * It is assumed that the PT_NOTE program headers pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and that each of them has the actual ELF note segment size in its
 * p_memsz member.
 */
static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf64_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}

/**
 * copy_notes_elf64 - copy ELF note segments into a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function copies the ELF note segments of the 1st kernel into
 * the buffer @notes_buf in the 2nd kernel. It is assumed that the
 * size of the buffer @notes_buf is equal to or larger than the sum of
 * the real ELF note segment headers and data.
 *
 * It is assumed that the PT_NOTE program headers pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and that each of them has the actual ELF note segment size in its
 * p_memsz member.
 */
static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc = 0;
	Elf64_Phdr *phdr_ptr;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;

	rc = update_note_header_size_elf64(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = alloc_elfnotes_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf64(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type = PT_NOTE;
	phdr.p_flags = 0;
	note_off = sizeof(Elf64_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf64_Phdr);
	phdr.p_offset = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align = 0;

	/* Add merged PT_NOTE program header. */
	tmp = elfptr + sizeof(Elf64_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp + i, ((*elfsz) - sizeof(Elf64_Ehdr) - sizeof(Elf64_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	return 0;
}

/**
 * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates the p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to the real size of the ELF
 * note segment.
 */
static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
{
	int i, rc = 0;
	Elf32_Phdr *phdr_ptr;
	Elf32_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf32_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf32_Nhdr *)((char *)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0)
			pr_warn("Warning: Zero PT_NOTE entries found\n");
	}

	return 0;
}

/**
 * get_note_number_and_size_elf32 - get the number of PT_NOTE program
 * headers and the sum of the real sizes of their ELF note segment
 * headers and data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a single unique one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz member.
 *
 * It is assumed that the PT_NOTE program headers pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and that each of them has the actual ELF note segment size in its
 * p_memsz member.
 */
static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf32_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}

/**
 * copy_notes_elf32 - copy ELF note segments into a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function copies the ELF note segments of the 1st kernel into
 * the buffer @notes_buf in the 2nd kernel. It is assumed that the
 * size of the buffer @notes_buf is equal to or larger than the sum of
 * the real ELF note segment headers and data.
 *
 * It is assumed that the PT_NOTE program headers pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and that each of them has the actual ELF note segment size in its
 * p_memsz member.
 */
static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc = 0;
	Elf32_Phdr *phdr_ptr;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;

	rc = update_note_header_size_elf32(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = alloc_elfnotes_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf32(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type = PT_NOTE;
	phdr.p_flags = 0;
	note_off = sizeof(Elf32_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf32_Phdr);
	phdr.p_offset = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align = 0;

	/* Add merged PT_NOTE program header. */
	tmp = elfptr + sizeof(Elf32_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp + i, ((*elfsz) - sizeof(Elf32_Ehdr) - sizeof(Elf32_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	return 0;
}

/* Add memory chunks represented by program headers to vmcore list. Also update
 * the new offset fields of exported program headers.
 */
static int __init process_ptload_program_headers_elf64(char *elfptr,
						size_t elfsz,
						size_t elfnotes_sz,
						struct list_head *vc_list)
{
	int i;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */

	/* Skip Elf header, program headers and Elf note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to vmcore list. */
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}

static int __init process_ptload_program_headers_elf32(char *elfptr,
						size_t elfsz,
						size_t elfnotes_sz,
						struct list_head *vc_list)
{
	int i;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */

	/* Skip Elf header, program headers and Elf note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to vmcore list. */
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}

/* Sets offset fields of vmcore elements. */
static void __init set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz,
					   struct list_head *vc_list)
{
	loff_t vmcore_off;
	struct vmcore *m;

	/* Skip Elf header, program headers and Elf note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	list_for_each_entry(m, vc_list, list) {
		m->offset = vmcore_off;
		vmcore_off += m->size;
	}
}

static void free_elfcorebuf(void)
{
	free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
	elfcorebuf = NULL;
	vfree(elfnotes_buf);
	elfnotes_buf = NULL;
}

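/*
 * Read the ELF64 core headers exported by the crashed kernel, merge the
 * PT_NOTE headers and build the list of memory regions backing /proc/vmcore.
 */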
static int __init parse_crash_elf64_headers(void)
{
	int rc = 0;
	Elf64_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read Elf header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf64_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!vmcore_elf64_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
		ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
				ehdr.e_phnum * sizeof(Elf64_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

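/*
 * Same as parse_crash_elf64_headers(), but for an ELF32 core header.
 */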
static int __init parse_crash_elf32_headers(void)
{
	int rc = 0;
	Elf32_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read Elf header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf32_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!vmcore_elf32_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
		ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

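/* Dispatch on the ELF class of the core header and compute the vmcore size. */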
static int __init parse_crash_elf_headers(void)
{
	unsigned char e_ident[EI_NIDENT];
	u64 addr;
	int rc = 0;

	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(e_ident, EI_NIDENT, &addr);
	if (rc < 0)
		return rc;
	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
		pr_warn("Warning: Core image elf header not found\n");
		return -EINVAL;
	}

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		rc = parse_crash_elf64_headers();
		if (rc)
			return rc;
	} else if (e_ident[EI_CLASS] == ELFCLASS32) {
		rc = parse_crash_elf32_headers();
		if (rc)
			return rc;
	} else {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Determine vmcore size. */
	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
				      &vmcore_list);

	return 0;
}

/* Init function for vmcore module. */
static int __init vmcore_init(void)
{
	int rc = 0;

	/* Allow architectures to allocate ELF header in 2nd kernel */
	rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size);
	if (rc)
		return rc;
	/*
	 * If elfcorehdr= has been passed in cmdline or created in 2nd kernel,
	 * then capture the dump.
	 */
	if (!(is_vmcore_usable()))
		return rc;
	rc = parse_crash_elf_headers();
	if (rc) {
		pr_warn("Kdump: vmcore not initialized\n");
		return rc;
	}
	elfcorehdr_free(elfcorehdr_addr);
	elfcorehdr_addr = ELFCORE_ADDR_ERR;

	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations);
	if (proc_vmcore)
		proc_vmcore->size = vmcore_size;
	return 0;
}
fs_initcall(vmcore_init);

/* Cleanup function for vmcore module. */
void vmcore_cleanup(void)
{
	struct list_head *pos, *next;

	if (proc_vmcore) {
		proc_remove(proc_vmcore);
		proc_vmcore = NULL;
	}

	/* clear the vmcore list. */
	list_for_each_safe(pos, next, &vmcore_list) {
		struct vmcore *m;

		m = list_entry(pos, struct vmcore, list);
		list_del(&m->list);
		kfree(m);
	}
	free_elfcorebuf();
}