blob: e4c1bb85d9e8f4ef766c7bafd72969f80ad4a717 [file] [log] [blame]
Satya Durga Srinivasu Prabhala31c96172017-02-14 15:28:51 -08001/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
Kyle Yane45fa022016-08-29 11:40:26 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/kernel.h>
14#include <linux/workqueue.h>
15#include <linux/io.h>
16#include <linux/jiffies.h>
17#include <linux/sched.h>
18#include <linux/module.h>
Kyle Yane45fa022016-08-29 11:40:26 -070019#include <linux/fs.h>
20#include <linux/mm.h>
21#include <linux/slab.h>
22#include <linux/poll.h>
23#include <linux/uaccess.h>
24#include <linux/elf.h>
25#include <linux/wait.h>
Vikram Mulukutla1b742de2017-06-19 11:31:25 -070026#include <linux/cdev.h>
Kyle Yane45fa022016-08-29 11:40:26 -070027#include <soc/qcom/ramdump.h>
28#include <linux/dma-mapping.h>
29#include <linux/of.h>
30
Vikram Mulukutla1b742de2017-06-19 11:31:25 -070031
/* Maximum number of ramdump char devices (one minor each). */
#define RAMDUMP_NUM_DEVICES 256
#define RAMDUMP_NAME "ramdump"

/* One class/major shared by every ramdump device node. */
static struct class *ramdump_class;
static dev_t ramdump_dev;
/* Serializes the one-time devnode init below. */
static DEFINE_MUTEX(rd_minor_mutex);
/* Allocator for per-device minor numbers. */
static DEFINE_IDA(rd_minor_id);
static bool ramdump_devnode_inited;
/* How long the dump producer waits for userspace to read everything out. */
#define RAMDUMP_WAIT_MSECS 120000
/* Minidump section-header string table: total size and per-name cap. */
#define MAX_STRTBL_SIZE 512
#define MAX_NAME_LENGTH 16
Kyle Yane45fa022016-08-29 11:40:26 -070043
/*
 * Per-instance state for one ramdump character device.  Created by
 * create_ramdump_device() and returned to clients as an opaque handle.
 */
struct ramdump_device {
	char name[256];		/* "ramdump_<client>" node name */

	unsigned int data_ready;	/* nonzero while a dump is readable */
	unsigned int consumer_present;	/* userspace holds the node open */
	int ramdump_status;	/* 0 = read out OK, -1 = error/incomplete */

	struct completion ramdump_complete;	/* fired when read-out ends */
	struct cdev cdev;
	struct device *dev;

	wait_queue_head_t dump_wait_q;	/* readers sleep here for data_ready */
	int nsegments;
	struct ramdump_segment *segments;	/* owned by the dump caller */
	size_t elfcore_size;	/* bytes of synthesized ELF header area */
	char *elfcore_buf;	/* kzalloc'd ELF headers, freed after dump */
	unsigned long attrs;	/* DMA attributes passed to dma_remap() */
	bool complete_ramdump;	/* also dump the holes between segments */
};
63
64static int ramdump_open(struct inode *inode, struct file *filep)
65{
Vikram Mulukutla1b742de2017-06-19 11:31:25 -070066 struct ramdump_device *rd_dev = container_of(inode->i_cdev,
67 struct ramdump_device, cdev);
Kyle Yane45fa022016-08-29 11:40:26 -070068 rd_dev->consumer_present = 1;
69 rd_dev->ramdump_status = 0;
Vikram Mulukutla1b742de2017-06-19 11:31:25 -070070 filep->private_data = rd_dev;
Kyle Yane45fa022016-08-29 11:40:26 -070071 return 0;
72}
73
74static int ramdump_release(struct inode *inode, struct file *filep)
75{
Vikram Mulukutla1b742de2017-06-19 11:31:25 -070076
77 struct ramdump_device *rd_dev = container_of(inode->i_cdev,
78 struct ramdump_device, cdev);
Kyle Yane45fa022016-08-29 11:40:26 -070079 rd_dev->consumer_present = 0;
80 rd_dev->data_ready = 0;
81 complete(&rd_dev->ramdump_complete);
82 return 0;
83}
84
85static unsigned long offset_translate(loff_t user_offset,
86 struct ramdump_device *rd_dev, unsigned long *data_left,
87 void **vaddr)
88{
89 int i = 0;
90 *vaddr = NULL;
91
92 for (i = 0; i < rd_dev->nsegments; i++)
93 if (user_offset >= rd_dev->segments[i].size)
94 user_offset -= rd_dev->segments[i].size;
95 else
96 break;
97
98 if (i == rd_dev->nsegments) {
99 pr_debug("Ramdump(%s): offset_translate returning zero\n",
100 rd_dev->name);
101 *data_left = 0;
102 return 0;
103 }
104
105 *data_left = rd_dev->segments[i].size - user_offset;
106
107 pr_debug("Ramdump(%s): Returning address: %llx, data_left = %ld\n",
108 rd_dev->name, rd_dev->segments[i].address + user_offset,
109 *data_left);
110
111 if (rd_dev->segments[i].v_address)
112 *vaddr = rd_dev->segments[i].v_address + user_offset;
113
114 return rd_dev->segments[i].address + user_offset;
115}
116
117#define MAX_IOREMAP_SIZE SZ_1M
118
/*
 * ramdump_read() - stream the pending dump to userspace.
 *
 * Layout presented to the reader: the synthesized ELF header block
 * (elfcore_buf, may be empty) followed by the raw segment contents.
 * Blocks until data_ready unless O_NONBLOCK.  Device memory is copied
 * through a kernel bounce buffer with 8-byte-aligned memcpy_fromio for
 * the unaligned head/tail, since some device memory faults on unaligned
 * access.  On EOF or error the dump is torn down and the producer is
 * signalled via ramdump_complete.
 *
 * Returns the number of bytes read, 0 at EOF, or a negative errno.
 */
static ssize_t ramdump_read(struct file *filep, char __user *buf, size_t count,
			loff_t *pos)
{
	struct ramdump_device *rd_dev = filep->private_data;
	void *device_mem = NULL, *origdevice_mem = NULL, *vaddr = NULL;
	unsigned long data_left = 0, bytes_before, bytes_after;
	unsigned long addr = 0;
	size_t copy_size = 0, alignsize;
	unsigned char *alignbuf = NULL, *finalbuf = NULL;
	int ret = 0;
	loff_t orig_pos = *pos;

	if ((filep->f_flags & O_NONBLOCK) && !rd_dev->data_ready)
		return -EAGAIN;

	ret = wait_event_interruptible(rd_dev->dump_wait_q, rd_dev->data_ready);
	if (ret)
		return ret;

	/* Serve the synthesized ELF headers first, straight from the buffer. */
	if (*pos < rd_dev->elfcore_size) {
		copy_size = rd_dev->elfcore_size - *pos;
		copy_size = min(copy_size, count);

		if (copy_to_user(buf, rd_dev->elfcore_buf + *pos, copy_size)) {
			ret = -EFAULT;
			goto ramdump_done;
		}
		*pos += copy_size;
		count -= copy_size;
		buf += copy_size;
		if (count == 0)
			return copy_size;
	}

	/* Translate the remaining offset into a segment address. */
	addr = offset_translate(*pos - rd_dev->elfcore_size, rd_dev,
				&data_left, &vaddr);

	/* EOF check */
	if (data_left == 0) {
		pr_debug("Ramdump(%s): Ramdump complete. %lld bytes read.",
			rd_dev->name, *pos);
		rd_dev->ramdump_status = 0;
		ret = 0;
		goto ramdump_done;
	}

	/* Cap each pass at MAX_IOREMAP_SIZE and the segment remainder. */
	copy_size = min_t(size_t, count, (size_t)MAX_IOREMAP_SIZE);
	copy_size = min_t(unsigned long, (unsigned long)copy_size, data_left);

	rd_dev->attrs = 0;
	rd_dev->attrs |= DMA_ATTR_SKIP_ZEROING;
	/* Use the segment's vaddr when provided, otherwise map it here. */
	device_mem = vaddr ?: dma_remap(rd_dev->dev->parent, NULL, addr,
						copy_size, rd_dev->attrs);
	origdevice_mem = device_mem;

	if (device_mem == NULL) {
		pr_err("Ramdump(%s): Unable to ioremap: addr %lx, size %zd\n",
			rd_dev->name, addr, copy_size);
		rd_dev->ramdump_status = -1;
		ret = -ENOMEM;
		goto ramdump_done;
	}

	/* Bounce buffer so we can fix up alignment before copy_to_user(). */
	alignbuf = kzalloc(copy_size, GFP_KERNEL);
	if (!alignbuf) {
		pr_err("Ramdump(%s): Unable to alloc mem for aligned buf\n",
				rd_dev->name);
		rd_dev->ramdump_status = -1;
		ret = -ENOMEM;
		goto ramdump_done;
	}

	finalbuf = alignbuf;
	alignsize = copy_size;

	/* Unaligned head: copy byte-safe until device_mem is 8-byte aligned. */
	if ((unsigned long)device_mem & 0x7) {
		bytes_before = 8 - ((unsigned long)device_mem & 0x7);
		memcpy_fromio(alignbuf, device_mem, bytes_before);
		device_mem += bytes_before;
		alignbuf += bytes_before;
		alignsize -= bytes_before;
	}

	/* Bulk-copy the aligned middle; unaligned tail goes via fromio. */
	if (alignsize & 0x7) {
		bytes_after = alignsize & 0x7;
		memcpy(alignbuf, device_mem, alignsize - bytes_after);
		device_mem += alignsize - bytes_after;
		alignbuf += (alignsize - bytes_after);
		alignsize = bytes_after;
		memcpy_fromio(alignbuf, device_mem, alignsize);
	} else
		memcpy(alignbuf, device_mem, alignsize);

	if (copy_to_user(buf, finalbuf, copy_size)) {
		pr_err("Ramdump(%s): Couldn't copy all data to user.",
			rd_dev->name);
		rd_dev->ramdump_status = -1;
		ret = -EFAULT;
		goto ramdump_done;
	}

	kfree(finalbuf);
	/* Only unmap what we mapped ourselves above. */
	if (!vaddr && origdevice_mem)
		dma_unremap(rd_dev->dev->parent, origdevice_mem, copy_size);

	*pos += copy_size;

	pr_debug("Ramdump(%s): Read %zd bytes from address %lx.",
			rd_dev->name, copy_size, addr);

	return *pos - orig_pos;

ramdump_done:
	/* Common teardown: unmap, free, reset state, wake the producer. */
	if (!vaddr && origdevice_mem)
		dma_unremap(rd_dev->dev->parent, origdevice_mem, copy_size);

	kfree(finalbuf);
	rd_dev->data_ready = 0;
	*pos = 0;
	complete(&rd_dev->ramdump_complete);
	return ret;
}
241
242static unsigned int ramdump_poll(struct file *filep,
243 struct poll_table_struct *wait)
244{
Vikram Mulukutla1b742de2017-06-19 11:31:25 -0700245 struct ramdump_device *rd_dev = filep->private_data;
Kyle Yane45fa022016-08-29 11:40:26 -0700246 unsigned int mask = 0;
247
248 if (rd_dev->data_ready)
249 mask |= (POLLIN | POLLRDNORM);
250
251 poll_wait(filep, &rd_dev->dump_wait_q, wait);
252 return mask;
253}
254
/* File operations backing every ramdump character device node. */
static const struct file_operations ramdump_file_ops = {
	.open = ramdump_open,
	.release = ramdump_release,
	.read = ramdump_read,
	.poll = ramdump_poll
};
261
Vikram Mulukutla1b742de2017-06-19 11:31:25 -0700262static int ramdump_devnode_init(void)
Kyle Yane45fa022016-08-29 11:40:26 -0700263{
264 int ret;
Vikram Mulukutla1b742de2017-06-19 11:31:25 -0700265
266 ramdump_class = class_create(THIS_MODULE, RAMDUMP_NAME);
267 ret = alloc_chrdev_region(&ramdump_dev, 0, RAMDUMP_NUM_DEVICES,
268 RAMDUMP_NAME);
269 if (ret < 0) {
270 pr_warn("%s: unable to allocate major\n", __func__);
271 return ret;
272 }
273
274 ramdump_devnode_inited = true;
275
276 return 0;
277}
278
279void *create_ramdump_device(const char *dev_name, struct device *parent)
280{
281 int ret, minor;
Kyle Yane45fa022016-08-29 11:40:26 -0700282 struct ramdump_device *rd_dev;
283
284 if (!dev_name) {
285 pr_err("%s: Invalid device name.\n", __func__);
286 return NULL;
287 }
288
Vikram Mulukutla1b742de2017-06-19 11:31:25 -0700289 mutex_lock(&rd_minor_mutex);
290 if (!ramdump_devnode_inited) {
291 ret = ramdump_devnode_init();
292 if (ret)
293 return ERR_PTR(ret);
294 }
295 mutex_unlock(&rd_minor_mutex);
296
Kyle Yane45fa022016-08-29 11:40:26 -0700297 rd_dev = kzalloc(sizeof(struct ramdump_device), GFP_KERNEL);
298
299 if (!rd_dev) {
300 pr_err("%s: Couldn't alloc space for ramdump device!",
301 __func__);
302 return NULL;
303 }
304
Vikram Mulukutla1b742de2017-06-19 11:31:25 -0700305 /* get a minor number */
306 minor = ida_simple_get(&rd_minor_id, 0, RAMDUMP_NUM_DEVICES,
307 GFP_KERNEL);
308 if (minor < 0) {
309 pr_err("%s: No more minor numbers left! rc:%d\n", __func__,
310 minor);
311 ret = -ENODEV;
312 goto fail_out_of_minors;
313 }
314
Kyle Yane45fa022016-08-29 11:40:26 -0700315 snprintf(rd_dev->name, ARRAY_SIZE(rd_dev->name), "ramdump_%s",
316 dev_name);
317
318 init_completion(&rd_dev->ramdump_complete);
Kyle Yane45fa022016-08-29 11:40:26 -0700319 if (parent) {
320 rd_dev->complete_ramdump = of_property_read_bool(
321 parent->of_node, "qcom,complete-ramdump");
322 if (!rd_dev->complete_ramdump)
323 dev_info(parent,
324 "for %s segments only will be dumped.", dev_name);
325 }
326
327 init_waitqueue_head(&rd_dev->dump_wait_q);
328
Vikram Mulukutla1b742de2017-06-19 11:31:25 -0700329 rd_dev->dev = device_create(ramdump_class, parent,
330 MKDEV(MAJOR(ramdump_dev), minor),
331 rd_dev, rd_dev->name);
332 if (IS_ERR(rd_dev->dev)) {
333 ret = PTR_ERR(rd_dev->dev);
334 pr_err("%s: device_create failed for %s (%d)", __func__,
Kyle Yane45fa022016-08-29 11:40:26 -0700335 dev_name, ret);
Vikram Mulukutla1b742de2017-06-19 11:31:25 -0700336 goto fail_return_minor;
337 }
338
339 cdev_init(&rd_dev->cdev, &ramdump_file_ops);
340
341 ret = cdev_add(&rd_dev->cdev, MKDEV(MAJOR(ramdump_dev), minor), 1);
342 if (ret < 0) {
343 pr_err("%s: cdev_add failed for %s (%d)", __func__,
344 dev_name, ret);
345 goto fail_cdev_add;
Kyle Yane45fa022016-08-29 11:40:26 -0700346 }
347
348 return (void *)rd_dev;
Vikram Mulukutla1b742de2017-06-19 11:31:25 -0700349
350fail_cdev_add:
351 device_unregister(rd_dev->dev);
352fail_return_minor:
353 ida_simple_remove(&rd_minor_id, minor);
354fail_out_of_minors:
355 kfree(rd_dev);
356 return ERR_PTR(ret);
Kyle Yane45fa022016-08-29 11:40:26 -0700357}
358EXPORT_SYMBOL(create_ramdump_device);
359
360void destroy_ramdump_device(void *dev)
361{
362 struct ramdump_device *rd_dev = dev;
Vikram Mulukutla1b742de2017-06-19 11:31:25 -0700363 int minor = MINOR(rd_dev->cdev.dev);
Kyle Yane45fa022016-08-29 11:40:26 -0700364
365 if (IS_ERR_OR_NULL(rd_dev))
366 return;
367
Vikram Mulukutla1b742de2017-06-19 11:31:25 -0700368 cdev_del(&rd_dev->cdev);
369 device_unregister(rd_dev->dev);
370 ida_simple_remove(&rd_minor_id, minor);
Kyle Yane45fa022016-08-29 11:40:26 -0700371 kfree(rd_dev);
372}
373EXPORT_SYMBOL(destroy_ramdump_device);
374
/*
 * _do_ramdump() - publish a dump to the userspace consumer and wait.
 * @handle: opaque device handle from create_ramdump_device()
 * @segments: segment array; not copied, must stay valid until return,
 *	      and may be modified here (complete_ramdump resizing)
 * @nsegments: number of entries in @segments
 * @use_elf: when true, prepend a synthesized ELF32 core header so the
 *	     node reads as an ELF core file
 *
 * Returns 0 when userspace read the dump successfully, -EPIPE when no
 * consumer is attached, the wait timed out, or the read-out failed;
 * -ENOMEM when the ELF header buffer cannot be allocated.
 */
static int _do_ramdump(void *handle, struct ramdump_segment *segments,
		int nsegments, bool use_elf)
{
	int ret, i;
	struct ramdump_device *rd_dev = (struct ramdump_device *)handle;
	Elf32_Phdr *phdr;
	Elf32_Ehdr *ehdr;
	unsigned long offset;

	if (!rd_dev->consumer_present) {
		pr_err("Ramdump(%s): No consumers. Aborting..\n", rd_dev->name);
		return -EPIPE;
	}

	if (rd_dev->complete_ramdump) {
		/*
		 * Stretch each segment to the start of the next one so the
		 * gaps between segments are dumped too (all but the last).
		 */
		for (i = 0; i < nsegments-1; i++)
			segments[i].size =
				segments[i + 1].address - segments[i].address;
	}

	rd_dev->segments = segments;
	rd_dev->nsegments = nsegments;

	if (use_elf) {
		/* ELF header plus one PT_LOAD program header per segment. */
		rd_dev->elfcore_size = sizeof(*ehdr) +
				       sizeof(*phdr) * nsegments;
		ehdr = kzalloc(rd_dev->elfcore_size, GFP_KERNEL);
		rd_dev->elfcore_buf = (char *)ehdr;
		if (!rd_dev->elfcore_buf)
			return -ENOMEM;

		memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
		ehdr->e_ident[EI_CLASS] = ELFCLASS32;
		ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
		ehdr->e_ident[EI_VERSION] = EV_CURRENT;
		ehdr->e_ident[EI_OSABI] = ELFOSABI_NONE;
		ehdr->e_type = ET_CORE;
		ehdr->e_version = EV_CURRENT;
		ehdr->e_phoff = sizeof(*ehdr);
		ehdr->e_ehsize = sizeof(*ehdr);
		ehdr->e_phentsize = sizeof(*phdr);
		ehdr->e_phnum = nsegments;

		/* Segment payloads follow the headers back to back. */
		offset = rd_dev->elfcore_size;
		phdr = (Elf32_Phdr *)(ehdr + 1);
		for (i = 0; i < nsegments; i++, phdr++) {
			phdr->p_type = PT_LOAD;
			phdr->p_offset = offset;
			phdr->p_vaddr = phdr->p_paddr = segments[i].address;
			phdr->p_filesz = phdr->p_memsz = segments[i].size;
			phdr->p_flags = PF_R | PF_W | PF_X;
			offset += phdr->p_filesz;
		}
	}

	rd_dev->data_ready = 1;
	/* Pessimistic until ramdump_read() hits EOF and sets it to 0. */
	rd_dev->ramdump_status = -1;

	reinit_completion(&rd_dev->ramdump_complete);

	/* Tell userspace that the data is ready */
	wake_up(&rd_dev->dump_wait_q);

	/* Wait (with a timeout) to let the ramdump complete */
	ret = wait_for_completion_timeout(&rd_dev->ramdump_complete,
			msecs_to_jiffies(RAMDUMP_WAIT_MSECS));

	if (!ret) {
		pr_err("Ramdump(%s): Timed out waiting for userspace.\n",
			rd_dev->name);
		ret = -EPIPE;
	} else
		ret = (rd_dev->ramdump_status == 0) ? 0 : -EPIPE;

	rd_dev->data_ready = 0;
	rd_dev->elfcore_size = 0;
	kfree(rd_dev->elfcore_buf);
	rd_dev->elfcore_buf = NULL;
	return ret;

}
456
Avaneesh Kumar Dwivediec6d1392017-07-06 21:18:03 +0530457static inline struct elf_shdr *elf_sheader(struct elfhdr *hdr)
458{
459 return (struct elf_shdr *)((size_t)hdr + (size_t)hdr->e_shoff);
460}
461
/* Section header at index idx within the section-header table. */
static inline struct elf_shdr *elf_section(struct elfhdr *hdr, int idx)
{
	return elf_sheader(hdr) + idx;
}
466
467static inline char *elf_str_table(struct elfhdr *hdr)
468{
469 if (hdr->e_shstrndx == SHN_UNDEF)
470 return NULL;
471 return (char *)hdr + elf_section(hdr, hdr->e_shstrndx)->sh_offset;
472}
473
474static inline unsigned int set_section_name(const char *name,
475 struct elfhdr *ehdr)
476{
477 char *strtab = elf_str_table(ehdr);
478 static int strtable_idx = 1;
479 int idx, ret = 0;
480
481 idx = strtable_idx;
482 if ((strtab == NULL) || (name == NULL))
483 return 0;
484
485 ret = idx;
486 idx += strlcpy((strtab + idx), name, MAX_NAME_LENGTH);
487 strtable_idx = idx + 1;
488
489 return ret;
490}
491
/*
 * _do_minidump() - publish a section-based (minidump) ELF to userspace.
 * @handle: opaque device handle from create_ramdump_device()
 * @segments: named segment array; not copied, must stay valid until return
 * @nsegments: number of entries in @segments
 *
 * Unlike _do_ramdump(), this builds an ELF image described by *section*
 * headers: a NULL section, a string table section, then one SHT_PROGBITS
 * section per segment (hence nsegments + 2 headers).
 *
 * Returns 0 when userspace read the dump successfully, -EPIPE when no
 * consumer is attached, the wait timed out, or the read-out failed;
 * -ENOMEM when the header buffer cannot be allocated.
 */
static int _do_minidump(void *handle, struct ramdump_segment *segments,
		int nsegments)
{
	int ret, i;
	struct ramdump_device *rd_dev = (struct ramdump_device *)handle;
	struct elfhdr *ehdr;
	struct elf_shdr *shdr;
	unsigned long offset, strtbl_off;

	if (!rd_dev->consumer_present) {
		pr_err("Ramdump(%s): No consumers. Aborting..\n", rd_dev->name);
		return -EPIPE;
	}

	rd_dev->segments = segments;
	rd_dev->nsegments = nsegments;

	/* ELF header + (nsegments + 2) section headers + string table. */
	rd_dev->elfcore_size = sizeof(*ehdr) +
			(sizeof(*shdr) * (nsegments + 2)) + MAX_STRTBL_SIZE;
	ehdr = kzalloc(rd_dev->elfcore_size, GFP_KERNEL);
	rd_dev->elfcore_buf = (char *)ehdr;
	if (!rd_dev->elfcore_buf)
		return -ENOMEM;

	memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
	ehdr->e_ident[EI_CLASS] = ELF_CLASS;
	ehdr->e_ident[EI_DATA] = ELF_DATA;
	ehdr->e_ident[EI_VERSION] = EV_CURRENT;
	ehdr->e_ident[EI_OSABI] = ELF_OSABI;
	ehdr->e_type = ET_CORE;
	ehdr->e_machine  = ELF_ARCH;
	ehdr->e_version = EV_CURRENT;
	ehdr->e_ehsize = sizeof(*ehdr);
	ehdr->e_shoff = sizeof(*ehdr);
	ehdr->e_shentsize = sizeof(*shdr);
	ehdr->e_shstrndx = 1;	/* string table is section index 1 */


	offset = rd_dev->elfcore_size;
	shdr = (struct elf_shdr *)(ehdr + 1);
	/* String table sits right after the section-header table. */
	strtbl_off = sizeof(*ehdr) + sizeof(*shdr) * (nsegments + 2);
	/* Section 0 stays zeroed (mandatory SHN_UNDEF entry); skip it. */
	shdr++;
	shdr->sh_type = SHT_STRTAB;
	shdr->sh_offset = (elf_addr_t)strtbl_off;
	shdr->sh_size = MAX_STRTBL_SIZE;
	shdr->sh_entsize = 0;
	shdr->sh_flags = 0;
	shdr->sh_name = set_section_name("STR_TBL", ehdr);
	shdr++;

	for (i = 0; i < nsegments; i++, shdr++) {
		/* Update elf header */
		shdr->sh_type = SHT_PROGBITS;
		shdr->sh_name = set_section_name(segments[i].name, ehdr);
		shdr->sh_addr = (elf_addr_t)segments[i].address;
		shdr->sh_size = segments[i].size;
		shdr->sh_flags = SHF_WRITE;
		shdr->sh_offset = offset;
		shdr->sh_entsize = 0;
		offset += shdr->sh_size;
	}
	ehdr->e_shnum = nsegments + 2;

	rd_dev->data_ready = 1;
	/* Pessimistic until ramdump_read() hits EOF and sets it to 0. */
	rd_dev->ramdump_status = -1;

	reinit_completion(&rd_dev->ramdump_complete);

	/* Tell userspace that the data is ready */
	wake_up(&rd_dev->dump_wait_q);

	/* Wait (with a timeout) to let the ramdump complete */
	ret = wait_for_completion_timeout(&rd_dev->ramdump_complete,
			msecs_to_jiffies(RAMDUMP_WAIT_MSECS));

	if (!ret) {
		pr_err("Ramdump(%s): Timed out waiting for userspace.\n",
			rd_dev->name);
		ret = -EPIPE;
	} else {
		ret = (rd_dev->ramdump_status == 0) ? 0 : -EPIPE;
	}

	rd_dev->data_ready = 0;
	rd_dev->elfcore_size = 0;
	kfree(rd_dev->elfcore_buf);
	rd_dev->elfcore_buf = NULL;
	return ret;
}
581
Kyle Yane45fa022016-08-29 11:40:26 -0700582int do_ramdump(void *handle, struct ramdump_segment *segments, int nsegments)
583{
584 return _do_ramdump(handle, segments, nsegments, false);
585}
586EXPORT_SYMBOL(do_ramdump);
587
Avaneesh Kumar Dwivediec6d1392017-07-06 21:18:03 +0530588int do_minidump(void *handle, struct ramdump_segment *segments, int nsegments)
589{
590 return _do_minidump(handle, segments, nsegments);
591}
592EXPORT_SYMBOL(do_minidump);
593
Kyle Yane45fa022016-08-29 11:40:26 -0700594int
595do_elf_ramdump(void *handle, struct ramdump_segment *segments, int nsegments)
596{
597 return _do_ramdump(handle, segments, nsegments, true);
598}
599EXPORT_SYMBOL(do_elf_ramdump);