blob: dd770628a9ceb003f1b27965f4066b73d8d2c907 [file] [log] [blame]
/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
12
13#include <linux/kernel.h>
14#include <linux/workqueue.h>
15#include <linux/io.h>
16#include <linux/jiffies.h>
17#include <linux/sched.h>
18#include <linux/module.h>
Kyle Yane45fa022016-08-29 11:40:26 -070019#include <linux/fs.h>
20#include <linux/mm.h>
21#include <linux/slab.h>
22#include <linux/poll.h>
23#include <linux/uaccess.h>
24#include <linux/elf.h>
25#include <linux/wait.h>
Vikram Mulukutla1b742de2017-06-19 11:31:25 -070026#include <linux/cdev.h>
Kyle Yane45fa022016-08-29 11:40:26 -070027#include <soc/qcom/ramdump.h>
28#include <linux/dma-mapping.h>
29#include <linux/of.h>
30
Vikram Mulukutla1b742de2017-06-19 11:31:25 -070031
/* Max minors (and thus ramdump device nodes) under the shared major. */
#define RAMDUMP_NUM_DEVICES 256
/* Name used for both the device class and the chrdev region. */
#define RAMDUMP_NAME "ramdump"

static struct class *ramdump_class;	/* sysfs class for all ramdump nodes */
static dev_t ramdump_dev;		/* base dev_t from alloc_chrdev_region */
static DEFINE_MUTEX(rd_minor_mutex);	/* serializes one-time devnode init */
static DEFINE_IDA(rd_minor_id);		/* allocator for per-device minors */
static bool ramdump_devnode_inited;	/* set once class/major are set up */
/* How long _do_ramdump() waits for userspace to consume a dump (2 min). */
#define RAMDUMP_WAIT_MSECS 120000
41
/*
 * Per-device state for one ramdump character device node.
 * Created by create_ramdump_device(), freed by destroy_ramdump_device().
 */
struct ramdump_device {
	char name[256];		/* node name: "ramdump_<dev_name>" */

	unsigned int data_ready;	/* nonzero while a dump awaits a reader */
	unsigned int consumer_present;	/* nonzero while userspace holds the node open */
	int ramdump_status;	/* 0 = dump read OK, -1 = read failed */

	struct completion ramdump_complete;	/* signalled when reader finishes/closes */
	struct cdev cdev;	/* char device embedded here (container_of anchor) */
	struct device *dev;	/* device created under ramdump_class */

	wait_queue_head_t dump_wait_q;	/* readers block here until data_ready */
	int nsegments;		/* entries in segments[] */
	struct ramdump_segment *segments;	/* caller-owned segment table for current dump */
	size_t elfcore_size;	/* bytes of synthesized ELF headers (0 for raw dumps) */
	char *elfcore_buf;	/* synthesized ELF header buffer, freed per dump */
	unsigned long attrs;	/* DMA attributes passed to dma_remap() */
	bool complete_ramdump;	/* "qcom,complete-ramdump" DT property on parent */
};
61
62static int ramdump_open(struct inode *inode, struct file *filep)
63{
Vikram Mulukutla1b742de2017-06-19 11:31:25 -070064 struct ramdump_device *rd_dev = container_of(inode->i_cdev,
65 struct ramdump_device, cdev);
Kyle Yane45fa022016-08-29 11:40:26 -070066 rd_dev->consumer_present = 1;
67 rd_dev->ramdump_status = 0;
Vikram Mulukutla1b742de2017-06-19 11:31:25 -070068 filep->private_data = rd_dev;
Kyle Yane45fa022016-08-29 11:40:26 -070069 return 0;
70}
71
72static int ramdump_release(struct inode *inode, struct file *filep)
73{
Vikram Mulukutla1b742de2017-06-19 11:31:25 -070074
75 struct ramdump_device *rd_dev = container_of(inode->i_cdev,
76 struct ramdump_device, cdev);
Kyle Yane45fa022016-08-29 11:40:26 -070077 rd_dev->consumer_present = 0;
78 rd_dev->data_ready = 0;
79 complete(&rd_dev->ramdump_complete);
80 return 0;
81}
82
83static unsigned long offset_translate(loff_t user_offset,
84 struct ramdump_device *rd_dev, unsigned long *data_left,
85 void **vaddr)
86{
87 int i = 0;
88 *vaddr = NULL;
89
90 for (i = 0; i < rd_dev->nsegments; i++)
91 if (user_offset >= rd_dev->segments[i].size)
92 user_offset -= rd_dev->segments[i].size;
93 else
94 break;
95
96 if (i == rd_dev->nsegments) {
97 pr_debug("Ramdump(%s): offset_translate returning zero\n",
98 rd_dev->name);
99 *data_left = 0;
100 return 0;
101 }
102
103 *data_left = rd_dev->segments[i].size - user_offset;
104
105 pr_debug("Ramdump(%s): Returning address: %llx, data_left = %ld\n",
106 rd_dev->name, rd_dev->segments[i].address + user_offset,
107 *data_left);
108
109 if (rd_dev->segments[i].v_address)
110 *vaddr = rd_dev->segments[i].v_address + user_offset;
111
112 return rd_dev->segments[i].address + user_offset;
113}
114
/* Upper bound on how much device memory is remapped/copied per read call. */
#define MAX_IOREMAP_SIZE SZ_1M

/*
 * read(): stream the dump to userspace.  The synthesized ELF headers (if
 * any) come first, then segment data located via offset_translate().
 * Device memory is staged through a kernel bounce buffer with 8-byte-
 * aligned memcpy_fromio() for the unaligned head/tail, since unaligned
 * io access may fault on some buses.  On EOF or error, data_ready is
 * cleared and ramdump_complete is signalled to release _do_ramdump().
 */
static ssize_t ramdump_read(struct file *filep, char __user *buf, size_t count,
			loff_t *pos)
{
	struct ramdump_device *rd_dev = filep->private_data;
	void *device_mem = NULL, *origdevice_mem = NULL, *vaddr = NULL;
	unsigned long data_left = 0, bytes_before, bytes_after;
	unsigned long addr = 0;
	size_t copy_size = 0, alignsize;
	unsigned char *alignbuf = NULL, *finalbuf = NULL;
	int ret = 0;
	loff_t orig_pos = *pos;

	/* Honor O_NONBLOCK: don't sleep if no dump is pending. */
	if ((filep->f_flags & O_NONBLOCK) && !rd_dev->data_ready)
		return -EAGAIN;

	ret = wait_event_interruptible(rd_dev->dump_wait_q, rd_dev->data_ready);
	if (ret)
		return ret;

	/* Phase 1: serve bytes from the in-memory ELF header area first. */
	if (*pos < rd_dev->elfcore_size) {
		copy_size = rd_dev->elfcore_size - *pos;
		copy_size = min(copy_size, count);

		if (copy_to_user(buf, rd_dev->elfcore_buf + *pos, copy_size)) {
			ret = -EFAULT;
			goto ramdump_done;
		}
		*pos += copy_size;
		count -= copy_size;
		buf += copy_size;
		/* Request satisfied entirely from the header area. */
		if (count == 0)
			return copy_size;
	}

	/* Phase 2: locate the segment backing the remaining offset. */
	addr = offset_translate(*pos - rd_dev->elfcore_size, rd_dev,
				&data_left, &vaddr);

	/* EOF check */
	if (data_left == 0) {
		pr_debug("Ramdump(%s): Ramdump complete. %lld bytes read.",
			rd_dev->name, *pos);
		rd_dev->ramdump_status = 0;
		ret = 0;
		goto ramdump_done;
	}

	/* Clamp to the request, the remap window, and the segment end. */
	copy_size = min_t(size_t, count, (size_t)MAX_IOREMAP_SIZE);
	copy_size = min_t(unsigned long, (unsigned long)copy_size, data_left);

	rd_dev->attrs = 0;
	rd_dev->attrs |= DMA_ATTR_SKIP_ZEROING;
	/* Use the existing kernel mapping if present, else remap. */
	device_mem = vaddr ?: dma_remap(rd_dev->dev->parent, NULL, addr,
						copy_size, rd_dev->attrs);
	origdevice_mem = device_mem;

	if (device_mem == NULL) {
		pr_err("Ramdump(%s): Unable to ioremap: addr %lx, size %zd\n",
			rd_dev->name, addr, copy_size);
		rd_dev->ramdump_status = -1;
		ret = -ENOMEM;
		goto ramdump_done;
	}

	/* Bounce buffer so the user copy sees plain kernel memory. */
	alignbuf = kzalloc(copy_size, GFP_KERNEL);
	if (!alignbuf) {
		pr_err("Ramdump(%s): Unable to alloc mem for aligned buf\n",
				rd_dev->name);
		rd_dev->ramdump_status = -1;
		ret = -ENOMEM;
		goto ramdump_done;
	}

	finalbuf = alignbuf;	/* keep the base for copy_to_user/kfree */
	alignsize = copy_size;

	/* Unaligned head: copy byte-safe with memcpy_fromio up to 8B align. */
	if ((unsigned long)device_mem & 0x7) {
		bytes_before = 8 - ((unsigned long)device_mem & 0x7);
		memcpy_fromio(alignbuf, device_mem, bytes_before);
		device_mem += bytes_before;
		alignbuf += bytes_before;
		alignsize -= bytes_before;
	}

	/* Aligned middle via memcpy; unaligned tail again via memcpy_fromio. */
	if (alignsize & 0x7) {
		bytes_after = alignsize & 0x7;
		memcpy(alignbuf, device_mem, alignsize - bytes_after);
		device_mem += alignsize - bytes_after;
		alignbuf += (alignsize - bytes_after);
		alignsize = bytes_after;
		memcpy_fromio(alignbuf, device_mem, alignsize);
	} else
		memcpy(alignbuf, device_mem, alignsize);

	if (copy_to_user(buf, finalbuf, copy_size)) {
		pr_err("Ramdump(%s): Couldn't copy all data to user.",
			rd_dev->name);
		rd_dev->ramdump_status = -1;
		ret = -EFAULT;
		goto ramdump_done;
	}

	kfree(finalbuf);
	/* Only unmap what we mapped ourselves (not a pre-existing vaddr). */
	if (!vaddr && origdevice_mem)
		dma_unremap(rd_dev->dev->parent, origdevice_mem, copy_size);

	*pos += copy_size;

	pr_debug("Ramdump(%s): Read %zd bytes from address %lx.",
			rd_dev->name, copy_size, addr);

	return *pos - orig_pos;

ramdump_done:
	/* Common EOF/error exit: release resources, reset, wake producer. */
	if (!vaddr && origdevice_mem)
		dma_unremap(rd_dev->dev->parent, origdevice_mem, copy_size);

	kfree(finalbuf);
	rd_dev->data_ready = 0;
	*pos = 0;
	complete(&rd_dev->ramdump_complete);
	return ret;
}
239
240static unsigned int ramdump_poll(struct file *filep,
241 struct poll_table_struct *wait)
242{
Vikram Mulukutla1b742de2017-06-19 11:31:25 -0700243 struct ramdump_device *rd_dev = filep->private_data;
Kyle Yane45fa022016-08-29 11:40:26 -0700244 unsigned int mask = 0;
245
246 if (rd_dev->data_ready)
247 mask |= (POLLIN | POLLRDNORM);
248
249 poll_wait(filep, &rd_dev->dump_wait_q, wait);
250 return mask;
251}
252
/* file_operations shared by every ramdump character device node. */
static const struct file_operations ramdump_file_ops = {
	.open = ramdump_open,
	.release = ramdump_release,
	.read = ramdump_read,
	.poll = ramdump_poll
};
259
Vikram Mulukutla1b742de2017-06-19 11:31:25 -0700260static int ramdump_devnode_init(void)
Kyle Yane45fa022016-08-29 11:40:26 -0700261{
262 int ret;
Vikram Mulukutla1b742de2017-06-19 11:31:25 -0700263
264 ramdump_class = class_create(THIS_MODULE, RAMDUMP_NAME);
265 ret = alloc_chrdev_region(&ramdump_dev, 0, RAMDUMP_NUM_DEVICES,
266 RAMDUMP_NAME);
267 if (ret < 0) {
268 pr_warn("%s: unable to allocate major\n", __func__);
269 return ret;
270 }
271
272 ramdump_devnode_inited = true;
273
274 return 0;
275}
276
277void *create_ramdump_device(const char *dev_name, struct device *parent)
278{
279 int ret, minor;
Kyle Yane45fa022016-08-29 11:40:26 -0700280 struct ramdump_device *rd_dev;
281
282 if (!dev_name) {
283 pr_err("%s: Invalid device name.\n", __func__);
284 return NULL;
285 }
286
Vikram Mulukutla1b742de2017-06-19 11:31:25 -0700287 mutex_lock(&rd_minor_mutex);
288 if (!ramdump_devnode_inited) {
289 ret = ramdump_devnode_init();
290 if (ret)
291 return ERR_PTR(ret);
292 }
293 mutex_unlock(&rd_minor_mutex);
294
Kyle Yane45fa022016-08-29 11:40:26 -0700295 rd_dev = kzalloc(sizeof(struct ramdump_device), GFP_KERNEL);
296
297 if (!rd_dev) {
298 pr_err("%s: Couldn't alloc space for ramdump device!",
299 __func__);
300 return NULL;
301 }
302
Vikram Mulukutla1b742de2017-06-19 11:31:25 -0700303 /* get a minor number */
304 minor = ida_simple_get(&rd_minor_id, 0, RAMDUMP_NUM_DEVICES,
305 GFP_KERNEL);
306 if (minor < 0) {
307 pr_err("%s: No more minor numbers left! rc:%d\n", __func__,
308 minor);
309 ret = -ENODEV;
310 goto fail_out_of_minors;
311 }
312
Kyle Yane45fa022016-08-29 11:40:26 -0700313 snprintf(rd_dev->name, ARRAY_SIZE(rd_dev->name), "ramdump_%s",
314 dev_name);
315
316 init_completion(&rd_dev->ramdump_complete);
Kyle Yane45fa022016-08-29 11:40:26 -0700317 if (parent) {
318 rd_dev->complete_ramdump = of_property_read_bool(
319 parent->of_node, "qcom,complete-ramdump");
320 if (!rd_dev->complete_ramdump)
321 dev_info(parent,
322 "for %s segments only will be dumped.", dev_name);
323 }
324
325 init_waitqueue_head(&rd_dev->dump_wait_q);
326
Vikram Mulukutla1b742de2017-06-19 11:31:25 -0700327 rd_dev->dev = device_create(ramdump_class, parent,
328 MKDEV(MAJOR(ramdump_dev), minor),
329 rd_dev, rd_dev->name);
330 if (IS_ERR(rd_dev->dev)) {
331 ret = PTR_ERR(rd_dev->dev);
332 pr_err("%s: device_create failed for %s (%d)", __func__,
Kyle Yane45fa022016-08-29 11:40:26 -0700333 dev_name, ret);
Vikram Mulukutla1b742de2017-06-19 11:31:25 -0700334 goto fail_return_minor;
335 }
336
337 cdev_init(&rd_dev->cdev, &ramdump_file_ops);
338
339 ret = cdev_add(&rd_dev->cdev, MKDEV(MAJOR(ramdump_dev), minor), 1);
340 if (ret < 0) {
341 pr_err("%s: cdev_add failed for %s (%d)", __func__,
342 dev_name, ret);
343 goto fail_cdev_add;
Kyle Yane45fa022016-08-29 11:40:26 -0700344 }
345
346 return (void *)rd_dev;
Vikram Mulukutla1b742de2017-06-19 11:31:25 -0700347
348fail_cdev_add:
349 device_unregister(rd_dev->dev);
350fail_return_minor:
351 ida_simple_remove(&rd_minor_id, minor);
352fail_out_of_minors:
353 kfree(rd_dev);
354 return ERR_PTR(ret);
Kyle Yane45fa022016-08-29 11:40:26 -0700355}
356EXPORT_SYMBOL(create_ramdump_device);
357
358void destroy_ramdump_device(void *dev)
359{
360 struct ramdump_device *rd_dev = dev;
Vikram Mulukutla1b742de2017-06-19 11:31:25 -0700361 int minor = MINOR(rd_dev->cdev.dev);
Kyle Yane45fa022016-08-29 11:40:26 -0700362
363 if (IS_ERR_OR_NULL(rd_dev))
364 return;
365
Vikram Mulukutla1b742de2017-06-19 11:31:25 -0700366 cdev_del(&rd_dev->cdev);
367 device_unregister(rd_dev->dev);
368 ida_simple_remove(&rd_minor_id, minor);
Kyle Yane45fa022016-08-29 11:40:26 -0700369 kfree(rd_dev);
370}
371EXPORT_SYMBOL(destroy_ramdump_device);
372
/*
 * Core dump handshake: publish @segments to the userspace reader of
 * @handle's device node and block (up to RAMDUMP_WAIT_MSECS) until the
 * reader consumes the dump or closes the node.  When @use_elf is set,
 * an ELF32 core header (one PT_LOAD phdr per segment) is synthesized
 * and served before the raw segment data.
 *
 * Returns 0 when the reader drained the dump successfully, -EPIPE when
 * no consumer is present / the read failed / the wait timed out, and
 * -ENOMEM if the ELF header allocation fails.
 *
 * NOTE(review): @segments must stay valid for the whole call; with
 * complete_ramdump set, segment sizes are rewritten in place to span
 * the gaps up to the next segment's start address.
 */
static int _do_ramdump(void *handle, struct ramdump_segment *segments,
			int nsegments, bool use_elf)
{
	int ret, i;
	struct ramdump_device *rd_dev = (struct ramdump_device *)handle;
	Elf32_Phdr *phdr;
	Elf32_Ehdr *ehdr;
	unsigned long offset;

	/* No reader holds the node open: nobody would drain the dump. */
	if (!rd_dev->consumer_present) {
		pr_err("Ramdump(%s): No consumers. Aborting..\n", rd_dev->name);
		return -EPIPE;
	}

	if (rd_dev->complete_ramdump) {
		/* Stretch each segment to the start of the next one so the
		 * whole region (including gaps) is dumped contiguously. */
		for (i = 0; i < nsegments-1; i++)
			segments[i].size =
				segments[i + 1].address - segments[i].address;
	}

	rd_dev->segments = segments;
	rd_dev->nsegments = nsegments;

	if (use_elf) {
		/* Single allocation: Ehdr followed by nsegments Phdrs. */
		rd_dev->elfcore_size = sizeof(*ehdr) +
				       sizeof(*phdr) * nsegments;
		ehdr = kzalloc(rd_dev->elfcore_size, GFP_KERNEL);
		rd_dev->elfcore_buf = (char *)ehdr;
		if (!rd_dev->elfcore_buf)
			return -ENOMEM;

		memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
		ehdr->e_ident[EI_CLASS] = ELFCLASS32;
		ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
		ehdr->e_ident[EI_VERSION] = EV_CURRENT;
		ehdr->e_ident[EI_OSABI] = ELFOSABI_NONE;
		ehdr->e_type = ET_CORE;
		ehdr->e_version = EV_CURRENT;
		ehdr->e_phoff = sizeof(*ehdr);
		ehdr->e_ehsize = sizeof(*ehdr);
		ehdr->e_phentsize = sizeof(*phdr);
		ehdr->e_phnum = nsegments;

		/* File offsets of segment data start right after the headers. */
		offset = rd_dev->elfcore_size;
		phdr = (Elf32_Phdr *)(ehdr + 1);
		for (i = 0; i < nsegments; i++, phdr++) {
			phdr->p_type = PT_LOAD;
			phdr->p_offset = offset;
			phdr->p_vaddr = phdr->p_paddr = segments[i].address;
			phdr->p_filesz = phdr->p_memsz = segments[i].size;
			phdr->p_flags = PF_R | PF_W | PF_X;
			offset += phdr->p_filesz;
		}
	}

	rd_dev->data_ready = 1;
	rd_dev->ramdump_status = -1;	/* reader flips to 0 on clean EOF */

	reinit_completion(&rd_dev->ramdump_complete);

	/* Tell userspace that the data is ready */
	wake_up(&rd_dev->dump_wait_q);

	/* Wait (with a timeout) to let the ramdump complete */
	ret = wait_for_completion_timeout(&rd_dev->ramdump_complete,
			msecs_to_jiffies(RAMDUMP_WAIT_MSECS));

	if (!ret) {
		pr_err("Ramdump(%s): Timed out waiting for userspace.\n",
			rd_dev->name);
		ret = -EPIPE;
	} else
		ret = (rd_dev->ramdump_status == 0) ? 0 : -EPIPE;

	/* Reset per-dump state; the ELF header buffer is per-dump too. */
	rd_dev->data_ready = 0;
	rd_dev->elfcore_size = 0;
	kfree(rd_dev->elfcore_buf);
	rd_dev->elfcore_buf = NULL;
	return ret;

}
454
/*
 * Trigger a raw (non-ELF) ramdump: segments are streamed back-to-back
 * with no headers.  See _do_ramdump() for blocking/return semantics.
 */
int do_ramdump(void *handle, struct ramdump_segment *segments, int nsegments)
{
	return _do_ramdump(handle, segments, nsegments, false);
}
EXPORT_SYMBOL(do_ramdump);
460
/*
 * Trigger an ELF-format ramdump: an ELF32 core header is synthesized and
 * served ahead of the segment data.  See _do_ramdump() for semantics.
 */
int
do_elf_ramdump(void *handle, struct ramdump_segment *segments, int nsegments)
{
	return _do_ramdump(handle, segments, nsegments, true);
}
EXPORT_SYMBOL(do_elf_ramdump);