/*
 * drivers/misc/logger.c
 *
 * A Logging Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * Robert Love <rlove@google.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/sched.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/time.h>
#include "logger.h"

#include <asm/ioctls.h>

/*
 * struct logger_log - represents a specific log, such as 'main' or 'radio'
 *
 * This structure lives from module insertion until module removal, so it does
 * not need additional reference counting. The structure is protected by the
 * mutex 'mutex'.
 */
struct logger_log {
        unsigned char *buffer;    /* the ring buffer itself */
        struct miscdevice misc;   /* misc device representing the log */
        wait_queue_head_t wq;     /* wait queue for readers */
        struct list_head readers; /* this log's readers */
        struct mutex mutex;       /* mutex protecting buffer */
        size_t w_off;             /* current write head offset */
        size_t head;              /* new readers start here */
        size_t size;              /* size of the log */
};

/*
 * struct logger_reader - a logging device open for reading
 *
 * This object lives from open to release, so we don't need additional
 * reference counting. The structure is protected by log->mutex.
 */
struct logger_reader {
        struct logger_log *log; /* associated log */
        struct list_head list;  /* entry in logger_log's list */
        size_t r_off;           /* current read head offset */
};

/* logger_offset - returns index 'n' into the log via (optimized) modulus */
size_t logger_offset(struct logger_log *log, size_t n)
{
        return n & (log->size - 1);
}
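
/*
 * Illustrative note (not from the original source): because the log size is
 * a power of two, the mask above is equivalent to 'n % log->size'. For
 * example, with a 256 KiB log (size == 0x40000), an offset of 0x40004
 * wraps back to 0x4.
 */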

/*
 * file_get_log - Given a file structure, return the associated log
 *
 * This isn't aesthetic. We have several goals:
 *
 *      1) Need to quickly obtain the associated log during an I/O operation
 *      2) Readers need to maintain state (logger_reader)
 *      3) Writers need to be very fast (open() should be a near no-op)
 *
 * In the reader case, we can trivially go file->logger_reader->logger_log.
 * For a writer, we don't want to maintain a logger_reader, so we just go
 * file->logger_log. Thus what file->private_data points at depends on whether
 * or not the file was opened for reading. This function hides that dirtiness.
 */
static inline struct logger_log *file_get_log(struct file *file)
{
        if (file->f_mode & FMODE_READ) {
                struct logger_reader *reader = file->private_data;
                return reader->log;
        } else
                return file->private_data;
}

/*
 * get_entry_len - Grabs the length of the payload of the next entry starting
 * from 'off'.
 *
 * An entry length is 2 bytes (16 bits) in host endian order.
 * In the log, the length does not include the size of the log entry structure.
 * This function returns the size including the log entry structure.
 *
 * Caller needs to hold log->mutex.
 */
static __u32 get_entry_len(struct logger_log *log, size_t off)
{
        __u16 val;

        /* copy 2 bytes from buffer, in memcpy order, */
        /* handling possible wrap at end of buffer */

        ((__u8 *)&val)[0] = log->buffer[off];
        if (likely(off + 1 < log->size))
                ((__u8 *)&val)[1] = log->buffer[off + 1];
        else
                ((__u8 *)&val)[1] = log->buffer[0];

        return sizeof(struct logger_entry) + val;
}
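
/*
 * Illustrative example (not from the original source): if the __u16 stored
 * at 'off' is 100, the entry occupies sizeof(struct logger_entry) + 100
 * bytes of the ring buffer: the stored length counts only the payload,
 * while the returned length also covers the header.
 */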

/*
 * do_read_log_to_user - reads exactly 'count' bytes from 'log' into the
 * user-space buffer 'buf'. Returns 'count' on success.
 *
 * Caller must hold log->mutex.
 */
static ssize_t do_read_log_to_user(struct logger_log *log,
                                   struct logger_reader *reader,
                                   char __user *buf,
                                   size_t count)
{
        size_t len;

        /*
         * We read from the log in two disjoint operations. First, we read from
         * the current read head offset up to 'count' bytes or to the end of
         * the log, whichever comes first.
         */
        len = min(count, log->size - reader->r_off);
        if (copy_to_user(buf, log->buffer + reader->r_off, len))
                return -EFAULT;

        /*
         * Second, we read any remaining bytes, starting back at the head of
         * the log.
         */
        if (count != len)
                if (copy_to_user(buf + len, log->buffer, count - len))
                        return -EFAULT;

        reader->r_off = logger_offset(log, reader->r_off + count);

        return count;
}
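
/*
 * Illustrative example (not from the original source): with a 16-byte log,
 * r_off == 14 and count == 5, the first copy_to_user() moves the 2 bytes at
 * offsets 14..15 and the second moves the remaining 3 bytes from offset 0,
 * after which r_off wraps to 3.
 */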

/*
 * logger_read - our log's read() method
 *
 * Behavior:
 *
 *      - O_NONBLOCK works
 *      - If there are no log entries to read, blocks until log is written to
 *      - Atomically reads exactly one log entry
 *
 * Optimal read size is LOGGER_ENTRY_MAX_LEN. Will set errno to EINVAL if read
 * buffer is insufficient to hold next entry.
 */
static ssize_t logger_read(struct file *file, char __user *buf,
                           size_t count, loff_t *pos)
{
        struct logger_reader *reader = file->private_data;
        struct logger_log *log = reader->log;
        ssize_t ret;
        DEFINE_WAIT(wait);

start:
        while (1) {
                mutex_lock(&log->mutex);

                prepare_to_wait(&log->wq, &wait, TASK_INTERRUPTIBLE);

                ret = (log->w_off == reader->r_off);
                mutex_unlock(&log->mutex);
                if (!ret)
                        break;

                if (file->f_flags & O_NONBLOCK) {
                        ret = -EAGAIN;
                        break;
                }

                if (signal_pending(current)) {
                        ret = -EINTR;
                        break;
                }

                schedule();
        }

        finish_wait(&log->wq, &wait);
        if (ret)
                return ret;

        mutex_lock(&log->mutex);

        /* is there still something to read or did we race? */
        if (unlikely(log->w_off == reader->r_off)) {
                mutex_unlock(&log->mutex);
                goto start;
        }

        /* get the size of the next entry */
        ret = get_entry_len(log, reader->r_off);
        if (count < ret) {
                ret = -EINVAL;
                goto out;
        }

        /* get exactly one entry from the log */
        ret = do_read_log_to_user(log, reader, buf, ret);

out:
        mutex_unlock(&log->mutex);

        return ret;
}

/*
 * get_next_entry - return the offset of the first valid entry at least 'len'
 * bytes after 'off'.
 *
 * Caller must hold log->mutex.
 */
static size_t get_next_entry(struct logger_log *log, size_t off, size_t len)
{
        size_t count = 0;

        do {
                size_t nr = get_entry_len(log, off);
                off = logger_offset(log, off + nr);
                count += nr;
        } while (count < len);

        return off;
}

/*
 * is_between - is a < c <= b, accounting for wrapping of a, b, and c
 * positions in the buffer
 *
 * That is, if a < b, check for c between a and b
 * and if a > b, check for c outside (not between) a and b
 *
 * |------- a xxxxxxxx b --------|
 *               c^
 *
 * |xxxxx b --------- a xxxxxxxxx|
 *    c^
 *  or                     c^
 */
static inline int is_between(size_t a, size_t b, size_t c)
{
        if (a < b) {
                /* is c between a and b? */
                if (a < c && c <= b)
                        return 1;
        } else {
                /* is c outside of b through a? */
                if (c <= b || a < c)
                        return 1;
        }

        return 0;
}

/*
 * fix_up_readers - walk the list of all readers and "fix up" any who were
 * lapped by the writer; also do the same for the default "start head".
 * We do this by "pulling forward" the readers and start head to the first
 * entry after the new write head.
 *
 * The caller needs to hold log->mutex.
 */
static void fix_up_readers(struct logger_log *log, size_t len)
{
        size_t old = log->w_off;
        size_t new = logger_offset(log, old + len);
        struct logger_reader *reader;

        if (is_between(old, new, log->head))
                log->head = get_next_entry(log, log->head, len);

        list_for_each_entry(reader, &log->readers, list)
                if (is_between(old, new, reader->r_off))
                        reader->r_off = get_next_entry(log, reader->r_off, len);
}
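
/*
 * Illustrative example (not from the original source): if the write head is
 * at offset 100 and an incoming record needs 50 bytes, any reader whose
 * r_off falls in (100, 150] is about to be overwritten, so it is advanced,
 * entry by entry, until it has skipped at least 50 bytes and again points at
 * an entry boundary beyond the clobbered region.
 */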

/*
 * do_write_log - writes 'count' bytes from 'buf' to 'log'
 *
 * The caller needs to hold log->mutex.
 */
static void do_write_log(struct logger_log *log, const void *buf, size_t count)
{
        size_t len;

        len = min(count, log->size - log->w_off);
        memcpy(log->buffer + log->w_off, buf, len);

        if (count != len)
                memcpy(log->buffer, buf + len, count - len);

        log->w_off = logger_offset(log, log->w_off + count);
}

/*
 * do_write_log_from_user - writes 'count' bytes from the user-space buffer
 * 'buf' to the log 'log'
 *
 * The caller needs to hold log->mutex.
 *
 * Returns 'count' on success, negative error code on failure.
 */
static ssize_t do_write_log_from_user(struct logger_log *log,
                                      const void __user *buf, size_t count)
{
        size_t len;

        len = min(count, log->size - log->w_off);
        if (len && copy_from_user(log->buffer + log->w_off, buf, len))
                return -EFAULT;

        if (count != len)
                if (copy_from_user(log->buffer, buf + len, count - len))
                        /*
                         * Note that by not updating w_off, this abandons the
                         * portion of the new entry that *was* successfully
                         * copied, just above. This is intentional to avoid
                         * message corruption from missing fragments.
                         */
                        return -EFAULT;

        log->w_off = logger_offset(log, log->w_off + count);

        return count;
}

/*
 * logger_aio_write - our write method, implementing support for write(),
 * writev(), and aio_write(). Writes are our fast path, and we try to optimize
 * them above all else.
 */
ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov,
                         unsigned long nr_segs, loff_t ppos)
{
        struct logger_log *log = file_get_log(iocb->ki_filp);
        size_t orig = log->w_off;
        struct logger_entry header;
        struct timespec now;
        ssize_t ret = 0;

        now = current_kernel_time();

        header.pid = current->tgid;
        header.tid = current->pid;
        header.sec = now.tv_sec;
        header.nsec = now.tv_nsec;
        header.len = min_t(size_t, iocb->ki_left, LOGGER_ENTRY_MAX_PAYLOAD);

        /* null writes succeed, return zero */
        if (unlikely(!header.len))
                return 0;

        mutex_lock(&log->mutex);

        /*
         * Fix up any readers, pulling them forward to the first readable
         * entry after (what will be) the new write offset. We do this now
         * because if we partially fail, we can end up with clobbered log
         * entries that encroach on readable buffer.
         */
        fix_up_readers(log, sizeof(struct logger_entry) + header.len);

        do_write_log(log, &header, sizeof(struct logger_entry));

        while (nr_segs-- > 0) {
                size_t len;
                ssize_t nr;

                /* figure out how much of this vector we can keep */
                len = min_t(size_t, iov->iov_len, header.len - ret);

                /* write out this segment's payload */
                nr = do_write_log_from_user(log, iov->iov_base, len);
                if (unlikely(nr < 0)) {
                        log->w_off = orig;
                        mutex_unlock(&log->mutex);
                        return nr;
                }

                iov++;
                ret += nr;
        }

        mutex_unlock(&log->mutex);

        /* wake up any blocked readers */
        wake_up_interruptible(&log->wq);

        return ret;
}
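
/*
 * Illustrative note (not from the original source): each record in the ring
 * buffer is therefore a struct logger_entry header followed immediately by
 * up to LOGGER_ENTRY_MAX_PAYLOAD payload bytes gathered from the iovecs.
 * Android's userspace liblog conventionally sends a small number of iovecs
 * per record (a one-byte priority, a NUL-terminated tag, and a
 * NUL-terminated message), but this driver does not interpret the payload.
 */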

static struct logger_log *get_log_from_minor(int);

/*
 * logger_open - the log's open() file operation
 *
 * Note how near a no-op this is in the write-only case. Keep it that way!
 */
static int logger_open(struct inode *inode, struct file *file)
{
        struct logger_log *log;
        int ret;

        ret = nonseekable_open(inode, file);
        if (ret)
                return ret;

        log = get_log_from_minor(MINOR(inode->i_rdev));
        if (!log)
                return -ENODEV;

        if (file->f_mode & FMODE_READ) {
                struct logger_reader *reader;

                reader = kmalloc(sizeof(struct logger_reader), GFP_KERNEL);
                if (!reader)
                        return -ENOMEM;

                reader->log = log;
                INIT_LIST_HEAD(&reader->list);

                mutex_lock(&log->mutex);
                reader->r_off = log->head;
                list_add_tail(&reader->list, &log->readers);
                mutex_unlock(&log->mutex);

                file->private_data = reader;
        } else
                file->private_data = log;

        return 0;
}

/*
 * logger_release - the log's release file operation
 *
 * Note this is a total no-op in the write-only case. Keep it that way!
 */
static int logger_release(struct inode *ignored, struct file *file)
{
        if (file->f_mode & FMODE_READ) {
                struct logger_reader *reader = file->private_data;
                struct logger_log *log = reader->log;

                mutex_lock(&log->mutex);
                list_del(&reader->list);
                mutex_unlock(&log->mutex);

                kfree(reader);
        }

        return 0;
}

/*
 * logger_poll - the log's poll file operation, for poll/select/epoll
 *
 * Note we always return POLLOUT, because you can always write() to the log.
 * Note also that, strictly speaking, a return value of POLLIN does not
 * guarantee that the log is readable without blocking, as there is a small
 * chance that the writer can lap the reader in the interim between poll()
 * returning and the read() request.
 */
static unsigned int logger_poll(struct file *file, poll_table *wait)
{
        struct logger_reader *reader;
        struct logger_log *log;
        unsigned int ret = POLLOUT | POLLWRNORM;

        if (!(file->f_mode & FMODE_READ))
                return ret;

        reader = file->private_data;
        log = reader->log;

        poll_wait(file, &log->wq, wait);

        mutex_lock(&log->mutex);
        if (log->w_off != reader->r_off)
                ret |= POLLIN | POLLRDNORM;
        mutex_unlock(&log->mutex);

        return ret;
}

static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct logger_log *log = file_get_log(file);
        struct logger_reader *reader;
        long ret = -ENOTTY;

        mutex_lock(&log->mutex);

        switch (cmd) {
        case LOGGER_GET_LOG_BUF_SIZE:
                ret = log->size;
                break;
        case LOGGER_GET_LOG_LEN:
                if (!(file->f_mode & FMODE_READ)) {
                        ret = -EBADF;
                        break;
                }
                reader = file->private_data;
                if (log->w_off >= reader->r_off)
                        ret = log->w_off - reader->r_off;
                else
                        ret = (log->size - reader->r_off) + log->w_off;
                break;
        case LOGGER_GET_NEXT_ENTRY_LEN:
                if (!(file->f_mode & FMODE_READ)) {
                        ret = -EBADF;
                        break;
                }
                reader = file->private_data;
                if (log->w_off != reader->r_off)
                        ret = get_entry_len(log, reader->r_off);
                else
                        ret = 0;
                break;
        case LOGGER_FLUSH_LOG:
                if (!(file->f_mode & FMODE_WRITE)) {
                        ret = -EBADF;
                        break;
                }
                list_for_each_entry(reader, &log->readers, list)
                        reader->r_off = log->w_off;
                log->head = log->w_off;
                ret = 0;
                break;
        }

        mutex_unlock(&log->mutex);

        return ret;
}
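
/*
 * Illustrative example (not from the original source): for
 * LOGGER_GET_LOG_LEN with a 256 KiB buffer, a reader at r_off == 0x3fff0
 * and a writer that has wrapped to w_off == 0x10 has
 * (0x40000 - 0x3fff0) + 0x10 == 0x20 bytes of unread data.
 */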

static const struct file_operations logger_fops = {
        .owner = THIS_MODULE,
        .read = logger_read,
        .aio_write = logger_aio_write,
        .poll = logger_poll,
        .unlocked_ioctl = logger_ioctl,
        .compat_ioctl = logger_ioctl,
        .open = logger_open,
        .release = logger_release,
};

/*
 * Defines a log structure with name 'NAME' and a size of 'SIZE' bytes, which
 * must be a power of two, greater than LOGGER_ENTRY_MAX_LEN, and less than
 * LONG_MAX minus LOGGER_ENTRY_MAX_LEN.
 */
#define DEFINE_LOGGER_DEVICE(VAR, NAME, SIZE) \
static unsigned char _buf_ ## VAR[SIZE]; \
static struct logger_log VAR = { \
        .buffer = _buf_ ## VAR, \
        .misc = { \
                .minor = MISC_DYNAMIC_MINOR, \
                .name = NAME, \
                .fops = &logger_fops, \
                .parent = NULL, \
        }, \
        .wq = __WAIT_QUEUE_HEAD_INITIALIZER(VAR .wq), \
        .readers = LIST_HEAD_INIT(VAR .readers), \
        .mutex = __MUTEX_INITIALIZER(VAR .mutex), \
        .w_off = 0, \
        .head = 0, \
        .size = SIZE, \
};

DEFINE_LOGGER_DEVICE(log_main, LOGGER_LOG_MAIN, 256*1024)
DEFINE_LOGGER_DEVICE(log_events, LOGGER_LOG_EVENTS, 256*1024)
DEFINE_LOGGER_DEVICE(log_radio, LOGGER_LOG_RADIO, 256*1024)
DEFINE_LOGGER_DEVICE(log_system, LOGGER_LOG_SYSTEM, 256*1024)
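
/*
 * Minimal usage sketch (illustrative, not part of the driver): assuming
 * LOGGER_LOG_MAIN names the misc device node for the main log, a userspace
 * writer might do something like:
 *
 *      int fd = open("/dev/" LOGGER_LOG_MAIN, O_WRONLY);
 *      struct iovec vec[2] = {
 *              { .iov_base = "tag",     .iov_len = 4 },
 *              { .iov_base = "message", .iov_len = 8 },
 *      };
 *      writev(fd, vec, 2);     /* becomes one atomic log entry */
 *
 * while a reader opens the same node O_RDONLY and receives exactly one
 * entry (header plus payload) per read() into a LOGGER_ENTRY_MAX_LEN
 * buffer. The actual device path depends on how the misc device name is
 * exposed on the target system.
 */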

static struct logger_log *get_log_from_minor(int minor)
{
        if (log_main.misc.minor == minor)
                return &log_main;
        if (log_events.misc.minor == minor)
                return &log_events;
        if (log_radio.misc.minor == minor)
                return &log_radio;
        if (log_system.misc.minor == minor)
                return &log_system;
        return NULL;
}

static int __init init_log(struct logger_log *log)
{
        int ret;

        ret = misc_register(&log->misc);
        if (unlikely(ret)) {
                printk(KERN_ERR "logger: failed to register misc "
                       "device for log '%s'!\n", log->misc.name);
                return ret;
        }

        printk(KERN_INFO "logger: created %luK log '%s'\n",
               (unsigned long) log->size >> 10, log->misc.name);

        return 0;
}

static int __init logger_init(void)
{
        int ret;

        ret = init_log(&log_main);
        if (unlikely(ret))
                goto out;

        ret = init_log(&log_events);
        if (unlikely(ret))
                goto out;

        ret = init_log(&log_radio);
        if (unlikely(ret))
                goto out;

        ret = init_log(&log_system);
        if (unlikely(ret))
                goto out;

out:
        return ret;
}
device_initcall(logger_init);