/*
 * Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "seemp: %s: " fmt, __func__

#include "seemp_logk.h"
#include "seemp_ringbuf.h"

#ifndef VM_RESERVED
#define VM_RESERVED (VM_DONTEXPAND | VM_DONTDUMP)
#endif

#define MASK_BUFFER_SIZE 256
#define FOUR_MB 4
#define YEAR_BASE 1900

static struct seemp_logk_dev *slogk_dev;

static unsigned int ring_sz = FOUR_MB;

/*
 * default is best effort; apps do not get blocked
 */
static unsigned int block_apps;


/*
 * When this flag is turned on, kmalloc is used for the ring buffer
 * allocation; otherwise vmalloc is used. The default is vmalloc,
 * since kmalloc has a 4MB limit.
 */
unsigned int kmalloc_flag;

static struct class *cl;

static rwlock_t filter_lock;
static struct seemp_source_mask *pmask;
static unsigned int num_sources;

static long seemp_logk_reserve_rdblks(
		struct seemp_logk_dev *sdev, unsigned long arg);
static long seemp_logk_set_mask(unsigned long arg);
static long seemp_logk_set_mapping(unsigned long arg);
static long seemp_logk_check_filter(unsigned long arg);

void* (*seemp_logk_kernel_begin)(char **buf);

void (*seemp_logk_kernel_end)(void *blck);

/*
 * kernel logging is done in four steps:
 * (1) fetch a block, fill everything except the payload.
 * (2) return the payload pointer to the caller.
 * (3) caller fills its data directly into the payload area.
 * (4) caller invokes seemp_logk_kernel_end_record() to finish writing.
 */
void *seemp_logk_kernel_start_record(char **buf)
{
	struct seemp_logk_blk *blk;
	struct timespec now;
	struct tm ts;
	int idx;
	int ret;

	DEFINE_WAIT(write_wait);

	ret = 0;
	idx = 0;
	now = current_kernel_time();
	blk = ringbuf_fetch_wr_block(slogk_dev);
	if (!blk) {
		/*
		 * there is no blk to write
		 * if block_apps == 0; quietly return
		 */
		if (!block_apps) {
			*buf = NULL;
			return NULL;
		}
		/*else wait for the blks to be available*/
		while (1) {
			mutex_lock(&slogk_dev->lock);
			prepare_to_wait(&slogk_dev->writers_wq,
					&write_wait, TASK_INTERRUPTIBLE);
			ret = (slogk_dev->num_write_avail_blks <= 0);
			if (!ret) {
				/* don't have to wait*/
				break;
			}
			mutex_unlock(&slogk_dev->lock);
			if (signal_pending(current)) {
				ret = -EINTR;
				break;
			}
			schedule();
		}

		finish_wait(&slogk_dev->writers_wq, &write_wait);
		if (ret)
			return NULL;

		idx = slogk_dev->write_idx;
		slogk_dev->write_idx =
			(slogk_dev->write_idx + 1) % slogk_dev->num_tot_blks;
		slogk_dev->num_write_avail_blks--;
		slogk_dev->num_write_in_prog_blks++;
		slogk_dev->num_writers++;

		blk = &slogk_dev->ring[idx];
		/*mark block invalid*/
		blk->status = 0x0;
		mutex_unlock(&slogk_dev->lock);
	}

	blk->version = OBSERVER_VERSION;
	blk->pid = current->tgid;
	blk->tid = current->pid;
	blk->uid = (current_uid()).val;
	blk->sec = now.tv_sec;
	blk->nsec = now.tv_nsec;
	strlcpy(blk->appname, current->comm, TASK_COMM_LEN);
	time_to_tm(now.tv_sec, 0, &ts);
	ts.tm_year += YEAR_BASE;
	ts.tm_mon += 1;

	snprintf(blk->ts, TS_SIZE, "%04ld-%02d-%02d %02d:%02d:%02d",
			ts.tm_year, ts.tm_mon, ts.tm_mday,
			ts.tm_hour, ts.tm_min, ts.tm_sec);

	*buf = blk->payload.msg;

	return blk;
}

void seemp_logk_kernel_end_record(void *blck)
{
	struct seemp_logk_blk *blk = (struct seemp_logk_blk *)blck;

	if (blk) {
		/*update status at the very end*/
		blk->status |= 0x1;
		blk->uid = (current_uid()).val;

		ringbuf_finish_writer(slogk_dev, blk);
	}
}
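
/*
 * Illustrative sketch (not part of this driver): a kernel-side caller is
 * expected to drive the hooks installed by seemp_logk_attach() roughly as
 * shown below. The size bound is an assumption; it presumes blk_payload.msg
 * is a fixed-size array as defined in seemp_logk.h.
 *
 *	char *payload;
 *	void *blk = seemp_logk_kernel_begin(&payload);
 *
 *	if (blk) {
 *		snprintf(payload, sizeof(((struct blk_payload *)0)->msg),
 *			 "example event");
 *		seemp_logk_kernel_end(blk);
 *	}
 */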

static int seemp_logk_usr_record(const char __user *buf, size_t count)
{
	struct seemp_logk_blk *blk;
	struct seemp_logk_blk usr_blk;
	struct seemp_logk_blk *local_blk;
	struct timespec now;
	struct tm ts;
	int idx, ret;

	DEFINE_WAIT(write_wait);

	if (buf) {
		local_blk = (struct seemp_logk_blk *)buf;
		if (copy_from_user(&usr_blk.pid, &local_blk->pid,
					sizeof(usr_blk.pid)) != 0)
			return -EFAULT;
		if (copy_from_user(&usr_blk.tid, &local_blk->tid,
					sizeof(usr_blk.tid)) != 0)
			return -EFAULT;
		if (copy_from_user(&usr_blk.uid, &local_blk->uid,
					sizeof(usr_blk.uid)) != 0)
			return -EFAULT;
		if (copy_from_user(&usr_blk.len, &local_blk->len,
					sizeof(usr_blk.len)) != 0)
			return -EFAULT;
		if (copy_from_user(&usr_blk.payload, &local_blk->payload,
					sizeof(struct blk_payload)) != 0)
			return -EFAULT;
	} else {
		return -EFAULT;
	}
	idx = ret = 0;
	now = current_kernel_time();
	blk = ringbuf_fetch_wr_block(slogk_dev);
	if (!blk) {
		if (!block_apps)
			return 0;
		while (1) {
			mutex_lock(&slogk_dev->lock);
			prepare_to_wait(&slogk_dev->writers_wq,
					&write_wait,
					TASK_INTERRUPTIBLE);
			ret = (slogk_dev->num_write_avail_blks <= 0);
			if (!ret)
				break;
			mutex_unlock(&slogk_dev->lock);
			if (signal_pending(current)) {
				ret = -EINTR;
				break;
			}
			schedule();
		}
		finish_wait(&slogk_dev->writers_wq, &write_wait);
		if (ret)
			return -EINTR;

		idx = slogk_dev->write_idx;
		slogk_dev->write_idx =
			(slogk_dev->write_idx + 1) % slogk_dev->num_tot_blks;
		slogk_dev->num_write_avail_blks--;
		slogk_dev->num_write_in_prog_blks++;
		slogk_dev->num_writers++;
		blk = &slogk_dev->ring[idx];
		/*mark block invalid*/
		blk->status = 0x0;
		mutex_unlock(&slogk_dev->lock);
	}
	if (usr_blk.len > sizeof(struct blk_payload)-1)
		usr_blk.len = sizeof(struct blk_payload)-1;

	memcpy(&blk->payload, &usr_blk.payload, sizeof(struct blk_payload));
	blk->pid = usr_blk.pid;
	blk->uid = usr_blk.uid;
	blk->tid = usr_blk.tid;
	blk->sec = now.tv_sec;
	blk->nsec = now.tv_nsec;
	time_to_tm(now.tv_sec, 0, &ts);
	ts.tm_year += YEAR_BASE;
	ts.tm_mon += 1;
	snprintf(blk->ts, TS_SIZE, "%04ld-%02d-%02d %02d:%02d:%02d",
			ts.tm_year, ts.tm_mon, ts.tm_mday,
			ts.tm_hour, ts.tm_min, ts.tm_sec);
	strlcpy(blk->appname, current->comm, TASK_COMM_LEN);
	blk->status |= 0x1;
	ringbuf_finish_writer(slogk_dev, blk);
	return ret;
}
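
/*
 * Note on the write() path above: user space hands in a full
 * struct seemp_logk_blk, but only the pid, tid, uid, len and payload
 * fields are read from the caller (len is capped at
 * sizeof(struct blk_payload) - 1); the timestamp, appname and status
 * fields are always filled in by the kernel.
 */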

static void seemp_logk_attach(void)
{
	seemp_logk_kernel_end = seemp_logk_kernel_end_record;
	seemp_logk_kernel_begin = seemp_logk_kernel_start_record;
}

static void seemp_logk_detach(void)
{
	seemp_logk_kernel_begin = NULL;
	seemp_logk_kernel_end = NULL;
}

static ssize_t
seemp_logk_write(struct file *file, const char __user *buf, size_t count,
		loff_t *ppos)
{
	return seemp_logk_usr_record(buf, count);
}

static int
seemp_logk_open(struct inode *inode, struct file *filp)
{
	int ret;

	/*disallow seeks on this file*/
	ret = nonseekable_open(inode, filp);
	if (ret) {
		pr_err("ret= %d\n", ret);
		return ret;
	}

	slogk_dev->minor = iminor(inode);
	filp->private_data = slogk_dev;

	return 0;
}

static bool seemp_logk_get_bit_from_vector(__u8 *pVec, __u32 index)
{
	unsigned int byte_num = index/8;
	unsigned int bit_num = index%8;
	unsigned char byte;

	if (DIV_ROUND_UP(index, 8) > MASK_BUFFER_SIZE)
		return false;

	byte = pVec[byte_num];

	return !(byte & (1 << bit_num));
}
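
/*
 * Example: for index 10, byte_num is 1 and bit_num is 2, so bit 2 of
 * pVec[1] is examined. The function returns true when that bit is clear,
 * so seemp_logk_set_mask() below treats a cleared bit in the user-supplied
 * vector as marking the corresponding source "on".
 */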

static long seemp_logk_ioctl(struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	struct seemp_logk_dev *sdev;
	int ret;

	sdev = (struct seemp_logk_dev *) filp->private_data;

	if (cmd == SEEMP_CMD_RESERVE_RDBLKS) {
		return seemp_logk_reserve_rdblks(sdev, arg);
	} else if (cmd == SEEMP_CMD_RELEASE_RDBLKS) {
		mutex_lock(&sdev->lock);
		sdev->read_idx = (sdev->read_idx + sdev->num_read_in_prog_blks)
			% sdev->num_tot_blks;
		sdev->num_write_avail_blks += sdev->num_read_in_prog_blks;
		ret = sdev->num_read_in_prog_blks;
		sdev->num_read_in_prog_blks = 0;
		/*wake up any waiting writers*/
		mutex_unlock(&sdev->lock);
		if (ret && block_apps)
			wake_up_interruptible(&sdev->writers_wq);
	} else if (cmd == SEEMP_CMD_GET_RINGSZ) {
		if (copy_to_user((unsigned int *)arg, &sdev->ring_sz,
				sizeof(unsigned int)))
			return -EFAULT;
	} else if (cmd == SEEMP_CMD_GET_BLKSZ) {
		if (copy_to_user((unsigned int *)arg, &sdev->blk_sz,
				sizeof(unsigned int)))
			return -EFAULT;
	} else if (cmd == SEEMP_CMD_SET_MASK) {
		return seemp_logk_set_mask(arg);
	} else if (cmd == SEEMP_CMD_SET_MAPPING) {
		return seemp_logk_set_mapping(arg);
	} else if (cmd == SEEMP_CMD_CHECK_FILTER) {
		return seemp_logk_check_filter(arg);
	} else {
		pr_err("Invalid Request %X\n", cmd);
		return -ENOIOCTLCMD;
	}
	return 0;
}

static long seemp_logk_reserve_rdblks(
		struct seemp_logk_dev *sdev, unsigned long arg)
{
	int ret;
	struct read_range rrange;

	DEFINE_WAIT(read_wait);

	mutex_lock(&sdev->lock);
	if (sdev->num_writers > 0 || sdev->num_read_avail_blks <= 0) {
		ret = -EPERM;
		pr_debug("(reserve): blocking, cannot read.\n");
		pr_debug("num_writers=%d num_read_avail_blks=%d\n",
				sdev->num_writers,
				sdev->num_read_avail_blks);
		mutex_unlock(&sdev->lock);
		/*
		 * unlock the device
		 * wait on a wait queue
		 * after wait, grab the dev lock again
		 */
		while (1) {
			mutex_lock(&sdev->lock);
			prepare_to_wait(&sdev->readers_wq, &read_wait,
					TASK_INTERRUPTIBLE);
			ret = (sdev->num_writers > 0 ||
					sdev->num_read_avail_blks <= 0);
			if (!ret) {
				/*don't have to wait*/
				break;
			}
			mutex_unlock(&sdev->lock);
			if (signal_pending(current)) {
				ret = -EINTR;
				break;
			}
			schedule();
		}

		finish_wait(&sdev->readers_wq, &read_wait);
		if (ret)
			return -EINTR;
	}

	/*sdev->lock is held at this point*/
	sdev->num_read_in_prog_blks = sdev->num_read_avail_blks;
	sdev->num_read_avail_blks = 0;
	rrange.start_idx = sdev->read_idx;
	rrange.num = sdev->num_read_in_prog_blks;
	mutex_unlock(&sdev->lock);

	if (copy_to_user((unsigned int *)arg, &rrange,
			sizeof(struct read_range)))
		return -EFAULT;

	return 0;
}

static long seemp_logk_set_mask(unsigned long arg)
{
	__u8 buffer[256];
	int i;
	unsigned int num_elements;

	if (copy_from_user(&num_elements,
		(unsigned int __user *) arg, sizeof(unsigned int)))
		return -EFAULT;

	read_lock(&filter_lock);
	if (num_sources == 0) {
		read_unlock(&filter_lock);
		return -EINVAL;
	}

	if (num_elements == 0 ||
		DIV_ROUND_UP(num_sources, 8) > MASK_BUFFER_SIZE) {
		read_unlock(&filter_lock);
		return -EINVAL;
	}

	if (copy_from_user(buffer,
			(__u8 *)arg, DIV_ROUND_UP(num_sources, 8))) {
		read_unlock(&filter_lock);
		return -EFAULT;
	}

	read_unlock(&filter_lock);
	write_lock(&filter_lock);
	if (num_elements != num_sources) {
		write_unlock(&filter_lock);
		return -EPERM;
	}

	for (i = 0; i < num_sources; i++) {
		pmask[i].isOn =
			seemp_logk_get_bit_from_vector(
				(__u8 *)buffer, i);
	}
	write_unlock(&filter_lock);
	return 0;
}

static long seemp_logk_set_mapping(unsigned long arg)
{
	__u32 num_elements;
	__u32 *pbuffer;
	int i;
	struct seemp_source_mask *pnewmask;

	if (copy_from_user(&num_elements,
			(__u32 __user *)arg, sizeof(__u32)))
		return -EFAULT;

	if ((num_elements == 0) || (num_elements >
		(UINT_MAX / sizeof(struct seemp_source_mask))))
		return -EFAULT;

	write_lock(&filter_lock);
	if (pmask != NULL) {
		/*
		 * Mask is getting set again.
		 * seemp_core was probably restarted.
		 */
		struct seemp_source_mask *ptempmask;

		num_sources = 0;
		ptempmask = pmask;
		pmask = NULL;
		kfree(ptempmask);
	}
	write_unlock(&filter_lock);
	pbuffer = kmalloc(sizeof(struct seemp_source_mask)
			* num_elements, GFP_KERNEL);
	if (pbuffer == NULL)
		return -ENOMEM;

	/*
	 * Use our new table as scratch space for now.
	 * We copy an ordered list of hash values into our buffer.
	 */
	if (copy_from_user(pbuffer, &((__u32 __user *)arg)[1],
			num_elements*sizeof(unsigned int))) {
		kfree(pbuffer);
		return -EFAULT;
	}
	/*
	 * We arrange the user data into a more usable form.
	 * This is done in-place.
	 */
	pnewmask = (struct seemp_source_mask *) pbuffer;
	for (i = num_elements - 1; i >= 0; i--) {
		pnewmask[i].hash = pbuffer[i];
		/* Observer is off by default*/
		pnewmask[i].isOn = 0;
	}
	write_lock(&filter_lock);
	pmask = pnewmask;
	num_sources = num_elements;
	write_unlock(&filter_lock);
	return 0;
}
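
/*
 * Layout of the SEEMP_CMD_SET_MAPPING argument buffer, as implied by the
 * copy_from_user() calls above (n is the element count):
 *
 *	__u32 buf[1 + n];
 *	buf[0]    = n;
 *	buf[1..n] = 32-bit source hash values
 *
 * Each hash becomes pmask[i].hash with isOn initially 0; SEEMP_CMD_SET_MASK
 * later switches individual sources on.
 */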

static long seemp_logk_check_filter(unsigned long arg)
{
	int i;
	unsigned int hash = (unsigned int) arg;

	/*
	 * This lock may be held a bit long.
	 * If that becomes a problem, it can be fixed.
	 */
	read_lock(&filter_lock);
	for (i = 0; i < num_sources; i++) {
		if (hash == pmask[i].hash) {
			int result = pmask[i].isOn;

			read_unlock(&filter_lock);
			return result;
		}
	}
	read_unlock(&filter_lock);
	return 0;
}

static int seemp_logk_mmap(struct file *filp,
		struct vm_area_struct *vma)
{
	int ret;
	char *vptr;
	unsigned long length, pfn;
	unsigned long start = vma->vm_start;

	length = vma->vm_end - vma->vm_start;

	if (length > (unsigned long) slogk_dev->ring_sz) {
		pr_err("len check failed\n");
		return -EIO;
	}

	vma->vm_flags |= VM_RESERVED | VM_SHARED;
	vptr = (char *) slogk_dev->ring;
	ret = 0;

	if (kmalloc_flag) {
		ret = remap_pfn_range(vma,
				start,
				virt_to_phys((void *)
				((unsigned long)slogk_dev->ring)) >> PAGE_SHIFT,
				length,
				vma->vm_page_prot);
		if (ret != 0) {
			pr_err("remap_pfn_range() fails with ret = %d\n",
				ret);
			return -EAGAIN;
		}
	} else {
		while (length > 0) {
			pfn = vmalloc_to_pfn(vptr);

			ret = remap_pfn_range(vma, start, pfn, PAGE_SIZE,
					vma->vm_page_prot);
			if (ret < 0) {
				pr_err("remap_pfn_range() fails with ret = %d\n",
					ret);
				return ret;
			}
			start += PAGE_SIZE;
			vptr += PAGE_SIZE;
			length -= PAGE_SIZE;
		}
	}

	return 0;
}

static const struct file_operations seemp_logk_fops = {
	.write = seemp_logk_write,
	.open = seemp_logk_open,
	.unlocked_ioctl = seemp_logk_ioctl,
	.compat_ioctl = seemp_logk_ioctl,
	.mmap = seemp_logk_mmap,
};
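
/*
 * Illustrative user-space reader sketch, not part of this driver. It
 * assumes the device node created below from seemp_LOGK_DEV_NAME and that
 * seemp_logk.h exposes the ioctl numbers and struct read_range to user
 * space:
 *
 *	int fd = open("/dev/<seemp_LOGK_DEV_NAME>", O_RDWR);
 *	unsigned int ring_sz, blk_sz;
 *	struct read_range rr;
 *	void *ring;
 *
 *	ioctl(fd, SEEMP_CMD_GET_RINGSZ, &ring_sz);
 *	ioctl(fd, SEEMP_CMD_GET_BLKSZ, &blk_sz);
 *	ring = mmap(NULL, ring_sz, PROT_READ, MAP_SHARED, fd, 0);
 *	ioctl(fd, SEEMP_CMD_RESERVE_RDBLKS, &rr);
 *	... consume rr.num blocks of blk_sz bytes starting at rr.start_idx ...
 *	ioctl(fd, SEEMP_CMD_RELEASE_RDBLKS, 0);
 */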

__init int seemp_logk_init(void)
{
	int ret;
	int devno = 0;

	num_sources = 0;
	kmalloc_flag = 0;
	block_apps = 0;
	pmask = NULL;

	if (kmalloc_flag && ring_sz > FOUR_MB) {
		pr_err("kmalloc cannot allocate > 4MB\n");
		return -ENOMEM;
	}

	ring_sz = ring_sz * SZ_1M;
	if (ring_sz <= 0) {
		pr_err("Too small a ring_sz=%d\n", ring_sz);
		return -EINVAL;
	}

	slogk_dev = kmalloc(sizeof(*slogk_dev), GFP_KERNEL);
	if (slogk_dev == NULL)
		return -ENOMEM;

	slogk_dev->ring_sz = ring_sz;
	slogk_dev->blk_sz = sizeof(struct seemp_logk_blk);
	/*initialize ping-pong buffers*/
	ret = ringbuf_init(slogk_dev);
	if (ret < 0) {
		pr_err("Init Failed, ret = %d\n", ret);
		goto pingpong_fail;
	}

	ret = alloc_chrdev_region(&devno, 0, seemp_LOGK_NUM_DEVS,
			seemp_LOGK_DEV_NAME);
	if (ret < 0) {
		pr_err("alloc_chrdev_region failed with ret = %d\n",
				ret);
		goto register_fail;
	}

	slogk_dev->major = MAJOR(devno);

	pr_debug("logk: major# = %d\n", slogk_dev->major);

	cl = class_create(THIS_MODULE, seemp_LOGK_DEV_NAME);
	if (IS_ERR(cl)) {
		pr_err("class create failed");
		goto cdev_fail;
	}
	if (IS_ERR(device_create(cl, NULL, devno, NULL,
					seemp_LOGK_DEV_NAME))) {
		pr_err("device create failed");
		goto class_destroy_fail;
	}
	cdev_init(&(slogk_dev->cdev), &seemp_logk_fops);

	slogk_dev->cdev.owner = THIS_MODULE;
	ret = cdev_add(&(slogk_dev->cdev), MKDEV(slogk_dev->major, 0), 1);
	if (ret) {
		pr_err("cdev_add failed with ret = %d", ret);
		goto class_destroy_fail;
	}

	seemp_logk_attach();
	mutex_init(&slogk_dev->lock);
	init_waitqueue_head(&slogk_dev->readers_wq);
	init_waitqueue_head(&slogk_dev->writers_wq);
	rwlock_init(&filter_lock);
	return 0;
class_destroy_fail:
	class_destroy(cl);
cdev_fail:
	unregister_chrdev_region(devno, seemp_LOGK_NUM_DEVS);
register_fail:
	ringbuf_cleanup(slogk_dev);
pingpong_fail:
	kfree(slogk_dev);
	return -EPERM;
}

__exit void seemp_logk_cleanup(void)
{
	dev_t devno = MKDEV(slogk_dev->major, slogk_dev->minor);

	seemp_logk_detach();

	cdev_del(&slogk_dev->cdev);

	unregister_chrdev_region(devno, seemp_LOGK_NUM_DEVS);
	ringbuf_cleanup(slogk_dev);
	kfree(slogk_dev);

	if (pmask != NULL) {
		kfree(pmask);
		pmask = NULL;
	}
}

module_init(seemp_logk_init);
module_exit(seemp_logk_cleanup);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("seemp Observer");