/*
 * Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "seemp: %s: " fmt, __func__

#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/seemp_instrumentation.h>
#include <soc/qcom/scm.h>

#include "seemp_logk.h"
#include "seemp_ringbuf.h"

#ifndef VM_RESERVED
#define VM_RESERVED (VM_DONTEXPAND | VM_DONTDUMP)
#endif

#define MASK_BUFFER_SIZE 256
#define FOUR_MB 4
#define YEAR_BASE 1900

#define EL2_SCM_ID 0x02001902
#define KP_EL2_REPORT_REVISION 0x01000101
#define INVALID_PID -1

static struct seemp_logk_dev *slogk_dev;

static unsigned int ring_sz = FOUR_MB;

/*
 * default is best effort; apps do not get blocked
 */
static unsigned int block_apps;

/*
 * When this flag is set, kmalloc is used for the ring buffer
 * allocation; otherwise vmalloc is used. The default is vmalloc,
 * since kmalloc has a 4MB limit.
 */
unsigned int kmalloc_flag;

static struct class *cl;

static rwlock_t filter_lock;
static struct seemp_source_mask *pmask;
static unsigned int num_sources;

static void *el2_shared_mem;
static struct task_struct *rtic_thread;

static long seemp_logk_reserve_rdblks(
		struct seemp_logk_dev *sdev, unsigned long arg);
static long seemp_logk_set_mask(unsigned long arg);
static long seemp_logk_set_mapping(unsigned long arg);
static long seemp_logk_check_filter(unsigned long arg);
static pid_t seemp_logk_get_pid(struct task_struct *t);
static int seemp_logk_rtic_thread(void *data);

void *(*seemp_logk_kernel_begin)(char **buf);

void (*seemp_logk_kernel_end)(void *blck);

/*
 * kernel logging is done in four steps:
 * (1) fetch a block, fill everything except the payload.
 * (2) return the payload pointer to the caller.
 * (3) caller fills its data directly into the payload area.
 * (4) caller invokes finish_record() to finish writing.
 */
void *seemp_logk_kernel_start_record(char **buf)
{
	struct seemp_logk_blk *blk;
	struct timespec now;
	struct tm ts;
	int idx;
	int ret;

	DEFINE_WAIT(write_wait);

	ret = 0;
	idx = 0;
	now = current_kernel_time();
	blk = ringbuf_fetch_wr_block(slogk_dev);
	if (!blk) {
		/*
		 * there is no blk to write;
		 * if block_apps == 0, quietly return
		 */
		if (!block_apps) {
			*buf = NULL;
			return NULL;
		}
		/* else wait for blks to become available */
		while (1) {
			mutex_lock(&slogk_dev->lock);
			prepare_to_wait(&slogk_dev->writers_wq,
					&write_wait, TASK_INTERRUPTIBLE);
			ret = (slogk_dev->num_write_avail_blks <= 0);
			if (!ret) {
				/* don't have to wait */
				break;
			}
			mutex_unlock(&slogk_dev->lock);
			if (signal_pending(current)) {
				ret = -EINTR;
				break;
			}
			schedule();
		}

		finish_wait(&slogk_dev->writers_wq, &write_wait);
		if (ret)
			return NULL;

		idx = slogk_dev->write_idx;
		slogk_dev->write_idx =
			(slogk_dev->write_idx + 1) % slogk_dev->num_tot_blks;
		slogk_dev->num_write_avail_blks--;
		slogk_dev->num_write_in_prog_blks++;
		slogk_dev->num_writers++;

		blk = &slogk_dev->ring[idx];
		/* mark block invalid */
		blk->status = 0x0;
		mutex_unlock(&slogk_dev->lock);
	}

	blk->version = OBSERVER_VERSION;
	blk->pid = current->tgid;
	blk->tid = current->pid;
	blk->uid = (current_uid()).val;
	blk->sec = now.tv_sec;
	blk->nsec = now.tv_nsec;
	strlcpy(blk->appname, current->comm, TASK_COMM_LEN);
	time_to_tm(now.tv_sec, 0, &ts);
	ts.tm_year += YEAR_BASE;
	ts.tm_mon += 1;

	snprintf(blk->ts, TS_SIZE, "%04ld-%02d-%02d %02d:%02d:%02d",
			ts.tm_year, ts.tm_mon, ts.tm_mday,
			ts.tm_hour, ts.tm_min, ts.tm_sec);

	*buf = blk->payload.msg;

	return blk;
}

void seemp_logk_kernel_end_record(void *blck)
{
	struct seemp_logk_blk *blk = (struct seemp_logk_blk *)blck;

	if (blk) {
		/* update status at the very end */
		blk->status |= 0x1;
		blk->uid = (current_uid()).val;

		ringbuf_finish_writer(slogk_dev, blk);
	}
}

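/*
 * Copy a fully formed log block from userspace and commit it to the
 * ring buffer; when block_apps is set, wait for a free block instead
 * of dropping the record.
 */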
static int seemp_logk_usr_record(const char __user *buf, size_t count)
{
	struct seemp_logk_blk *blk;
	struct seemp_logk_blk usr_blk;
	struct seemp_logk_blk *local_blk;
	struct timespec now;
	struct tm ts;
	int idx, ret;

	DEFINE_WAIT(write_wait);

	if (buf) {
		local_blk = (struct seemp_logk_blk *)buf;
		if (copy_from_user(&usr_blk.pid, &local_blk->pid,
					sizeof(usr_blk.pid)) != 0)
			return -EFAULT;
		if (copy_from_user(&usr_blk.tid, &local_blk->tid,
					sizeof(usr_blk.tid)) != 0)
			return -EFAULT;
		if (copy_from_user(&usr_blk.uid, &local_blk->uid,
					sizeof(usr_blk.uid)) != 0)
			return -EFAULT;
		if (copy_from_user(&usr_blk.len, &local_blk->len,
					sizeof(usr_blk.len)) != 0)
			return -EFAULT;
		if (copy_from_user(&usr_blk.payload, &local_blk->payload,
					sizeof(struct blk_payload)) != 0)
			return -EFAULT;
	} else {
		return -EFAULT;
	}
	idx = ret = 0;
	now = current_kernel_time();
	blk = ringbuf_fetch_wr_block(slogk_dev);
	if (!blk) {
		if (!block_apps)
			return 0;
		while (1) {
			mutex_lock(&slogk_dev->lock);
			prepare_to_wait(&slogk_dev->writers_wq,
					&write_wait,
					TASK_INTERRUPTIBLE);
			ret = (slogk_dev->num_write_avail_blks <= 0);
			if (!ret)
				break;
			mutex_unlock(&slogk_dev->lock);
			if (signal_pending(current)) {
				ret = -EINTR;
				break;
			}
			schedule();
		}
		finish_wait(&slogk_dev->writers_wq, &write_wait);
		if (ret)
			return -EINTR;

		idx = slogk_dev->write_idx;
		slogk_dev->write_idx =
			(slogk_dev->write_idx + 1) % slogk_dev->num_tot_blks;
		slogk_dev->num_write_avail_blks--;
		slogk_dev->num_write_in_prog_blks++;
		slogk_dev->num_writers++;
		blk = &slogk_dev->ring[idx];
		/* mark block invalid */
		blk->status = 0x0;
		mutex_unlock(&slogk_dev->lock);
	}
	if (usr_blk.len > sizeof(struct blk_payload) - 1)
		usr_blk.len = sizeof(struct blk_payload) - 1;

	memcpy(&blk->payload, &usr_blk.payload, sizeof(struct blk_payload));
	blk->pid = usr_blk.pid;
	blk->uid = usr_blk.uid;
	blk->tid = usr_blk.tid;
	blk->sec = now.tv_sec;
	blk->nsec = now.tv_nsec;
	time_to_tm(now.tv_sec, 0, &ts);
	ts.tm_year += YEAR_BASE;
	ts.tm_mon += 1;
	snprintf(blk->ts, TS_SIZE, "%04ld-%02d-%02d %02d:%02d:%02d",
			ts.tm_year, ts.tm_mon, ts.tm_mday,
			ts.tm_hour, ts.tm_min, ts.tm_sec);
	strlcpy(blk->appname, current->comm, TASK_COMM_LEN);
	blk->status |= 0x1;
	ringbuf_finish_writer(slogk_dev, blk);
	return ret;
}

static void seemp_logk_attach(void)
{
	seemp_logk_kernel_end = seemp_logk_kernel_end_record;
	seemp_logk_kernel_begin = seemp_logk_kernel_start_record;
}

static void seemp_logk_detach(void)
{
	seemp_logk_kernel_begin = NULL;
	seemp_logk_kernel_end = NULL;
}

static ssize_t
seemp_logk_write(struct file *file, const char __user *buf, size_t count,
		loff_t *ppos)
{
	return seemp_logk_usr_record(buf, count);
}

static int
seemp_logk_open(struct inode *inode, struct file *filp)
{
	int ret;

	/* disallow seeks on this file */
	ret = nonseekable_open(inode, filp);
	if (ret) {
		pr_err("ret= %d\n", ret);
		return ret;
	}

	slogk_dev->minor = iminor(inode);
	filp->private_data = slogk_dev;

	return 0;
}

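/*
 * Test one bit in the packed mask vector from userspace. Returns
 * true when the bit is clear: a cleared bit means the source is on.
 */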
static bool seemp_logk_get_bit_from_vector(__u8 *pVec, __u32 index)
{
	unsigned int byte_num = index / 8;
	unsigned int bit_num = index % 8;
	unsigned char byte;

	if (byte_num >= MASK_BUFFER_SIZE)
		return false;

	byte = pVec[byte_num];

	return !(byte & (1 << bit_num));
}

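/*
 * ioctl dispatch: reserve/release blocks for reading, report the ring
 * and block sizes, and manage the source filter mask and mapping.
 */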
static long seemp_logk_ioctl(struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	struct seemp_logk_dev *sdev;
	int ret;

	sdev = (struct seemp_logk_dev *) filp->private_data;

	if (cmd == SEEMP_CMD_RESERVE_RDBLKS) {
		return seemp_logk_reserve_rdblks(sdev, arg);
	} else if (cmd == SEEMP_CMD_RELEASE_RDBLKS) {
		mutex_lock(&sdev->lock);
		sdev->read_idx = (sdev->read_idx + sdev->num_read_in_prog_blks)
			% sdev->num_tot_blks;
		sdev->num_write_avail_blks += sdev->num_read_in_prog_blks;
		ret = sdev->num_read_in_prog_blks;
		sdev->num_read_in_prog_blks = 0;
		/* wake up any waiting writers */
		mutex_unlock(&sdev->lock);
		if (ret && block_apps)
			wake_up_interruptible(&sdev->writers_wq);
	} else if (cmd == SEEMP_CMD_GET_RINGSZ) {
		if (copy_to_user((unsigned int *)arg, &sdev->ring_sz,
				sizeof(unsigned int)))
			return -EFAULT;
	} else if (cmd == SEEMP_CMD_GET_BLKSZ) {
		if (copy_to_user((unsigned int *)arg, &sdev->blk_sz,
				sizeof(unsigned int)))
			return -EFAULT;
	} else if (cmd == SEEMP_CMD_SET_MASK) {
		return seemp_logk_set_mask(arg);
	} else if (cmd == SEEMP_CMD_SET_MAPPING) {
		return seemp_logk_set_mapping(arg);
	} else if (cmd == SEEMP_CMD_CHECK_FILTER) {
		return seemp_logk_check_filter(arg);
	} else {
		pr_err("Invalid Request %X\n", cmd);
		return -ENOIOCTLCMD;
	}
	return 0;
}

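/*
 * Reserve every currently readable block for the caller: wait until
 * no writer is in progress and blocks are available, then return the
 * read range (start index and count) to userspace.
 */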
static long seemp_logk_reserve_rdblks(
		struct seemp_logk_dev *sdev, unsigned long arg)
{
	int ret;
	struct read_range rrange;

	DEFINE_WAIT(read_wait);

	mutex_lock(&sdev->lock);
	if (sdev->num_writers > 0 || sdev->num_read_avail_blks <= 0) {
		ret = -EPERM;
		pr_debug("(reserve): blocking, cannot read.\n");
		pr_debug("num_writers=%d num_read_avail_blks=%d\n",
				sdev->num_writers,
				sdev->num_read_avail_blks);
		mutex_unlock(&sdev->lock);
		/*
		 * unlock the device,
		 * wait on the wait queue,
		 * after the wait, grab the dev lock again
		 */
		while (1) {
			mutex_lock(&sdev->lock);
			prepare_to_wait(&sdev->readers_wq, &read_wait,
					TASK_INTERRUPTIBLE);
			ret = (sdev->num_writers > 0 ||
					sdev->num_read_avail_blks <= 0);
			if (!ret) {
				/* don't have to wait */
				break;
			}
			mutex_unlock(&sdev->lock);
			if (signal_pending(current)) {
				ret = -EINTR;
				break;
			}
			schedule();
		}

		finish_wait(&sdev->readers_wq, &read_wait);
		if (ret)
			return -EINTR;
	}

	/* sdev->lock is held at this point */
	sdev->num_read_in_prog_blks = sdev->num_read_avail_blks;
	sdev->num_read_avail_blks = 0;
	rrange.start_idx = sdev->read_idx;
	rrange.num = sdev->num_read_in_prog_blks;
	mutex_unlock(&sdev->lock);

	if (copy_to_user((unsigned int *)arg, &rrange,
			sizeof(struct read_range)))
		return -EFAULT;

	return 0;
}

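/*
 * Apply a packed on/off bit vector from userspace to the source
 * mask; the element count must match the installed mapping.
 */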
static long seemp_logk_set_mask(unsigned long arg)
{
	__u8 buffer[MASK_BUFFER_SIZE];
	int i;
	unsigned int num_elements;

	if (copy_from_user(&num_elements,
			(unsigned int __user *) arg, sizeof(unsigned int)))
		return -EFAULT;

	read_lock(&filter_lock);
	if (num_sources == 0) {
		read_unlock(&filter_lock);
		return -EINVAL;
	}

	if (num_elements == 0 ||
			DIV_ROUND_UP(num_sources, 8) > MASK_BUFFER_SIZE) {
		read_unlock(&filter_lock);
		return -EINVAL;
	}

	if (copy_from_user(buffer,
			(__u8 *)arg, DIV_ROUND_UP(num_sources, 8))) {
		read_unlock(&filter_lock);
		return -EFAULT;
	}

	read_unlock(&filter_lock);
	write_lock(&filter_lock);
	if (num_elements != num_sources) {
		write_unlock(&filter_lock);
		return -EPERM;
	}

	for (i = 0; i < num_sources; i++) {
		pmask[i].isOn =
			seemp_logk_get_bit_from_vector(
				(__u8 *)buffer, i);
	}
	write_unlock(&filter_lock);
	return 0;
}

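/*
 * Install the source mapping: userspace passes a count followed by an
 * ordered list of hash values, which is rebuilt in place into an
 * array of seemp_source_mask entries with every source off.
 */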
static long seemp_logk_set_mapping(unsigned long arg)
{
	__u32 num_elements;
	__u32 *pbuffer;
	int i;
	struct seemp_source_mask *pnewmask;

	if (copy_from_user(&num_elements,
			(__u32 __user *)arg, sizeof(__u32)))
		return -EFAULT;

	if ((num_elements == 0) || (num_elements >
			(UINT_MAX / sizeof(struct seemp_source_mask))))
		return -EFAULT;

	write_lock(&filter_lock);
	if (pmask != NULL) {
		/*
		 * Mask is getting set again.
		 * seemp_core was probably restarted.
		 */
		struct seemp_source_mask *ptempmask;

		num_sources = 0;
		ptempmask = pmask;
		pmask = NULL;
		kfree(ptempmask);
	}
	write_unlock(&filter_lock);
	pbuffer = kmalloc(sizeof(struct seemp_source_mask)
			* num_elements, GFP_KERNEL);
	if (pbuffer == NULL)
		return -ENOMEM;

	/*
	 * Use our new table as scratch space for now.
	 * We copy an ordered list of hash values into our buffer.
	 */
	if (copy_from_user(pbuffer, &((__u32 __user *)arg)[1],
			num_elements * sizeof(unsigned int))) {
		kfree(pbuffer);
		return -EFAULT;
	}
	/*
	 * We arrange the user data into a more usable form.
	 * This is done in-place.
	 */
	pnewmask = (struct seemp_source_mask *) pbuffer;
	for (i = num_elements - 1; i >= 0; i--) {
		pnewmask[i].hash = pbuffer[i];
		/* Observer is off by default */
		pnewmask[i].isOn = 0;
	}
	write_lock(&filter_lock);
	pmask = pnewmask;
	num_sources = num_elements;
	write_unlock(&filter_lock);
	return 0;
}

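/*
 * Look up a source hash in the mask table and return its on/off
 * state; unknown hashes are treated as off.
 */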
static long seemp_logk_check_filter(unsigned long arg)
{
	int i;
	unsigned int hash = (unsigned int) arg;

	/*
	 * This lock is held for the whole scan;
	 * it can be shortened if that becomes a problem.
	 */
	read_lock(&filter_lock);
	for (i = 0; i < num_sources; i++) {
		if (hash == pmask[i].hash) {
			int result = pmask[i].isOn;

			read_unlock(&filter_lock);
			return result;
		}
	}
	read_unlock(&filter_lock);
	return 0;
}

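/*
 * Map the ring buffer into the caller's address space: a kmalloc'd
 * ring is physically contiguous and remapped in one call, a vmalloc'd
 * ring page by page. Also starts the RTIC polling thread once the
 * EL2 shared page is set up.
 */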
static int seemp_logk_mmap(struct file *filp,
		struct vm_area_struct *vma)
{
	int ret;
	char *vptr;
	unsigned long length, pfn;
	unsigned long start = vma->vm_start;

	length = vma->vm_end - vma->vm_start;

	if (length > (unsigned long) slogk_dev->ring_sz) {
		pr_err("len check failed\n");
		return -EIO;
	}

	vma->vm_flags |= VM_RESERVED | VM_SHARED;
	vptr = (char *) slogk_dev->ring;
	ret = 0;

	if (kmalloc_flag) {
		ret = remap_pfn_range(vma,
				start,
				virt_to_phys((void *)
				((unsigned long)slogk_dev->ring)) >> PAGE_SHIFT,
				length,
				vma->vm_page_prot);
		if (ret != 0) {
			pr_err("remap_pfn_range() fails with ret = %d\n",
					ret);
			return -EAGAIN;
		}
	} else {
		while (length > 0) {
			pfn = vmalloc_to_pfn(vptr);

			ret = remap_pfn_range(vma, start, pfn, PAGE_SIZE,
					vma->vm_page_prot);
			if (ret < 0) {
				pr_err("remap_pfn_range() fails with ret = %d\n",
						ret);
				return ret;
			}
			start += PAGE_SIZE;
			vptr += PAGE_SIZE;
			length -= PAGE_SIZE;
		}
	}

	if (!rtic_thread && el2_shared_mem) {
		rtic_thread = kthread_run(seemp_logk_rtic_thread,
				NULL, "seemp_logk_rtic_thread");
		if (IS_ERR(rtic_thread)) {
			pr_err("rtic_thread creation failed\n");
			rtic_thread = NULL;
		}
	}

	return 0;
}

static const struct file_operations seemp_logk_fops = {
	.write = seemp_logk_write,
	.open = seemp_logk_open,
	.unlocked_ioctl = seemp_logk_ioctl,
	.compat_ioctl = seemp_logk_ioctl,
	.mmap = seemp_logk_mmap,
};

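/*
 * The EL2 report carries a raw task_struct pointer; walk the process
 * list under RCU to confirm it still refers to a live process before
 * reading its pid.
 */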
static pid_t seemp_logk_get_pid(struct task_struct *t)
{
	struct task_struct *task;
	pid_t pid;

	if (t == NULL)
		return INVALID_PID;

	rcu_read_lock();
	for_each_process(task) {
		if (task == t) {
			pid = task->pid;
			rcu_read_unlock();
			return pid;
		}
	}
	rcu_read_unlock();
	return INVALID_PID;
}

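/*
 * Poll the EL2 shared page once a second and forward each new, valid
 * report (ordered by sequence number) to seemp_logk_rtic(); the scan
 * resumes from where the previous pass stopped.
 */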
static int seemp_logk_rtic_thread(void *data)
{
	struct el2_report_header_t *header;
	__u64 last_sequence_number = 0;
	int last_pos = -1;
	int i;
	int num_entries = (PAGE_SIZE - sizeof(struct el2_report_header_t))
		/ sizeof(struct el2_report_data_t);

	header = (struct el2_report_header_t *) el2_shared_mem;

	if (header->report_version < KP_EL2_REPORT_REVISION)
		return -EINVAL;

	while (!kthread_should_stop()) {
		for (i = 1; i < num_entries + 1; i++) {
			struct el2_report_data_t *report;
			int cur_pos = last_pos + i;

			if (cur_pos >= num_entries)
				cur_pos -= num_entries;

			report = el2_shared_mem +
				sizeof(struct el2_report_header_t) +
				cur_pos * sizeof(struct el2_report_data_t);

			/* determine legitimacy of report */
			if (report->report_valid &&
					(last_sequence_number == 0
					|| report->sequence_number >
					last_sequence_number)) {
				seemp_logk_rtic(report->report_type,
						seemp_logk_get_pid(
							(struct task_struct *)
							report->actor),
						/* leave this empty until
						 * asset id is provided
						 */
						"",
						report->asset_category,
						report->response);
				last_sequence_number = report->sequence_number;
			} else {
				last_pos = cur_pos - 1;
				break;
			}
		}

		/* periodically check the el2 report every second */
		ssleep(1);
	}

	return 0;
}

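/*
 * Module init: size and allocate the ring buffer, register the char
 * device, and share one page with EL2 via an SCM call so the RTIC
 * thread can read kernel-protection reports.
 */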
__init int seemp_logk_init(void)
{
	int ret;
	dev_t devno = 0;
	struct scm_desc desc = {0};

	num_sources = 0;
	kmalloc_flag = 0;
	block_apps = 0;
	pmask = NULL;

	if (kmalloc_flag && ring_sz > FOUR_MB) {
		pr_err("kmalloc cannot allocate > 4MB\n");
		return -ENOMEM;
	}

	ring_sz = ring_sz * SZ_1M;
	if (ring_sz == 0) {
		pr_err("Too small a ring_sz=%u\n", ring_sz);
		return -EINVAL;
	}

	slogk_dev = kmalloc(sizeof(*slogk_dev), GFP_KERNEL);
	if (slogk_dev == NULL)
		return -ENOMEM;

	slogk_dev->ring_sz = ring_sz;
	slogk_dev->blk_sz = sizeof(struct seemp_logk_blk);
	/* initialize ping-pong buffers */
	ret = ringbuf_init(slogk_dev);
	if (ret < 0) {
		pr_err("Init Failed, ret = %d\n", ret);
		goto pingpong_fail;
	}

	ret = alloc_chrdev_region(&devno, 0, seemp_LOGK_NUM_DEVS,
			seemp_LOGK_DEV_NAME);
	if (ret < 0) {
		pr_err("alloc_chrdev_region failed with ret = %d\n",
				ret);
		goto register_fail;
	}

	slogk_dev->major = MAJOR(devno);

	pr_debug("logk: major# = %d\n", slogk_dev->major);

	cl = class_create(THIS_MODULE, seemp_LOGK_DEV_NAME);
	if (IS_ERR(cl)) {
		pr_err("class create failed\n");
		goto cdev_fail;
	}
	if (IS_ERR(device_create(cl, NULL, devno, NULL,
			seemp_LOGK_DEV_NAME))) {
		pr_err("device create failed\n");
		goto class_destroy_fail;
	}
	cdev_init(&(slogk_dev->cdev), &seemp_logk_fops);

	slogk_dev->cdev.owner = THIS_MODULE;
	ret = cdev_add(&(slogk_dev->cdev), MKDEV(slogk_dev->major, 0), 1);
	if (ret) {
		pr_err("cdev_add failed with ret = %d\n", ret);
		goto class_destroy_fail;
	}

	seemp_logk_attach();
	mutex_init(&slogk_dev->lock);
	init_waitqueue_head(&slogk_dev->readers_wq);
	init_waitqueue_head(&slogk_dev->writers_wq);
	rwlock_init(&filter_lock);

	el2_shared_mem = (void *) __get_free_page(GFP_KERNEL);
	if (el2_shared_mem) {
		desc.arginfo = SCM_ARGS(2, SCM_RW, SCM_VAL);
		desc.args[0] = (uint64_t) virt_to_phys(el2_shared_mem);
		desc.args[1] = PAGE_SIZE;
		ret = scm_call2(EL2_SCM_ID, &desc);
		if (ret || desc.ret[0] || desc.ret[1]) {
			pr_err("SCM call failed with ret val = %d %d %d\n",
					ret, (int)desc.ret[0], (int)desc.ret[1]);
			free_page((unsigned long) el2_shared_mem);
			el2_shared_mem = NULL;
		}
	}

	return 0;
class_destroy_fail:
	class_destroy(cl);
cdev_fail:
	unregister_chrdev_region(devno, seemp_LOGK_NUM_DEVS);
register_fail:
	ringbuf_cleanup(slogk_dev);
pingpong_fail:
	kfree(slogk_dev);
	return -EPERM;
}

__exit void seemp_logk_cleanup(void)
{
	dev_t devno = MKDEV(slogk_dev->major, slogk_dev->minor);

	if (rtic_thread) {
		kthread_stop(rtic_thread);
		rtic_thread = NULL;
	}

	seemp_logk_detach();

	cdev_del(&slogk_dev->cdev);

	unregister_chrdev_region(devno, seemp_LOGK_NUM_DEVS);
	ringbuf_cleanup(slogk_dev);
	kfree(slogk_dev);

	if (pmask != NULL) {
		kfree(pmask);
		pmask = NULL;
	}
}

module_init(seemp_logk_init);
module_exit(seemp_logk_cleanup);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("seemp Observer");