/*
 * Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "seemp: %s: " fmt, __func__

#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/seemp_instrumentation.h>
#include <soc/qcom/scm.h>

#include "seemp_logk.h"
#include "seemp_ringbuf.h"

#ifndef VM_RESERVED
#define VM_RESERVED (VM_DONTEXPAND | VM_DONTDUMP)
#endif

#define MASK_BUFFER_SIZE 256
#define FOUR_MB 4
#define YEAR_BASE 1900

#define EL2_SCM_ID 0x02001902

static struct seemp_logk_dev *slogk_dev;
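
/*
 * ring_sz is expressed in MB here; seemp_logk_init() scales it to bytes
 * with SZ_1M.
 */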
static unsigned int ring_sz = FOUR_MB;

/*
 * default is best effort; apps do not get blocked
 */
static unsigned int block_apps;

/*
 * When this flag is turned on, the ring buffer is allocated with
 * kmalloc instead of vmalloc. The default is vmalloc; kmalloc is
 * limited to 4MB.
 */
unsigned int kmalloc_flag;

static struct class *cl;

static rwlock_t filter_lock;
static struct seemp_source_mask *pmask;
static unsigned int num_sources;

static void *el2_shared_mem;
static struct task_struct *rtic_thread;

static long seemp_logk_reserve_rdblks(
		struct seemp_logk_dev *sdev, unsigned long arg);
static long seemp_logk_set_mask(unsigned long arg);
static long seemp_logk_set_mapping(unsigned long arg);
static long seemp_logk_check_filter(unsigned long arg);
static int seemp_logk_rtic_thread(void *data);

void *(*seemp_logk_kernel_begin)(char **buf);

void (*seemp_logk_kernel_end)(void *blck);

/*
 * Kernel logging is done in four steps:
 * (1) fetch a block and fill in everything except the payload;
 * (2) return the payload pointer to the caller;
 * (3) the caller fills its data directly into the payload area;
 * (4) the caller invokes finish_record() to finish the write.
 */
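
/*
 * Illustrative call sequence for a kernel-side client (a sketch only,
 * not part of this driver; PAYLOAD_SZ stands in for whatever size
 * seemp_logk.h gives blk_payload.msg):
 *
 *	char *payload = NULL;
 *	void *blk;
 *
 *	if (seemp_logk_kernel_begin) {
 *		blk = seemp_logk_kernel_begin(&payload);
 *		if (payload)
 *			scnprintf(payload, PAYLOAD_SZ, "my event");
 *		seemp_logk_kernel_end(blk);
 *	}
 */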
void *seemp_logk_kernel_start_record(char **buf)
{
	struct seemp_logk_blk *blk;
	struct timespec now;
	struct tm ts;
	int idx;
	int ret;

	DEFINE_WAIT(write_wait);

	ret = 0;
	idx = 0;
	now = current_kernel_time();
	blk = ringbuf_fetch_wr_block(slogk_dev);
	if (!blk) {
		/*
		 * there is no blk to write;
		 * if block_apps == 0, quietly return
		 */
		if (!block_apps) {
			*buf = NULL;
			return NULL;
		}
		/* else wait for blks to become available */
		while (1) {
			mutex_lock(&slogk_dev->lock);
			prepare_to_wait(&slogk_dev->writers_wq,
					&write_wait, TASK_INTERRUPTIBLE);
			ret = (slogk_dev->num_write_avail_blks <= 0);
			if (!ret) {
				/* don't have to wait */
				break;
			}
			mutex_unlock(&slogk_dev->lock);
			if (signal_pending(current)) {
				ret = -EINTR;
				break;
			}
			schedule();
		}

		finish_wait(&slogk_dev->writers_wq, &write_wait);
		if (ret)
			return NULL;

		idx = slogk_dev->write_idx;
		slogk_dev->write_idx =
			(slogk_dev->write_idx + 1) % slogk_dev->num_tot_blks;
		slogk_dev->num_write_avail_blks--;
		slogk_dev->num_write_in_prog_blks++;
		slogk_dev->num_writers++;

		blk = &slogk_dev->ring[idx];
		/* mark block invalid */
		blk->status = 0x0;
		mutex_unlock(&slogk_dev->lock);
	}

	blk->version = OBSERVER_VERSION;
	blk->pid = current->tgid;
	blk->tid = current->pid;
	blk->uid = (current_uid()).val;
	blk->sec = now.tv_sec;
	blk->nsec = now.tv_nsec;
	strlcpy(blk->appname, current->comm, TASK_COMM_LEN);
	time_to_tm(now.tv_sec, 0, &ts);
	ts.tm_year += YEAR_BASE;
	ts.tm_mon += 1;

	snprintf(blk->ts, TS_SIZE, "%04ld-%02d-%02d %02d:%02d:%02d",
			ts.tm_year, ts.tm_mon, ts.tm_mday,
			ts.tm_hour, ts.tm_min, ts.tm_sec);

	*buf = blk->payload.msg;

	return blk;
}

void seemp_logk_kernel_end_record(void *blck)
{
	struct seemp_logk_blk *blk = (struct seemp_logk_blk *)blck;

	if (blk) {
		/* update status at the very end */
		blk->status |= 0x1;
		blk->uid = (current_uid()).val;

		ringbuf_finish_writer(slogk_dev, blk);
	}
}

static int seemp_logk_usr_record(const char __user *buf, size_t count)
{
	struct seemp_logk_blk *blk;
	struct seemp_logk_blk usr_blk;
	struct seemp_logk_blk *local_blk;
	struct timespec now;
	struct tm ts;
	int idx, ret;

	DEFINE_WAIT(write_wait);

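	/*
	 * Copy only the header fields and the payload from the user
	 * block; the kernel fills in the timestamp and appname itself.
	 */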
	if (buf) {
		local_blk = (struct seemp_logk_blk *)buf;
		if (copy_from_user(&usr_blk.pid, &local_blk->pid,
					sizeof(usr_blk.pid)) != 0)
			return -EFAULT;
		if (copy_from_user(&usr_blk.tid, &local_blk->tid,
					sizeof(usr_blk.tid)) != 0)
			return -EFAULT;
		if (copy_from_user(&usr_blk.uid, &local_blk->uid,
					sizeof(usr_blk.uid)) != 0)
			return -EFAULT;
		if (copy_from_user(&usr_blk.len, &local_blk->len,
					sizeof(usr_blk.len)) != 0)
			return -EFAULT;
		if (copy_from_user(&usr_blk.payload, &local_blk->payload,
					sizeof(struct blk_payload)) != 0)
			return -EFAULT;
	} else {
		return -EFAULT;
	}
	idx = ret = 0;
	now = current_kernel_time();
	blk = ringbuf_fetch_wr_block(slogk_dev);
	if (!blk) {
		if (!block_apps)
			return 0;
		while (1) {
			mutex_lock(&slogk_dev->lock);
			prepare_to_wait(&slogk_dev->writers_wq,
					&write_wait,
					TASK_INTERRUPTIBLE);
			ret = (slogk_dev->num_write_avail_blks <= 0);
			if (!ret)
				break;
			mutex_unlock(&slogk_dev->lock);
			if (signal_pending(current)) {
				ret = -EINTR;
				break;
			}
			schedule();
		}
		finish_wait(&slogk_dev->writers_wq, &write_wait);
		if (ret)
			return -EINTR;

		idx = slogk_dev->write_idx;
		slogk_dev->write_idx =
			(slogk_dev->write_idx + 1) % slogk_dev->num_tot_blks;
		slogk_dev->num_write_avail_blks--;
		slogk_dev->num_write_in_prog_blks++;
		slogk_dev->num_writers++;
		blk = &slogk_dev->ring[idx];
		/* mark block invalid */
		blk->status = 0x0;
		mutex_unlock(&slogk_dev->lock);
	}
	if (usr_blk.len > sizeof(struct blk_payload) - 1)
		usr_blk.len = sizeof(struct blk_payload) - 1;

	memcpy(&blk->payload, &usr_blk.payload, sizeof(struct blk_payload));
	blk->pid = usr_blk.pid;
	blk->uid = usr_blk.uid;
	blk->tid = usr_blk.tid;
	blk->sec = now.tv_sec;
	blk->nsec = now.tv_nsec;
	time_to_tm(now.tv_sec, 0, &ts);
	ts.tm_year += YEAR_BASE;
	ts.tm_mon += 1;
	/* four-digit year, matching seemp_logk_kernel_start_record() */
	snprintf(blk->ts, TS_SIZE, "%04ld-%02d-%02d %02d:%02d:%02d",
			ts.tm_year, ts.tm_mon, ts.tm_mday,
			ts.tm_hour, ts.tm_min, ts.tm_sec);
	strlcpy(blk->appname, current->comm, TASK_COMM_LEN);
	blk->status |= 0x1;
	ringbuf_finish_writer(slogk_dev, blk);
	return ret;
}

static void seemp_logk_attach(void)
{
	seemp_logk_kernel_end = seemp_logk_kernel_end_record;
	seemp_logk_kernel_begin = seemp_logk_kernel_start_record;
}

static void seemp_logk_detach(void)
{
	seemp_logk_kernel_begin = NULL;
	seemp_logk_kernel_end = NULL;
}
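
/*
 * Userspace hands in a fully formed struct seemp_logk_blk through
 * write(); only pid/tid/uid/len and the payload survive the copy. A
 * sketch of the caller side (the node name comes from
 * seemp_LOGK_DEV_NAME):
 *
 *	struct seemp_logk_blk blk = { .len = ..., .payload = ... };
 *	int fd = open("/dev/<seemp_LOGK_DEV_NAME>", O_WRONLY);
 *
 *	write(fd, &blk, sizeof(blk));
 */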
static ssize_t
seemp_logk_write(struct file *file, const char __user *buf, size_t count,
		loff_t *ppos)
{
	return seemp_logk_usr_record(buf, count);
}

static int
seemp_logk_open(struct inode *inode, struct file *filp)
{
	int ret;

	/* disallow seeks on this file */
	ret = nonseekable_open(inode, filp);
	if (ret) {
		pr_err("nonseekable_open failed, ret = %d\n", ret);
		return ret;
	}

	slogk_dev->minor = iminor(inode);
	filp->private_data = slogk_dev;

	return 0;
}
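
/*
 * The mask is a bit vector with one bit per source; a set bit means the
 * source is masked off, so a source is "on" when its bit is clear.
 */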
static bool seemp_logk_get_bit_from_vector(__u8 *pVec, __u32 index)
{
	unsigned int byte_num = index / 8;
	unsigned int bit_num = index % 8;
	unsigned char byte;

	if (byte_num >= MASK_BUFFER_SIZE)
		return false;

	byte = pVec[byte_num];

	return !(byte & (1 << bit_num));
}

static long seemp_logk_ioctl(struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	struct seemp_logk_dev *sdev;
	int ret;

	sdev = (struct seemp_logk_dev *) filp->private_data;

	if (cmd == SEEMP_CMD_RESERVE_RDBLKS) {
		return seemp_logk_reserve_rdblks(sdev, arg);
	} else if (cmd == SEEMP_CMD_RELEASE_RDBLKS) {
		mutex_lock(&sdev->lock);
		sdev->read_idx = (sdev->read_idx + sdev->num_read_in_prog_blks)
			% sdev->num_tot_blks;
		sdev->num_write_avail_blks += sdev->num_read_in_prog_blks;
		ret = sdev->num_read_in_prog_blks;
		sdev->num_read_in_prog_blks = 0;
		mutex_unlock(&sdev->lock);
		/* wake up any waiting writers */
		if (ret && block_apps)
			wake_up_interruptible(&sdev->writers_wq);
	} else if (cmd == SEEMP_CMD_GET_RINGSZ) {
		if (copy_to_user((unsigned int *)arg, &sdev->ring_sz,
				sizeof(unsigned int)))
			return -EFAULT;
	} else if (cmd == SEEMP_CMD_GET_BLKSZ) {
		if (copy_to_user((unsigned int *)arg, &sdev->blk_sz,
				sizeof(unsigned int)))
			return -EFAULT;
	} else if (cmd == SEEMP_CMD_SET_MASK) {
		return seemp_logk_set_mask(arg);
	} else if (cmd == SEEMP_CMD_SET_MAPPING) {
		return seemp_logk_set_mapping(arg);
	} else if (cmd == SEEMP_CMD_CHECK_FILTER) {
		return seemp_logk_check_filter(arg);
	} else {
		pr_err("Invalid Request %X\n", cmd);
		return -ENOIOCTLCMD;
	}
	return 0;
}

static long seemp_logk_reserve_rdblks(
		struct seemp_logk_dev *sdev, unsigned long arg)
{
	int ret;
	struct read_range rrange;

	DEFINE_WAIT(read_wait);

	mutex_lock(&sdev->lock);
	if (sdev->num_writers > 0 || sdev->num_read_avail_blks <= 0) {
		ret = -EPERM;
		pr_debug("(reserve): blocking, cannot read.\n");
		pr_debug("num_writers=%d num_read_avail_blks=%d\n",
				sdev->num_writers,
				sdev->num_read_avail_blks);
		mutex_unlock(&sdev->lock);
		/*
		 * unlock the device,
		 * wait on a wait queue,
		 * after waking, grab the dev lock again
		 */
		while (1) {
			mutex_lock(&sdev->lock);
			prepare_to_wait(&sdev->readers_wq, &read_wait,
					TASK_INTERRUPTIBLE);
			ret = (sdev->num_writers > 0 ||
					sdev->num_read_avail_blks <= 0);
			if (!ret) {
				/* don't have to wait */
				break;
			}
			mutex_unlock(&sdev->lock);
			if (signal_pending(current)) {
				ret = -EINTR;
				break;
			}
			schedule();
		}

		finish_wait(&sdev->readers_wq, &read_wait);
		if (ret)
			return -EINTR;
	}

	/* sdev->lock is held at this point */
	sdev->num_read_in_prog_blks = sdev->num_read_avail_blks;
	sdev->num_read_avail_blks = 0;
	rrange.start_idx = sdev->read_idx;
	rrange.num = sdev->num_read_in_prog_blks;
	mutex_unlock(&sdev->lock);

	if (copy_to_user((unsigned int *)arg, &rrange,
			sizeof(struct read_range)))
		return -EFAULT;

	return 0;
}

static long seemp_logk_set_mask(unsigned long arg)
{
	__u8 buffer[MASK_BUFFER_SIZE];
	int i;
	unsigned int num_elements;

	if (copy_from_user(&num_elements,
			(unsigned int __user *) arg, sizeof(unsigned int)))
		return -EFAULT;

	read_lock(&filter_lock);
	if (num_sources == 0) {
		read_unlock(&filter_lock);
		return -EINVAL;
	}

	if (num_elements == 0 ||
			DIV_ROUND_UP(num_sources, 8) > MASK_BUFFER_SIZE) {
		read_unlock(&filter_lock);
		return -EINVAL;
	}

	if (copy_from_user(buffer,
			(__u8 *)arg, DIV_ROUND_UP(num_sources, 8))) {
		read_unlock(&filter_lock);
		return -EFAULT;
	}

	read_unlock(&filter_lock);
	write_lock(&filter_lock);
	if (num_elements != num_sources) {
		write_unlock(&filter_lock);
		return -EPERM;
	}

	for (i = 0; i < num_sources; i++) {
		pmask[i].isOn =
			seemp_logk_get_bit_from_vector(
				(__u8 *)buffer, i);
	}
	write_unlock(&filter_lock);
	return 0;
}

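/*
 * The SET_MAPPING argument is a packed __u32 array: one count followed
 * by that many source hashes.
 *
 *	[ num_elements | hash[0] | hash[1] | ... | hash[num_elements - 1] ]
 */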
static long seemp_logk_set_mapping(unsigned long arg)
{
	__u32 num_elements;
	__u32 *pbuffer;
	int i;
	struct seemp_source_mask *pnewmask;

	if (copy_from_user(&num_elements,
			(__u32 __user *)arg, sizeof(__u32)))
		return -EFAULT;

	if ((num_elements == 0) || (num_elements >
			(UINT_MAX / sizeof(struct seemp_source_mask))))
		return -EFAULT;

	write_lock(&filter_lock);
	if (pmask != NULL) {
		/*
		 * Mask is getting set again.
		 * seemp_core was probably restarted.
		 */
		struct seemp_source_mask *ptempmask;

		num_sources = 0;
		ptempmask = pmask;
		pmask = NULL;
		kfree(ptempmask);
	}
	write_unlock(&filter_lock);
	pbuffer = kmalloc(sizeof(struct seemp_source_mask)
			* num_elements, GFP_KERNEL);
	if (pbuffer == NULL)
		return -ENOMEM;

	/*
	 * Use our new table as scratch space for now.
	 * We copy an ordered list of hash values into our buffer.
	 */
	if (copy_from_user(pbuffer, &((__u32 __user *)arg)[1],
			num_elements * sizeof(unsigned int))) {
		kfree(pbuffer);
		return -EFAULT;
	}
	/*
	 * We arrange the user data into a more usable form.
	 * This is done in place.
	 */
	pnewmask = (struct seemp_source_mask *) pbuffer;
	for (i = num_elements - 1; i >= 0; i--) {
		pnewmask[i].hash = pbuffer[i];
		/* Observer is off by default */
		pnewmask[i].isOn = 0;
	}
	write_lock(&filter_lock);
	pmask = pnewmask;
	num_sources = num_elements;
	write_unlock(&filter_lock);
	return 0;
}

static long seemp_logk_check_filter(unsigned long arg)
{
	int i;
	unsigned int hash = (unsigned int) arg;

	/*
	 * This lock may be a bit long.
	 * If it is a problem, it can be fixed.
	 */
	read_lock(&filter_lock);
	for (i = 0; i < num_sources; i++) {
		if (hash == pmask[i].hash) {
			int result = pmask[i].isOn;

			read_unlock(&filter_lock);
			return result;
		}
	}
	read_unlock(&filter_lock);
	return 0;
}

static int seemp_logk_mmap(struct file *filp,
		struct vm_area_struct *vma)
{
	int ret;
	char *vptr;
	unsigned long length, pfn;
	unsigned long start = vma->vm_start;

	length = vma->vm_end - vma->vm_start;

	if (length > (unsigned long) slogk_dev->ring_sz) {
		pr_err("len check failed\n");
		return -EIO;
	}

	vma->vm_flags |= VM_RESERVED | VM_SHARED;
	vptr = (char *) slogk_dev->ring;
	ret = 0;

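	/*
	 * A kmalloc'd ring is physically contiguous and can be remapped
	 * in one call; a vmalloc'd ring has to be translated and
	 * remapped one page at a time.
	 */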
	if (kmalloc_flag) {
		ret = remap_pfn_range(vma,
				start,
				virt_to_phys((void *)
				((unsigned long)slogk_dev->ring)) >> PAGE_SHIFT,
				length,
				vma->vm_page_prot);
		if (ret != 0) {
			pr_err("remap_pfn_range() fails with ret = %d\n",
					ret);
			return -EAGAIN;
		}
	} else {
		while (length > 0) {
			pfn = vmalloc_to_pfn(vptr);

			ret = remap_pfn_range(vma, start, pfn, PAGE_SIZE,
					vma->vm_page_prot);
			if (ret < 0) {
				pr_err("remap_pfn_range() fails with ret = %d\n",
						ret);
				return ret;
			}
			start += PAGE_SIZE;
			vptr += PAGE_SIZE;
			length -= PAGE_SIZE;
		}
	}

	if (!rtic_thread && el2_shared_mem) {
		rtic_thread = kthread_run(seemp_logk_rtic_thread,
				NULL, "seemp_logk_rtic_thread");
		if (IS_ERR(rtic_thread)) {
			pr_err("rtic_thread creation failed\n");
			rtic_thread = NULL;
		}
	}

	return 0;
}

static const struct file_operations seemp_logk_fops = {
	.write = seemp_logk_write,
	.open = seemp_logk_open,
	.unlocked_ioctl = seemp_logk_ioctl,
	.compat_ioctl = seemp_logk_ioctl,
	.mmap = seemp_logk_mmap,
};

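/*
 * Poll the page shared with EL2 for new RTIC reports. The page starts
 * with an el2_report_header_t followed by as many el2_report_data_t
 * slots as fit in the remainder of the page; slots are consumed in
 * circular order and duplicates are skipped by sequence number.
 */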
static int seemp_logk_rtic_thread(void *data)
{
	struct el2_report_header_t *header;
	__u64 last_sequence_number = 0;
	int last_pos = -1;
	int i;
	int num_entries = (PAGE_SIZE - sizeof(struct el2_report_header_t))
			/ sizeof(struct el2_report_data_t);

	header = (struct el2_report_header_t *) el2_shared_mem;

	while (!kthread_should_stop()) {
		for (i = 1; i < num_entries + 1; i++) {
			struct el2_report_data_t *report;
			int cur_pos = last_pos + i;

			if (cur_pos >= num_entries)
				cur_pos -= num_entries;

			report = el2_shared_mem +
				sizeof(struct el2_report_header_t) +
				cur_pos * sizeof(struct el2_report_data_t);

			/* determine legitimacy of report */
			if (report->report_valid &&
					(last_sequence_number == 0
					|| report->sequence_number >
					last_sequence_number)) {
				seemp_logk_rtic(report->report_type,
						report->actor,
						/*
						 * leave this empty until an
						 * asset id is provided
						 */
						"",
						report->asset_category,
						report->response);
				last_sequence_number = report->sequence_number;
			} else {
				last_pos = cur_pos - 1;
				break;
			}
		}

		/* periodically check the el2 report every second */
		ssleep(1);
	}

	return 0;
}

__init int seemp_logk_init(void)
{
	int ret;
	dev_t devno = 0;
	struct scm_desc desc = {0};

	num_sources = 0;
	kmalloc_flag = 0;
	block_apps = 0;
	pmask = NULL;

	if (kmalloc_flag && ring_sz > FOUR_MB) {
		pr_err("kmalloc cannot allocate > 4MB\n");
		return -ENOMEM;
	}

	ring_sz = ring_sz * SZ_1M;
	if (ring_sz == 0) {
		pr_err("Too small a ring_sz=%u\n", ring_sz);
		return -EINVAL;
	}

	slogk_dev = kmalloc(sizeof(*slogk_dev), GFP_KERNEL);
	if (slogk_dev == NULL)
		return -ENOMEM;

	slogk_dev->ring_sz = ring_sz;
	slogk_dev->blk_sz = sizeof(struct seemp_logk_blk);
	/* initialize ping-pong buffers */
	ret = ringbuf_init(slogk_dev);
	if (ret < 0) {
		pr_err("Init Failed, ret = %d\n", ret);
		goto pingpong_fail;
	}

	ret = alloc_chrdev_region(&devno, 0, seemp_LOGK_NUM_DEVS,
			seemp_LOGK_DEV_NAME);
	if (ret < 0) {
		pr_err("alloc_chrdev_region failed with ret = %d\n",
				ret);
		goto register_fail;
	}

	slogk_dev->major = MAJOR(devno);

	pr_debug("logk: major# = %d\n", slogk_dev->major);

	cl = class_create(THIS_MODULE, seemp_LOGK_DEV_NAME);
	if (IS_ERR(cl)) {
		pr_err("class create failed\n");
		goto cdev_fail;
	}
	if (IS_ERR(device_create(cl, NULL, devno, NULL,
			seemp_LOGK_DEV_NAME))) {
		pr_err("device create failed\n");
		goto class_destroy_fail;
	}
	cdev_init(&(slogk_dev->cdev), &seemp_logk_fops);

	slogk_dev->cdev.owner = THIS_MODULE;
	ret = cdev_add(&(slogk_dev->cdev), MKDEV(slogk_dev->major, 0), 1);
	if (ret) {
		pr_err("cdev_add failed with ret = %d\n", ret);
		goto class_destroy_fail;
	}

	seemp_logk_attach();
	mutex_init(&slogk_dev->lock);
	init_waitqueue_head(&slogk_dev->readers_wq);
	init_waitqueue_head(&slogk_dev->writers_wq);
	rwlock_init(&filter_lock);

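	/*
	 * Share one page with EL2 so it can deposit RTIC reports: the
	 * SCM call passes the physical address and size to the secure
	 * side. On failure the page is freed and RTIC reporting simply
	 * stays disabled (the thread is only started from mmap when
	 * el2_shared_mem is non-NULL).
	 */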
	el2_shared_mem = (void *) __get_free_page(GFP_KERNEL);
	if (el2_shared_mem) {
		desc.arginfo = SCM_ARGS(2, SCM_RW, SCM_VAL);
		desc.args[0] = (uint64_t) virt_to_phys(el2_shared_mem);
		desc.args[1] = PAGE_SIZE;
		ret = scm_call2(EL2_SCM_ID, &desc);
		if (ret || desc.ret[0] || desc.ret[1]) {
			pr_err("SCM call failed with ret val = %d %d %d\n",
					ret, (int)desc.ret[0], (int)desc.ret[1]);
			free_page((unsigned long) el2_shared_mem);
			el2_shared_mem = NULL;
		}
	}

	return 0;
class_destroy_fail:
	class_destroy(cl);
cdev_fail:
	unregister_chrdev_region(devno, seemp_LOGK_NUM_DEVS);
register_fail:
	ringbuf_cleanup(slogk_dev);
pingpong_fail:
	kfree(slogk_dev);
	return -EPERM;
}

__exit void seemp_logk_cleanup(void)
{
	dev_t devno = MKDEV(slogk_dev->major, slogk_dev->minor);

	if (rtic_thread) {
		kthread_stop(rtic_thread);
		rtic_thread = NULL;
	}

	seemp_logk_detach();

	cdev_del(&slogk_dev->cdev);

	unregister_chrdev_region(devno, seemp_LOGK_NUM_DEVS);
	ringbuf_cleanup(slogk_dev);
	kfree(slogk_dev);

	if (pmask != NULL) {
		kfree(pmask);
		pmask = NULL;
	}
}

module_init(seemp_logk_init);
module_exit(seemp_logk_cleanup);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("seemp Observer");