#define pr_fmt(fmt) "kcov: " fmt

#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/kcov.h>

/*
 * kcov descriptor (one per opened debugfs file).
 * State transitions of the descriptor:
 * - initial state after open()
 * - then there must be a single ioctl(KCOV_INIT_TRACE) call
 * - then, mmap() call (several calls are allowed but not useful)
 * - then, repeated enable/disable for a task (only one task at a time allowed)
 */
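
/*
 * For reference, a minimal user-space usage sketch following the
 * transitions above (a hedged example, not part of this file; assumes
 * debugfs is mounted at /sys/kernel/debug and COVER_SIZE is a
 * caller-chosen buffer size in words):
 *
 *	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
 *	ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
 *	unsigned long *cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
 *				    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	ioctl(fd, KCOV_ENABLE, 0);
 *	cover[0] = 0;
 *	... issue the syscall under test; cover[0] PCs land in cover[1..] ...
 *	ioctl(fd, KCOV_DISABLE, 0);
 */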
struct kcov {
	/*
	 * Reference counter. We keep one for:
	 * - opened file descriptor
	 * - task with enabled coverage (we can't unwire it from another task)
	 */
	atomic_t		refcount;
	/* The lock protects mode, size, area and t. */
	spinlock_t		lock;
	enum kcov_mode		mode;
	/* Size of arena (in longs for KCOV_MODE_TRACE). */
	unsigned		size;
	/* Coverage buffer shared with user space. */
	void			*area;
	/* Task for which we collect coverage, or NULL. */
	struct task_struct	*t;
};
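
/*
 * Layout of the shared coverage buffer, as used by
 * __sanitizer_cov_trace_pc() below:
 *
 *	area[0]         - number of PCs stored so far
 *	area[1..size-1] - the PCs themselves
 */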

/*
 * Entry point from instrumented code.
 * This is called once per basic-block/edge.
 * (A sketch of the compiler-emitted instrumentation follows the
 * function below.)
 */
void notrace __sanitizer_cov_trace_pc(void)
{
	struct task_struct *t;
	enum kcov_mode mode;

	t = current;
	/*
	 * We are interested in code coverage as a function of syscall inputs,
	 * so we ignore code executed in interrupts.
	 */
	if (!t || in_interrupt())
		return;
	mode = READ_ONCE(t->kcov_mode);
	if (mode == KCOV_MODE_TRACE) {
		unsigned long *area;
		unsigned long pos;

		/*
		 * There is some code that runs in interrupts but for which
		 * in_interrupt() returns false (e.g. preempt_schedule_irq()).
		 * READ_ONCE()/barrier() effectively provides load-acquire
		 * semantics with respect to interrupts; the paired
		 * barrier()/WRITE_ONCE() is in kcov_ioctl_locked().
		 */
		barrier();
		area = t->kcov_area;
		/* The first word is the number of subsequent PCs. */
		pos = READ_ONCE(area[0]) + 1;
		if (likely(pos < t->kcov_size)) {
			area[pos] = _RET_IP_;
			WRITE_ONCE(area[0], pos);
		}
	}
}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
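
/*
 * A hedged sketch of where the calls above come from: with the
 * -fsanitize-coverage=trace-pc compiler flag that CONFIG_KCOV builds
 * with, each basic block is assumed to be prefixed with a call to
 * __sanitizer_cov_trace_pc(), conceptually:
 *
 *	void foo(int x)
 *	{
 *		__sanitizer_cov_trace_pc();
 *		if (x) {
 *			__sanitizer_cov_trace_pc();
 *			bar();
 *		}
 *	}
 *
 * _RET_IP_ in the handler then identifies the covered block by its
 * call site.
 */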

static void kcov_get(struct kcov *kcov)
{
	atomic_inc(&kcov->refcount);
}

static void kcov_put(struct kcov *kcov)
{
	if (atomic_dec_and_test(&kcov->refcount)) {
		vfree(kcov->area);
		kfree(kcov);
	}
}

void kcov_task_init(struct task_struct *t)
{
	t->kcov_mode = KCOV_MODE_DISABLED;
	t->kcov_size = 0;
	t->kcov_area = NULL;
	t->kcov = NULL;
}

void kcov_task_exit(struct task_struct *t)
{
	struct kcov *kcov;

	kcov = t->kcov;
	if (kcov == NULL)
		return;
	spin_lock(&kcov->lock);
	if (WARN_ON(kcov->t != t)) {
		spin_unlock(&kcov->lock);
		return;
	}
	/* Just to not leave dangling references behind. */
	kcov_task_init(t);
	kcov->t = NULL;
	spin_unlock(&kcov->lock);
	kcov_put(kcov);
}

static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
{
	int res = 0;
	void *area;
	struct kcov *kcov = vma->vm_file->private_data;
	unsigned long size, off;
	struct page *page;
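
	/*
	 * Allocate the buffer before taking the spinlock: vmalloc_user()
	 * may sleep, which is not allowed while holding a spinlock.
	 */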
	area = vmalloc_user(vma->vm_end - vma->vm_start);
	if (!area)
		return -ENOMEM;

	spin_lock(&kcov->lock);
	size = kcov->size * sizeof(unsigned long);
	if (kcov->mode == KCOV_MODE_DISABLED || vma->vm_pgoff != 0 ||
	    vma->vm_end - vma->vm_start != size) {
		res = -EINVAL;
		goto exit;
	}
	if (!kcov->area) {
		kcov->area = area;
		vma->vm_flags |= VM_DONTEXPAND;
		spin_unlock(&kcov->lock);
		for (off = 0; off < size; off += PAGE_SIZE) {
			page = vmalloc_to_page(kcov->area + off);
			if (vm_insert_page(vma, vma->vm_start + off, page))
				WARN_ONCE(1, "vm_insert_page() failed");
		}
		return 0;
	}
exit:
	spin_unlock(&kcov->lock);
	vfree(area);
	return res;
}

static int kcov_open(struct inode *inode, struct file *filep)
{
	struct kcov *kcov;

	kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
	if (!kcov)
		return -ENOMEM;
	atomic_set(&kcov->refcount, 1);
	spin_lock_init(&kcov->lock);
	filep->private_data = kcov;
	return nonseekable_open(inode, filep);
}

static int kcov_close(struct inode *inode, struct file *filep)
{
	kcov_put(filep->private_data);
	return 0;
}

static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
			     unsigned long arg)
{
	struct task_struct *t;
	unsigned long size, unused;

	switch (cmd) {
	case KCOV_INIT_TRACE:
		/*
		 * Enable kcov in trace mode and set up the buffer size.
		 * Must happen before anything else.
		 */
		if (kcov->mode != KCOV_MODE_DISABLED)
			return -EBUSY;
		/*
		 * Size must be at least 2 to hold the current position and
		 * one PC. Later we allocate size * sizeof(unsigned long)
		 * bytes, and that product must not overflow.
		 */
		size = arg;
		if (size < 2 || size > INT_MAX / sizeof(unsigned long))
			return -EINVAL;
		kcov->size = size;
		kcov->mode = KCOV_MODE_TRACE;
		return 0;
	case KCOV_ENABLE:
		/*
		 * Enable coverage for the current task.
		 * At this point the user must have enabled trace mode and
		 * mmapped the file. Coverage collection is disabled only at
		 * task exit or voluntarily via KCOV_DISABLE. After that it
		 * can be enabled for another task.
		 */
		unused = arg;
		if (unused != 0 || kcov->mode == KCOV_MODE_DISABLED ||
		    kcov->area == NULL)
			return -EINVAL;
		if (kcov->t != NULL)
			return -EBUSY;
		t = current;
		/* Cache in task struct for performance. */
		t->kcov_size = kcov->size;
		t->kcov_area = kcov->area;
		/* See comment in __sanitizer_cov_trace_pc(). */
		barrier();
		WRITE_ONCE(t->kcov_mode, kcov->mode);
		t->kcov = kcov;
		kcov->t = t;
		/* This reference is put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	case KCOV_DISABLE:
		/* Disable coverage for the current task. */
		unused = arg;
		if (unused != 0 || current->kcov != kcov)
			return -EINVAL;
		t = current;
		if (WARN_ON(kcov->t != t))
			return -EINVAL;
		kcov_task_init(t);
		kcov->t = NULL;
		kcov_put(kcov);
		return 0;
	default:
		return -ENOTTY;
	}
}

static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct kcov *kcov;
	int res;

	kcov = filep->private_data;
	spin_lock(&kcov->lock);
	res = kcov_ioctl_locked(kcov, cmd, arg);
	spin_unlock(&kcov->lock);
	return res;
}

static const struct file_operations kcov_fops = {
	.open		= kcov_open,
	.unlocked_ioctl	= kcov_ioctl,
	.mmap		= kcov_mmap,
	.release	= kcov_close,
};

static int __init kcov_init(void)
{
	if (!debugfs_create_file("kcov", 0600, NULL, NULL, &kcov_fops)) {
		pr_err("failed to create kcov in debugfs\n");
		return -ENOMEM;
	}
	return 0;
}

device_initcall(kcov_init);