#define pr_fmt(fmt) "kcov: " fmt

#define DISABLE_BRANCH_PROFILING
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/kcov.h>

/*
 * kcov descriptor (one per opened debugfs file).
 * State transitions of the descriptor:
 * - initial state after open()
 * - then there must be a single ioctl(KCOV_INIT_TRACE) call
 * - then, an mmap() call (several calls are allowed but not useful)
 * - then, repeated enable/disable for a task (only one task at a time
 *   is allowed)
 */
struct kcov {
	/*
	 * Reference counter. We keep one for:
	 * - opened file descriptor
	 * - task with enabled coverage (we can't unwire it from another task)
	 */
	atomic_t		refcount;
	/* The lock protects mode, size, area and t. */
	spinlock_t		lock;
	enum kcov_mode		mode;
	/* Size of the arena (in longs for KCOV_MODE_TRACE). */
	unsigned int		size;
	/* Coverage buffer shared with user space. */
	void			*area;
	/* Task for which we collect coverage, or NULL. */
	struct task_struct	*t;
};
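
/*
 * For orientation, a minimal sketch of how user space is expected to drive
 * this interface (the KCOV_* ioctl numbers come from the uapi <linux/kcov.h>
 * header; COVER_SIZE is an arbitrary name for the buffer size chosen by the
 * caller, and the path assumes debugfs is mounted at /sys/kernel/debug):
 *
 *	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
 *	ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
 *	unsigned long *cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
 *				    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	ioctl(fd, KCOV_ENABLE, 0);
 *	__atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED); // reset the counter
 *	read(-1, NULL, 0);                                // syscall under test
 *	unsigned long n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
 *	// cover[1] .. cover[n] now hold the PCs executed by the syscall
 *	ioctl(fd, KCOV_DISABLE, 0);
 */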

/*
 * Entry point from instrumented code.
 * This is called once per basic-block/edge.
 */
void notrace __sanitizer_cov_trace_pc(void)
{
	struct task_struct *t;
	enum kcov_mode mode;

	t = current;
	/*
	 * We are interested in code coverage as a function of syscall inputs,
	 * so we ignore code executed in interrupts.
	 * The checks for whether we are in an interrupt are open-coded, because
	 * 1. We can't use in_interrupt() here, since it also returns true
	 *    when we are inside a local_bh_disable() section.
	 * 2. We don't want to use (in_irq() | in_serving_softirq() | in_nmi()),
	 *    since that leads to slower generated code (three separate tests,
	 *    one for each of the flags).
	 */
	if (!t || (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET
							| NMI_MASK)))
		return;
	mode = READ_ONCE(t->kcov_mode);
	if (mode == KCOV_MODE_TRACE) {
		unsigned long *area;
		unsigned long pos;

		/*
		 * There is some code that runs in interrupts but for which
		 * in_interrupt() returns false (e.g. preempt_schedule_irq()).
		 * READ_ONCE()/barrier() effectively provides load-acquire wrt
		 * interrupts; the paired barrier()/WRITE_ONCE() are in
		 * kcov_ioctl_locked().
		 */
		barrier();
		area = t->kcov_area;
		/* The first word is the number of subsequent PCs. */
		pos = READ_ONCE(area[0]) + 1;
		if (likely(pos < t->kcov_size)) {
			area[pos] = _RET_IP_;
			WRITE_ONCE(area[0], pos);
		}
	}
}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
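
/*
 * To illustrate (a sketch, not literal generated code): with CONFIG_KCOV the
 * kernel is built with the compiler's trace-pc coverage instrumentation
 * (-fsanitize-coverage=trace-pc), which inserts a call to the hook above at
 * the start of every basic block, roughly as if
 *
 *	void foo(int x)
 *	{
 *		__sanitizer_cov_trace_pc();
 *		if (x) {
 *			__sanitizer_cov_trace_pc();
 *			bar();
 *		}
 *	}
 *
 * had been written by hand. This is why the hook is marked notrace and kept
 * as cheap as possible on the fast path.
 */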

static void kcov_get(struct kcov *kcov)
{
	atomic_inc(&kcov->refcount);
}

static void kcov_put(struct kcov *kcov)
{
	if (atomic_dec_and_test(&kcov->refcount)) {
		vfree(kcov->area);
		kfree(kcov);
	}
}

void kcov_task_init(struct task_struct *t)
{
	WRITE_ONCE(t->kcov_mode, KCOV_MODE_DISABLED);
	barrier();
	t->kcov_size = 0;
	t->kcov_area = NULL;
	t->kcov = NULL;
}

void kcov_task_exit(struct task_struct *t)
{
	struct kcov *kcov;

	kcov = t->kcov;
	if (kcov == NULL)
		return;
	spin_lock(&kcov->lock);
	if (WARN_ON(kcov->t != t)) {
		spin_unlock(&kcov->lock);
		return;
	}
	/* Just so we don't leave dangling references behind. */
	kcov_task_init(t);
	kcov->t = NULL;
	spin_unlock(&kcov->lock);
	kcov_put(kcov);
}

static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
{
	int res = 0;
	void *area;
	struct kcov *kcov = vma->vm_file->private_data;
	unsigned long size, off;
	struct page *page;

	area = vmalloc_user(vma->vm_end - vma->vm_start);
	if (!area)
		return -ENOMEM;

	spin_lock(&kcov->lock);
	size = kcov->size * sizeof(unsigned long);
	if (kcov->mode == KCOV_MODE_DISABLED || vma->vm_pgoff != 0 ||
	    vma->vm_end - vma->vm_start != size) {
		res = -EINVAL;
		goto exit;
	}
	if (!kcov->area) {
		kcov->area = area;
		vma->vm_flags |= VM_DONTEXPAND;
		spin_unlock(&kcov->lock);
		for (off = 0; off < size; off += PAGE_SIZE) {
			page = vmalloc_to_page(kcov->area + off);
			if (vm_insert_page(vma, vma->vm_start + off, page))
				WARN_ONCE(1, "vm_insert_page() failed");
		}
		return 0;
	}
exit:
	spin_unlock(&kcov->lock);
	vfree(area);
	return res;
}
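
/*
 * Note on the expected mapping (a sketch; `size` here stands for the value
 * previously passed to KCOV_INIT_TRACE on the same fd):
 *
 *	mmap(NULL, size * sizeof(unsigned long),
 *	     PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * Any other length, or a non-zero offset, is rejected with -EINVAL above.
 */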

static int kcov_open(struct inode *inode, struct file *filep)
{
	struct kcov *kcov;

	kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
	if (!kcov)
		return -ENOMEM;
	atomic_set(&kcov->refcount, 1);
	spin_lock_init(&kcov->lock);
	filep->private_data = kcov;
	return nonseekable_open(inode, filep);
}

static int kcov_close(struct inode *inode, struct file *filep)
{
	kcov_put(filep->private_data);
	return 0;
}

static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
			     unsigned long arg)
{
	struct task_struct *t;
	unsigned long size, unused;

	switch (cmd) {
	case KCOV_INIT_TRACE:
		/*
		 * Enable kcov in trace mode and set up the buffer size.
		 * Must happen before anything else.
		 */
		if (kcov->mode != KCOV_MODE_DISABLED)
			return -EBUSY;
		/*
		 * Size must be at least 2 to hold the current position and
		 * one PC. Later we allocate size * sizeof(unsigned long)
		 * bytes of memory, and that must not overflow.
		 */
		size = arg;
		if (size < 2 || size > INT_MAX / sizeof(unsigned long))
			return -EINVAL;
		kcov->size = size;
		kcov->mode = KCOV_MODE_TRACE;
		return 0;
	case KCOV_ENABLE:
		/*
		 * Enable coverage for the current task.
		 * At this point the user must have enabled trace mode and
		 * mmapped the file. Coverage collection is disabled only
		 * at task exit or voluntarily via KCOV_DISABLE. After that
		 * it can be enabled for another task.
		 */
		unused = arg;
		if (unused != 0 || kcov->mode == KCOV_MODE_DISABLED ||
		    kcov->area == NULL)
			return -EINVAL;
		t = current;
		if (kcov->t != NULL || t->kcov != NULL)
			return -EBUSY;
		/* Cache in task struct for performance. */
		t->kcov_size = kcov->size;
		t->kcov_area = kcov->area;
		/* See comment in __sanitizer_cov_trace_pc(). */
		barrier();
		WRITE_ONCE(t->kcov_mode, kcov->mode);
		t->kcov = kcov;
		kcov->t = t;
		/* This ref is put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	case KCOV_DISABLE:
		/* Disable coverage for the current task. */
		unused = arg;
		if (unused != 0 || current->kcov != kcov)
			return -EINVAL;
		t = current;
		if (WARN_ON(kcov->t != t))
			return -EINVAL;
		kcov_task_init(t);
		kcov->t = NULL;
		kcov_put(kcov);
		return 0;
	default:
		return -ENOTTY;
	}
}

static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct kcov *kcov;
	int res;

	kcov = filep->private_data;
	spin_lock(&kcov->lock);
	res = kcov_ioctl_locked(kcov, cmd, arg);
	spin_unlock(&kcov->lock);
	return res;
}

static const struct file_operations kcov_fops = {
	.open		= kcov_open,
	.unlocked_ioctl	= kcov_ioctl,
	.mmap		= kcov_mmap,
	.release	= kcov_close,
};

static int __init kcov_init(void)
{
	/*
	 * The kcov debugfs file won't ever be removed, so there is no need
	 * to protect it against removal races. The use of
	 * debugfs_create_file_unsafe() is actually safe here.
	 */
	if (!debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops)) {
		pr_err("failed to create kcov in debugfs\n");
		return -ENOMEM;
	}
	return 0;
}

device_initcall(kcov_init);