blob: fad51446590f321f0af764263a1fa4363179fa60
1 | #define pr_fmt(fmt) "kcov: " fmt |
2 | |
3 | #define DISABLE_BRANCH_PROFILING |
4 | #include <linux/atomic.h> |
5 | #include <linux/compiler.h> |
6 | #include <linux/errno.h> |
7 | #include <linux/export.h> |
8 | #include <linux/types.h> |
9 | #include <linux/file.h> |
10 | #include <linux/fs.h> |
11 | #include <linux/init.h> |
12 | #include <linux/mm.h> |
13 | #include <linux/preempt.h> |
14 | #include <linux/printk.h> |
15 | #include <linux/sched.h> |
16 | #include <linux/slab.h> |
17 | #include <linux/spinlock.h> |
18 | #include <linux/vmalloc.h> |
19 | #include <linux/debugfs.h> |
20 | #include <linux/uaccess.h> |
21 | #include <linux/kcov.h> |
22 | #include <asm/setup.h> |
23 | |
24 | /* Number of 64-bit words written per one comparison: */ |
25 | #define KCOV_WORDS_PER_CMP 4 |
26 | |
/*
 * kcov descriptor (one per opened debugfs file).
 * State transitions of the descriptor:
 *  - initial state after open()
 *  - then there must be a single ioctl(KCOV_INIT_TRACE) call
 *  - then, mmap() call (several calls are allowed but not useful)
 *  - then, ioctl(KCOV_ENABLE, arg), where arg is
 *	KCOV_TRACE_PC - to trace only the PCs
 *	or
 *	KCOV_TRACE_CMP - to trace only the comparison operands
 *  - then, ioctl(KCOV_DISABLE) to disable the task.
 * Enabling/disabling ioctls can be repeated (only one task a time allowed).
 */
struct kcov {
	/*
	 * Reference counter. We keep one for:
	 *  - opened file descriptor
	 *  - task with enabled coverage (we can't unwire it from another task)
	 */
	atomic_t		refcount;
	/* The lock protects mode, size, area and t. */
	spinlock_t		lock;
	/* Current collection mode; follows the state transitions above. */
	enum kcov_mode		mode;
	/* Size of arena (in long's for KCOV_MODE_TRACE). */
	unsigned		size;
	/* Coverage buffer shared with user space. */
	void			*area;
	/* Task for which we collect coverage, or NULL. */
	struct task_struct	*t;
};
57 | |
/*
 * Decide whether the current coverage hook should record anything for
 * task @t: true only when running in task context and @t's kcov mode
 * matches @needed_mode.
 */
static bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
{
	enum kcov_mode mode;

	/*
	 * We are interested in code coverage as a function of a syscall inputs,
	 * so we ignore code executed in interrupts.
	 */
	if (!in_task())
		return false;
	mode = READ_ONCE(t->kcov_mode);
	/*
	 * There is some code that runs in interrupts but for which
	 * in_interrupt() returns false (e.g. preempt_schedule_irq()).
	 * READ_ONCE()/barrier() effectively provides load-acquire wrt
	 * interrupts, there are paired barrier()/WRITE_ONCE() in
	 * kcov_ioctl_locked().
	 */
	barrier();
	return mode == needed_mode;
}
79 | |
/*
 * Strip the KASLR offset (when CONFIG_RANDOMIZE_BASE is enabled) so that
 * reported PCs are stable across boots.
 */
static unsigned long canonicalize_ip(unsigned long ip)
{
#ifdef CONFIG_RANDOMIZE_BASE
	return ip - kaslr_offset();
#else
	return ip;
#endif
}
87 | |
/*
 * Entry point from instrumented code.
 * This is called once per basic-block/edge.
 */
void notrace __sanitizer_cov_trace_pc(void)
{
	struct task_struct *t;
	unsigned long *area;
	unsigned long ip = canonicalize_ip(_RET_IP_);
	unsigned long pos;

	t = current;
	if (!check_kcov_mode(KCOV_MODE_TRACE_PC, t))
		return;

	area = t->kcov_area;
	/* The first 64-bit word is the number of subsequent PCs. */
	pos = READ_ONCE(area[0]) + 1;
	if (likely(pos < t->kcov_size)) {
		area[pos] = ip;
		/* Store the PC first, then publish the new count. */
		WRITE_ONCE(area[0], pos);
	}
	/* If the buffer is full, the PC is silently dropped. */
}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
112 | |
113 | #ifdef CONFIG_KCOV_ENABLE_COMPARISONS |
/*
 * Append one comparison record to the coverage buffer.
 * Each record occupies KCOV_WORDS_PER_CMP u64 words laid out as
 * {type, arg1, arg2, ip}; area[0] holds the number of records so far.
 * @type: KCOV_CMP_* bits describing the comparison.
 * @arg1, @arg2: the two comparison operands, widened to u64.
 * @ip: address of the comparison instruction.
 */
static void write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
{
	struct task_struct *t;
	u64 *area;
	u64 count, start_index, end_pos, max_pos;

	t = current;
	if (!check_kcov_mode(KCOV_MODE_TRACE_CMP, t))
		return;

	ip = canonicalize_ip(ip);

	/*
	 * We write all comparison arguments and types as u64.
	 * The buffer was allocated for t->kcov_size unsigned longs.
	 */
	area = (u64 *)t->kcov_area;
	max_pos = t->kcov_size * sizeof(unsigned long);

	count = READ_ONCE(area[0]);

	/* Every record is KCOV_WORDS_PER_CMP 64-bit words. */
	start_index = 1 + count * KCOV_WORDS_PER_CMP;
	end_pos = (start_index + KCOV_WORDS_PER_CMP) * sizeof(u64);
	if (likely(end_pos <= max_pos)) {
		area[start_index] = type;
		area[start_index + 1] = arg1;
		area[start_index + 2] = arg2;
		area[start_index + 3] = ip;
		/* Publish the record before bumping the count. */
		WRITE_ONCE(area[0], count + 1);
	}
}
146 | |
/* Compiler hook for 8-bit comparisons. */
void notrace __sanitizer_cov_trace_cmp1(u8 arg1, u8 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(0), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp1);
152 | |
/* Compiler hook for 16-bit comparisons. */
void notrace __sanitizer_cov_trace_cmp2(u16 arg1, u16 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(1), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp2);
158 | |
/* Compiler hook for 32-bit comparisons. */
void notrace __sanitizer_cov_trace_cmp4(u32 arg1, u32 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(2), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp4);
164 | |
/* Compiler hook for 64-bit comparisons. */
void notrace __sanitizer_cov_trace_cmp8(u64 arg1, u64 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(3), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp8);
170 | |
/* As __sanitizer_cov_trace_cmp1(), with the KCOV_CMP_CONST flag set. */
void notrace __sanitizer_cov_trace_const_cmp1(u8 arg1, u8 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(0) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp1);
177 | |
/* As __sanitizer_cov_trace_cmp2(), with the KCOV_CMP_CONST flag set. */
void notrace __sanitizer_cov_trace_const_cmp2(u16 arg1, u16 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(1) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp2);
184 | |
/* As __sanitizer_cov_trace_cmp4(), with the KCOV_CMP_CONST flag set. */
void notrace __sanitizer_cov_trace_const_cmp4(u32 arg1, u32 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp4);
191 | |
/* As __sanitizer_cov_trace_cmp8(), with the KCOV_CMP_CONST flag set. */
void notrace __sanitizer_cov_trace_const_cmp8(u64 arg1, u64 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(3) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp8);
198 | |
199 | void notrace __sanitizer_cov_trace_switch(u64 val, u64 *cases) |
200 | { |
201 | u64 i; |
202 | u64 count = cases[0]; |
203 | u64 size = cases[1]; |
204 | u64 type = KCOV_CMP_CONST; |
205 | |
206 | switch (size) { |
207 | case 8: |
208 | type |= KCOV_CMP_SIZE(0); |
209 | break; |
210 | case 16: |
211 | type |= KCOV_CMP_SIZE(1); |
212 | break; |
213 | case 32: |
214 | type |= KCOV_CMP_SIZE(2); |
215 | break; |
216 | case 64: |
217 | type |= KCOV_CMP_SIZE(3); |
218 | break; |
219 | default: |
220 | return; |
221 | } |
222 | for (i = 0; i < count; i++) |
223 | write_comp_data(type, cases[i + 2], val, _RET_IP_); |
224 | } |
225 | EXPORT_SYMBOL(__sanitizer_cov_trace_switch); |
226 | #endif /* ifdef CONFIG_KCOV_ENABLE_COMPARISONS */ |
227 | |
/* Take an additional reference on @kcov (see refcount in struct kcov). */
static void kcov_get(struct kcov *kcov)
{
	atomic_inc(&kcov->refcount);
}
232 | |
233 | static void kcov_put(struct kcov *kcov) |
234 | { |
235 | if (atomic_dec_and_test(&kcov->refcount)) { |
236 | vfree(kcov->area); |
237 | kfree(kcov); |
238 | } |
239 | } |
240 | |
/*
 * Reset all kcov state in task @t.  The mode is cleared first, with a
 * compiler barrier in between, so that the coverage hooks (which test
 * kcov_mode in check_kcov_mode()) stop recording before the cached
 * size/area below are invalidated.
 */
void kcov_task_init(struct task_struct *t)
{
	WRITE_ONCE(t->kcov_mode, KCOV_MODE_DISABLED);
	barrier();	/* pairs with barrier() in check_kcov_mode() */
	t->kcov_size = 0;
	t->kcov_area = NULL;
	t->kcov = NULL;
}
249 | |
/*
 * Called on task exit: detach @t from its kcov descriptor (if any) and
 * drop the reference taken at KCOV_ENABLE time.
 */
void kcov_task_exit(struct task_struct *t)
{
	struct kcov *kcov;

	kcov = t->kcov;
	if (kcov == NULL)
		return;
	spin_lock(&kcov->lock);
	/* t->kcov and kcov->t must point at each other. */
	if (WARN_ON(kcov->t != t)) {
		spin_unlock(&kcov->lock);
		return;
	}
	/* Just to not leave dangling references behind. */
	kcov_task_init(t);
	kcov->t = NULL;
	/* Back to INIT so the fd can be enabled for another task. */
	kcov->mode = KCOV_MODE_INIT;
	spin_unlock(&kcov->lock);
	kcov_put(kcov);
}
269 | |
/*
 * mmap handler: on the first mmap() of the file, allocate the coverage
 * buffer and insert its pages into the caller's VMA.  The mapping must
 * start at offset 0 and span exactly kcov->size * sizeof(unsigned long)
 * bytes as set by KCOV_INIT_TRACE.
 */
static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
{
	int res = 0;
	void *area;
	struct kcov *kcov = vma->vm_file->private_data;
	unsigned long size, off;
	struct page *page;

	/* Allocate before taking the spinlock: vmalloc_user() can sleep. */
	area = vmalloc_user(vma->vm_end - vma->vm_start);
	if (!area)
		return -ENOMEM;

	spin_lock(&kcov->lock);
	size = kcov->size * sizeof(unsigned long);
	if (kcov->mode != KCOV_MODE_INIT || vma->vm_pgoff != 0 ||
	    vma->vm_end - vma->vm_start != size) {
		res = -EINVAL;
		goto exit;
	}
	if (!kcov->area) {
		kcov->area = area;
		vma->vm_flags |= VM_DONTEXPAND;
		/*
		 * kcov->area, once set, is freed only together with the
		 * descriptor in kcov_put(), so the page walk below can run
		 * without the lock.
		 */
		spin_unlock(&kcov->lock);
		for (off = 0; off < size; off += PAGE_SIZE) {
			page = vmalloc_to_page(kcov->area + off);
			if (vm_insert_page(vma, vma->vm_start + off, page))
				WARN_ONCE(1, "vm_insert_page() failed");
		}
		return 0;
	}
	/*
	 * A repeated mmap() falls through here with res still 0: the spare
	 * buffer is freed and success is returned without mapping pages.
	 */
exit:
	spin_unlock(&kcov->lock);
	vfree(area);
	return res;
}
305 | |
306 | static int kcov_open(struct inode *inode, struct file *filep) |
307 | { |
308 | struct kcov *kcov; |
309 | |
310 | kcov = kzalloc(sizeof(*kcov), GFP_KERNEL); |
311 | if (!kcov) |
312 | return -ENOMEM; |
313 | kcov->mode = KCOV_MODE_DISABLED; |
314 | atomic_set(&kcov->refcount, 1); |
315 | spin_lock_init(&kcov->lock); |
316 | filep->private_data = kcov; |
317 | return nonseekable_open(inode, filep); |
318 | } |
319 | |
/* release handler: drop the reference taken in kcov_open(). */
static int kcov_close(struct inode *inode, struct file *filep)
{
	kcov_put(filep->private_data);
	return 0;
}
325 | |
/*
 * Handle one kcov ioctl.  The caller (kcov_ioctl()) holds kcov->lock,
 * so mode/size/area/t can be read and written freely here.
 */
static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
			     unsigned long arg)
{
	struct task_struct *t;
	unsigned long size, unused;

	switch (cmd) {
	case KCOV_INIT_TRACE:
		/*
		 * Enable kcov in trace mode and setup buffer size.
		 * Must happen before anything else.
		 */
		if (kcov->mode != KCOV_MODE_DISABLED)
			return -EBUSY;
		/*
		 * Size must be at least 2 to hold current position and one PC.
		 * Later we allocate size * sizeof(unsigned long) memory,
		 * that must not overflow.
		 */
		size = arg;
		if (size < 2 || size > INT_MAX / sizeof(unsigned long))
			return -EINVAL;
		kcov->size = size;
		kcov->mode = KCOV_MODE_INIT;
		return 0;
	case KCOV_ENABLE:
		/*
		 * Enable coverage for the current task.
		 * At this point user must have been enabled trace mode,
		 * and mmapped the file. Coverage collection is disabled only
		 * at task exit or voluntary by KCOV_DISABLE. After that it can
		 * be enabled for another task.
		 */
		if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
			return -EINVAL;
		if (kcov->t != NULL)
			return -EBUSY;
		if (arg == KCOV_TRACE_PC)
			kcov->mode = KCOV_MODE_TRACE_PC;
		else if (arg == KCOV_TRACE_CMP)
#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
			kcov->mode = KCOV_MODE_TRACE_CMP;
#else
		return -ENOTSUPP;
#endif
		else
			return -EINVAL;
		t = current;
		/* Also rejects a task already attached to some kcov fd. */
		if (kcov->t != NULL || t->kcov != NULL)
			return -EBUSY;
		/* Cache in task struct for performance. */
		t->kcov_size = kcov->size;
		t->kcov_area = kcov->area;
		/* See comment in check_kcov_mode(). */
		barrier();
		WRITE_ONCE(t->kcov_mode, kcov->mode);
		t->kcov = kcov;
		kcov->t = t;
		/* This is put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	case KCOV_DISABLE:
		/* Disable coverage for the current task. */
		unused = arg;
		if (unused != 0 || current->kcov != kcov)
			return -EINVAL;
		t = current;
		if (WARN_ON(kcov->t != t))
			return -EINVAL;
		kcov_task_init(t);
		kcov->t = NULL;
		/* Back to INIT so the fd can be enabled again. */
		kcov->mode = KCOV_MODE_INIT;
		kcov_put(kcov);
		return 0;
	default:
		return -ENOTTY;
	}
}
404 | |
405 | static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) |
406 | { |
407 | struct kcov *kcov; |
408 | int res; |
409 | |
410 | kcov = filep->private_data; |
411 | spin_lock(&kcov->lock); |
412 | res = kcov_ioctl_locked(kcov, cmd, arg); |
413 | spin_unlock(&kcov->lock); |
414 | return res; |
415 | } |
416 | |
/*
 * File operations for the "kcov" debugfs entry.  The same handler serves
 * native and compat ioctls: all arguments are passed as plain integers,
 * so no translation is needed.
 */
static const struct file_operations kcov_fops = {
	.open		= kcov_open,
	.unlocked_ioctl	= kcov_ioctl,
	.compat_ioctl	= kcov_ioctl,
	.mmap		= kcov_mmap,
	.release        = kcov_close,
};
424 | |
425 | static int __init kcov_init(void) |
426 | { |
427 | /* |
428 | * The kcov debugfs file won't ever get removed and thus, |
429 | * there is no need to protect it against removal races. The |
430 | * use of debugfs_create_file_unsafe() is actually safe here. |
431 | */ |
432 | if (!debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops)) { |
433 | pr_err("failed to create kcov in debugfs\n"); |
434 | return -ENOMEM; |
435 | } |
436 | return 0; |
437 | } |
438 | |
439 | device_initcall(kcov_init); |
440 |