kcov: move t->kcov assignments into kcov_start/stop
kernel/kcov.c

// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) "kcov: " fmt

#define DISABLE_BRANCH_PROFILING
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/hashtable.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/kcov.h>
#include <linux/refcount.h>
#include <linux/log2.h>
#include <asm/setup.h>

#define kcov_debug(fmt, ...) pr_debug("%s: " fmt, __func__, ##__VA_ARGS__)

/* Number of 64-bit words written per one comparison: */
#define KCOV_WORDS_PER_CMP 4

/*
 * kcov descriptor (one per opened debugfs file).
 * State transitions of the descriptor:
 *  - initial state after open()
 *  - then there must be a single ioctl(KCOV_INIT_TRACE) call
 *  - then, mmap() call (several calls are allowed but not useful)
 *  - then, ioctl(KCOV_ENABLE, arg), where arg is
 *	KCOV_TRACE_PC - to trace only the PCs
 *	or
 *	KCOV_TRACE_CMP - to trace only the comparison operands
 *  - then, ioctl(KCOV_DISABLE) to disable the task.
 * Enabling/disabling ioctls can be repeated (only one task at a time is allowed).
 */
struct kcov {
	/*
	 * Reference counter. We keep one for:
	 *  - opened file descriptor
	 *  - task with enabled coverage (we can't unwire it from another task)
	 *  - each code section for remote coverage collection
	 */
	refcount_t		refcount;
	/* The lock protects mode, size, area and t. */
	spinlock_t		lock;
	enum kcov_mode		mode;
	/* Size of arena (in long's). */
	unsigned int		size;
	/* Coverage buffer shared with user space. */
	void			*area;
	/* Task for which we collect coverage, or NULL. */
	struct task_struct	*t;
	/* Collecting coverage from remote (background) threads. */
	bool			remote;
	/* Size of remote area (in long's). */
	unsigned int		remote_size;
	/*
	 * Sequence is incremented each time kcov is reenabled, used by
	 * kcov_remote_stop(), see the comment there.
	 */
	int			sequence;
};

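/*
 * For illustration only: a minimal userspace sketch of the state machine
 * described above, along the lines of the example in
 * Documentation/dev-tools/kcov.rst. COVER_SIZE is an arbitrary value chosen
 * here; error handling is omitted.
 *
 *	#define COVER_SIZE (64 << 10)
 *
 *	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
 *	ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
 *	unsigned long *cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
 *				    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC);
 *	... issue the syscalls to be traced ...
 *	ioctl(fd, KCOV_DISABLE, 0);
 */
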
struct kcov_remote_area {
	struct list_head	list;
	unsigned int		size;
};

struct kcov_remote {
	u64			handle;
	struct kcov		*kcov;
	struct hlist_node	hnode;
};

static DEFINE_SPINLOCK(kcov_remote_lock);
static DEFINE_HASHTABLE(kcov_remote_map, 4);
static struct list_head kcov_remote_areas = LIST_HEAD_INIT(kcov_remote_areas);

/* Must be called with kcov_remote_lock locked. */
static struct kcov_remote *kcov_remote_find(u64 handle)
{
	struct kcov_remote *remote;

	hash_for_each_possible(kcov_remote_map, remote, hnode, handle) {
		if (remote->handle == handle)
			return remote;
	}
	return NULL;
}

/* Must be called with kcov_remote_lock locked. */
static struct kcov_remote *kcov_remote_add(struct kcov *kcov, u64 handle)
{
	struct kcov_remote *remote;

	if (kcov_remote_find(handle))
		return ERR_PTR(-EEXIST);
	remote = kmalloc(sizeof(*remote), GFP_ATOMIC);
	if (!remote)
		return ERR_PTR(-ENOMEM);
	remote->handle = handle;
	remote->kcov = kcov;
	hash_add(kcov_remote_map, &remote->hnode, handle);
	return remote;
}

/* Must be called with kcov_remote_lock locked. */
static struct kcov_remote_area *kcov_remote_area_get(unsigned int size)
{
	struct kcov_remote_area *area;
	struct list_head *pos;

	list_for_each(pos, &kcov_remote_areas) {
		area = list_entry(pos, struct kcov_remote_area, list);
		if (area->size == size) {
			list_del(&area->list);
			return area;
		}
	}
	return NULL;
}

/* Must be called with kcov_remote_lock locked. */
static void kcov_remote_area_put(struct kcov_remote_area *area,
					unsigned int size)
{
	INIT_LIST_HEAD(&area->list);
	area->size = size;
	list_add(&area->list, &kcov_remote_areas);
}

static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
{
	unsigned int mode;

	/*
	 * We are interested in code coverage as a function of syscall inputs,
	 * so we ignore code executed in interrupts.
	 */
	if (!in_task())
		return false;
	mode = READ_ONCE(t->kcov_mode);
	/*
	 * There is some code that runs in interrupts but for which
	 * in_interrupt() returns false (e.g. preempt_schedule_irq()).
	 * READ_ONCE()/barrier() effectively provides load-acquire wrt
	 * interrupts; it is paired with the barrier()/WRITE_ONCE() in
	 * kcov_start().
	 */
	barrier();
	return mode == needed_mode;
}

static notrace unsigned long canonicalize_ip(unsigned long ip)
{
#ifdef CONFIG_RANDOMIZE_BASE
	ip -= kaslr_offset();
#endif
	return ip;
}

/*
 * Entry point from instrumented code.
 * This is called once per basic-block/edge.
 */
void notrace __sanitizer_cov_trace_pc(void)
{
	struct task_struct *t;
	unsigned long *area;
	unsigned long ip = canonicalize_ip(_RET_IP_);
	unsigned long pos;

	t = current;
	if (!check_kcov_mode(KCOV_MODE_TRACE_PC, t))
		return;

	area = t->kcov_area;
	/* The first 64-bit word is the number of subsequent PCs. */
	pos = READ_ONCE(area[0]) + 1;
	if (likely(pos < t->kcov_size)) {
		area[pos] = ip;
		WRITE_ONCE(area[0], pos);
	}
}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);

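/*
 * For illustration only: how a userspace reader could consume the PC buffer
 * filled above (a sketch; "cover" is the mmap()ed area from the usage example
 * near the top of this file). cover[0] holds the number of collected PCs and
 * cover[1..n] hold the PCs themselves; the counter is reset before the next
 * collection window.
 *
 *	__atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
 *	... run the code to be traced ...
 *	unsigned long n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
 *	for (unsigned long i = 0; i < n; i++)
 *		printf("0x%lx\n", cover[i + 1]);
 */
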
#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
{
	struct task_struct *t;
	u64 *area;
	u64 count, start_index, end_pos, max_pos;

	t = current;
	if (!check_kcov_mode(KCOV_MODE_TRACE_CMP, t))
		return;

	ip = canonicalize_ip(ip);

	/*
	 * We write all comparison arguments and types as u64.
	 * The buffer was allocated for t->kcov_size unsigned longs.
	 */
	area = (u64 *)t->kcov_area;
	max_pos = t->kcov_size * sizeof(unsigned long);

	count = READ_ONCE(area[0]);

	/* Every record is KCOV_WORDS_PER_CMP 64-bit words. */
	start_index = 1 + count * KCOV_WORDS_PER_CMP;
	end_pos = (start_index + KCOV_WORDS_PER_CMP) * sizeof(u64);
	if (likely(end_pos <= max_pos)) {
		area[start_index] = type;
		area[start_index + 1] = arg1;
		area[start_index + 2] = arg2;
		area[start_index + 3] = ip;
		WRITE_ONCE(area[0], count + 1);
	}
}

void notrace __sanitizer_cov_trace_cmp1(u8 arg1, u8 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(0), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp1);

void notrace __sanitizer_cov_trace_cmp2(u16 arg1, u16 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(1), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp2);

void notrace __sanitizer_cov_trace_cmp4(u32 arg1, u32 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(2), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp4);

void notrace __sanitizer_cov_trace_cmp8(u64 arg1, u64 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(3), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp8);

void notrace __sanitizer_cov_trace_const_cmp1(u8 arg1, u8 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(0) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp1);

void notrace __sanitizer_cov_trace_const_cmp2(u16 arg1, u16 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(1) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp2);

void notrace __sanitizer_cov_trace_const_cmp4(u32 arg1, u32 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp4);

void notrace __sanitizer_cov_trace_const_cmp8(u64 arg1, u64 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(3) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp8);

void notrace __sanitizer_cov_trace_switch(u64 val, u64 *cases)
{
	u64 i;
	u64 count = cases[0];
	u64 size = cases[1];
	u64 type = KCOV_CMP_CONST;

	switch (size) {
	case 8:
		type |= KCOV_CMP_SIZE(0);
		break;
	case 16:
		type |= KCOV_CMP_SIZE(1);
		break;
	case 32:
		type |= KCOV_CMP_SIZE(2);
		break;
	case 64:
		type |= KCOV_CMP_SIZE(3);
		break;
	default:
		return;
	}
	for (i = 0; i < count; i++)
		write_comp_data(type, cases[i + 2], val, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_switch);
#endif /* ifdef CONFIG_KCOV_ENABLE_COMPARISONS */

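/*
 * For illustration only: a sketch of how a userspace reader could walk the
 * KCOV_TRACE_CMP buffer filled by write_comp_data(). The first u64 holds the
 * number of records; each record is 4 (KCOV_WORDS_PER_CMP) u64 words:
 * type, arg1, arg2, PC. KCOV_CMP_CONST and KCOV_CMP_SIZE() come from the
 * uapi <linux/kcov.h> header.
 *
 *	uint64_t *cover64 = (uint64_t *)cover;
 *	uint64_t n = __atomic_load_n(&cover64[0], __ATOMIC_RELAXED);
 *	for (uint64_t i = 0; i < n; i++) {
 *		uint64_t type = cover64[i * 4 + 1];
 *		uint64_t arg1 = cover64[i * 4 + 2];
 *		uint64_t arg2 = cover64[i * 4 + 3];
 *		uint64_t pc   = cover64[i * 4 + 4];
 *		int is_const  = type & KCOV_CMP_CONST;
 *	}
 */
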
static void kcov_start(struct task_struct *t, struct kcov *kcov,
			unsigned int size, void *area, enum kcov_mode mode,
			int sequence)
{
	kcov_debug("t = %px, size = %u, area = %px\n", t, size, area);
	t->kcov = kcov;
	/* Cache in task struct for performance. */
	t->kcov_size = size;
	t->kcov_area = area;
	/* See comment in check_kcov_mode(). */
	barrier();
	WRITE_ONCE(t->kcov_mode, mode);
	t->kcov_sequence = sequence;
}

static void kcov_stop(struct task_struct *t)
{
	WRITE_ONCE(t->kcov_mode, KCOV_MODE_DISABLED);
	barrier();
	t->kcov = NULL;
	t->kcov_size = 0;
	t->kcov_area = NULL;
}

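/*
 * For illustration only (not an exact call site): with the t->kcov
 * assignments moved into the two helpers above, an enable path only has to
 * call kcov_start() and the matching disable path kcov_stop(); neither needs
 * to touch t->kcov directly anymore, e.g.:
 *
 *	kcov_start(t, kcov, kcov->size, kcov->area, kcov->mode, kcov->sequence);
 *	...
 *	kcov_stop(t);
 */
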
static void kcov_task_reset(struct task_struct *t)
{
	kcov_stop(t);
	t->kcov_sequence = 0;
	t->kcov_handle = 0;
}

void kcov_task_init(struct task_struct *t)
{
	kcov_task_reset(t);
	t->kcov_handle = current->kcov_handle;
}

static void kcov_reset(struct kcov *kcov)
{
	kcov->t = NULL;
	kcov->mode = KCOV_MODE_INIT;
	kcov->remote = false;
	kcov->remote_size = 0;
	kcov->sequence++;
}

static void kcov_remote_reset(struct kcov *kcov)
{
	int bkt;
	struct kcov_remote *remote;
	struct hlist_node *tmp;

	spin_lock(&kcov_remote_lock);
	hash_for_each_safe(kcov_remote_map, bkt, tmp, remote, hnode) {
		if (remote->kcov != kcov)
			continue;
		hash_del(&remote->hnode);
		kfree(remote);
	}
	/* Do reset before unlock to prevent races with kcov_remote_start(). */
	kcov_reset(kcov);
	spin_unlock(&kcov_remote_lock);
}

static void kcov_disable(struct task_struct *t, struct kcov *kcov)
{
	kcov_task_reset(t);
	if (kcov->remote)
		kcov_remote_reset(kcov);
	else
		kcov_reset(kcov);
}

static void kcov_get(struct kcov *kcov)
{
	refcount_inc(&kcov->refcount);
}

static void kcov_put(struct kcov *kcov)
{
	if (refcount_dec_and_test(&kcov->refcount)) {
		kcov_remote_reset(kcov);
		vfree(kcov->area);
		kfree(kcov);
	}
}

void kcov_task_exit(struct task_struct *t)
{
	struct kcov *kcov;

	kcov = t->kcov;
	if (kcov == NULL)
		return;

	spin_lock(&kcov->lock);
	kcov_debug("t = %px, kcov->t = %px\n", t, kcov->t);
	/*
	 * For KCOV_ENABLE devices we want to make sure that t->kcov->t == t,
	 * which comes down to:
	 *	WARN_ON(!kcov->remote && kcov->t != t);
	 *
	 * For KCOV_REMOTE_ENABLE devices, the exiting task is either:
	 * 1. A remote task between kcov_remote_start() and kcov_remote_stop().
	 *    In this case we should print a warning right away, since a task
	 *    shouldn't be exiting when it's in a kcov coverage collection
	 *    section. Here t points to the task that is collecting remote
	 *    coverage, and t->kcov->t points to the thread that created the
	 *    kcov device. Which means that to detect this case we need to
	 *    check that t != t->kcov->t, and this gives us the following:
	 *	WARN_ON(kcov->remote && kcov->t != t);
	 *
	 * 2. The task that created kcov exiting without calling KCOV_DISABLE,
	 *    and then again we can make sure that t->kcov->t == t:
	 *	WARN_ON(kcov->remote && kcov->t != t);
	 *
	 * By combining all three checks into one we get:
	 */
	if (WARN_ON(kcov->t != t)) {
		spin_unlock(&kcov->lock);
		return;
	}
	/* Just to not leave dangling references behind. */
	kcov_disable(t, kcov);
	spin_unlock(&kcov->lock);
	kcov_put(kcov);
}

static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
{
	int res = 0;
	void *area;
	struct kcov *kcov = vma->vm_file->private_data;
	unsigned long size, off;
	struct page *page;

	area = vmalloc_user(vma->vm_end - vma->vm_start);
	if (!area)
		return -ENOMEM;

	spin_lock(&kcov->lock);
	size = kcov->size * sizeof(unsigned long);
	if (kcov->mode != KCOV_MODE_INIT || vma->vm_pgoff != 0 ||
	    vma->vm_end - vma->vm_start != size) {
		res = -EINVAL;
		goto exit;
	}
	if (!kcov->area) {
		kcov->area = area;
		vma->vm_flags |= VM_DONTEXPAND;
		spin_unlock(&kcov->lock);
		for (off = 0; off < size; off += PAGE_SIZE) {
			page = vmalloc_to_page(kcov->area + off);
			if (vm_insert_page(vma, vma->vm_start + off, page))
				WARN_ONCE(1, "vm_insert_page() failed");
		}
		return 0;
	}
exit:
	spin_unlock(&kcov->lock);
	vfree(area);
	return res;
}

static int kcov_open(struct inode *inode, struct file *filep)
{
	struct kcov *kcov;

	kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
	if (!kcov)
		return -ENOMEM;
	kcov->mode = KCOV_MODE_DISABLED;
	kcov->sequence = 1;
	refcount_set(&kcov->refcount, 1);
	spin_lock_init(&kcov->lock);
	filep->private_data = kcov;
	return nonseekable_open(inode, filep);
}

static int kcov_close(struct inode *inode, struct file *filep)
{
	kcov_put(filep->private_data);
	return 0;
}

static int kcov_get_mode(unsigned long arg)
{
	if (arg == KCOV_TRACE_PC)
		return KCOV_MODE_TRACE_PC;
	else if (arg == KCOV_TRACE_CMP)
#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
		return KCOV_MODE_TRACE_CMP;
#else
		return -ENOTSUPP;
#endif
	else
		return -EINVAL;
}

/*
 * Fault in a lazily-faulted vmalloc area before it can be used by
 * __sanitizer_cov_trace_pc(), to avoid recursion issues if any code on the
 * vmalloc fault handling path is instrumented.
 */
static void kcov_fault_in_area(struct kcov *kcov)
{
	unsigned long stride = PAGE_SIZE / sizeof(unsigned long);
	unsigned long *area = kcov->area;
	unsigned long offset;

	for (offset = 0; offset < kcov->size; offset += stride)
		READ_ONCE(area[offset]);
}

static inline bool kcov_check_handle(u64 handle, bool common_valid,
				bool uncommon_valid, bool zero_valid)
{
	if (handle & ~(KCOV_SUBSYSTEM_MASK | KCOV_INSTANCE_MASK))
		return false;
	switch (handle & KCOV_SUBSYSTEM_MASK) {
	case KCOV_SUBSYSTEM_COMMON:
		return (handle & KCOV_INSTANCE_MASK) ?
			common_valid : zero_valid;
	case KCOV_SUBSYSTEM_USB:
		return uncommon_valid;
	default:
		return false;
	}
	return false;
}

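/*
 * For illustration only: handles checked above are constructed with
 * kcov_remote_handle() from the uapi <linux/kcov.h> header, which packs a
 * 1-byte subsystem id into the top byte and a 4-byte instance id into the
 * low bits, e.g. (instance values here are arbitrary examples):
 *
 *	u64 usb_handle    = kcov_remote_handle(KCOV_SUBSYSTEM_USB, bus->busnum);
 *	u64 common_handle = kcov_remote_handle(KCOV_SUBSYSTEM_COMMON, 0x42);
 *
 * kcov_check_handle(usb_handle, false, true, false) and
 * kcov_check_handle(common_handle, true, false, false) then both return true.
 */
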
static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
			     unsigned long arg)
{
	struct task_struct *t;
	unsigned long size, unused;
	int mode, i;
	struct kcov_remote_arg *remote_arg;
	struct kcov_remote *remote;

	switch (cmd) {
	case KCOV_INIT_TRACE:
		/*
		 * Enable kcov in trace mode and set up buffer size.
		 * Must happen before anything else.
		 */
		if (kcov->mode != KCOV_MODE_DISABLED)
			return -EBUSY;
		/*
		 * Size must be at least 2 to hold current position and one PC.
		 * Later we allocate size * sizeof(unsigned long) memory,
		 * that must not overflow.
		 */
		size = arg;
		if (size < 2 || size > INT_MAX / sizeof(unsigned long))
			return -EINVAL;
		kcov->size = size;
		kcov->mode = KCOV_MODE_INIT;
		return 0;
	case KCOV_ENABLE:
		/*
		 * Enable coverage for the current task.
		 * At this point the user must have already enabled trace mode
		 * via KCOV_INIT_TRACE and mmapped the file. Coverage collection
		 * is disabled only at task exit or voluntarily by KCOV_DISABLE.
		 * After that it can be enabled for another task.
		 */
		if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
			return -EINVAL;
		t = current;
		if (kcov->t != NULL || t->kcov != NULL)
			return -EBUSY;
		mode = kcov_get_mode(arg);
		if (mode < 0)
			return mode;
		kcov_fault_in_area(kcov);
		kcov->mode = mode;
		kcov_start(t, kcov, kcov->size, kcov->area, kcov->mode,
			   kcov->sequence);
		kcov->t = t;
		/* Put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	case KCOV_DISABLE:
		/* Disable coverage for the current task. */
		unused = arg;
		if (unused != 0 || current->kcov != kcov)
			return -EINVAL;
		t = current;
		if (WARN_ON(kcov->t != t))
			return -EINVAL;
		kcov_disable(t, kcov);
		kcov_put(kcov);
		return 0;
	case KCOV_REMOTE_ENABLE:
		if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
			return -EINVAL;
		t = current;
		if (kcov->t != NULL || t->kcov != NULL)
			return -EBUSY;
		remote_arg = (struct kcov_remote_arg *)arg;
		mode = kcov_get_mode(remote_arg->trace_mode);
		if (mode < 0)
			return mode;
		if (remote_arg->area_size > LONG_MAX / sizeof(unsigned long))
			return -EINVAL;
		kcov->mode = mode;
		t->kcov = kcov;
		kcov->t = t;
		kcov->remote = true;
		kcov->remote_size = remote_arg->area_size;
		spin_lock(&kcov_remote_lock);
		for (i = 0; i < remote_arg->num_handles; i++) {
			if (!kcov_check_handle(remote_arg->handles[i],
					       false, true, false)) {
				spin_unlock(&kcov_remote_lock);
				kcov_disable(t, kcov);
				return -EINVAL;
			}
			remote = kcov_remote_add(kcov, remote_arg->handles[i]);
			if (IS_ERR(remote)) {
				spin_unlock(&kcov_remote_lock);
				kcov_disable(t, kcov);
				return PTR_ERR(remote);
			}
		}
		if (remote_arg->common_handle) {
			if (!kcov_check_handle(remote_arg->common_handle,
					       true, false, false)) {
				spin_unlock(&kcov_remote_lock);
				kcov_disable(t, kcov);
				return -EINVAL;
			}
			remote = kcov_remote_add(kcov,
					remote_arg->common_handle);
			if (IS_ERR(remote)) {
				spin_unlock(&kcov_remote_lock);
				kcov_disable(t, kcov);
				return PTR_ERR(remote);
			}
			t->kcov_handle = remote_arg->common_handle;
		}
		spin_unlock(&kcov_remote_lock);
		/* Put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	default:
		return -ENOTTY;
	}
}

static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct kcov *kcov;
	int res;
	struct kcov_remote_arg *remote_arg = NULL;
	unsigned int remote_num_handles;
	unsigned long remote_arg_size;

	if (cmd == KCOV_REMOTE_ENABLE) {
		if (get_user(remote_num_handles, (unsigned __user *)(arg +
				offsetof(struct kcov_remote_arg, num_handles))))
			return -EFAULT;
		if (remote_num_handles > KCOV_REMOTE_MAX_HANDLES)
			return -EINVAL;
		remote_arg_size = struct_size(remote_arg, handles,
					remote_num_handles);
		remote_arg = memdup_user((void __user *)arg, remote_arg_size);
		if (IS_ERR(remote_arg))
			return PTR_ERR(remote_arg);
		if (remote_arg->num_handles != remote_num_handles) {
			kfree(remote_arg);
			return -EINVAL;
		}
		arg = (unsigned long)remote_arg;
	}

	kcov = filep->private_data;
	spin_lock(&kcov->lock);
	res = kcov_ioctl_locked(kcov, cmd, arg);
	spin_unlock(&kcov->lock);

	kfree(remote_arg);

	return res;
}

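/*
 * For illustration only: the userspace side of KCOV_REMOTE_ENABLE (a sketch,
 * not a complete program). The kcov_remote_arg layout comes from the uapi
 * <linux/kcov.h> header; the handle values are arbitrary examples and
 * COVER_SIZE is the same placeholder used in the earlier examples.
 *
 *	struct kcov_remote_arg *arg = calloc(1, sizeof(*arg) + sizeof(uint64_t));
 *	arg->trace_mode = KCOV_TRACE_PC;
 *	arg->area_size = COVER_SIZE;
 *	arg->num_handles = 1;
 *	arg->handles[0] = kcov_remote_handle(KCOV_SUBSYSTEM_USB, 1);
 *	arg->common_handle = kcov_remote_handle(KCOV_SUBSYSTEM_COMMON, 0x42);
 *	ioctl(fd, KCOV_REMOTE_ENABLE, arg);
 */
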
static const struct file_operations kcov_fops = {
	.open		= kcov_open,
	.unlocked_ioctl	= kcov_ioctl,
	.compat_ioctl	= kcov_ioctl,
	.mmap		= kcov_mmap,
	.release        = kcov_close,
};

/*
 * kcov_remote_start() and kcov_remote_stop() can be used to annotate a section
 * of code in a kernel background thread to allow kcov to be used to collect
 * coverage from that part of code.
 *
 * The handle argument of kcov_remote_start() identifies a code section that is
 * used for coverage collection. A userspace process passes this handle to the
 * KCOV_REMOTE_ENABLE ioctl to make the used kcov device start collecting
 * coverage for the code section identified by this handle.
 *
 * The usage of these annotations in the kernel code is different depending on
 * the type of the kernel thread whose code is being annotated.
 *
 * For global kernel threads that are spawned in a limited number of instances
 * (e.g. one USB hub_event() worker thread is spawned per USB HCD), each
 * instance must be assigned a unique 4-byte instance id. The instance id is
 * then combined with a 1-byte subsystem id to get a handle via
 * kcov_remote_handle(subsystem_id, instance_id).
 *
 * For local kernel threads that are spawned from a system call handler when a
 * user interacts with some kernel interface (e.g. vhost workers), a handle is
 * passed from a userspace process as the common_handle field of the
 * kcov_remote_arg struct (note that the user must generate a handle by using
 * kcov_remote_handle() with KCOV_SUBSYSTEM_COMMON as the subsystem id and an
 * arbitrary 4-byte non-zero number as the instance id). This common handle
 * then gets saved into the task_struct of the process that issued the
 * KCOV_REMOTE_ENABLE ioctl. When this process issues system calls that spawn
 * kernel threads, the common handle must be retrieved via kcov_common_handle()
 * and passed to the spawned threads via custom annotations. Those kernel
 * threads must in turn be annotated with kcov_remote_start(common_handle) and
 * kcov_remote_stop(). All of the threads that are spawned by the same process
 * obtain the same handle, hence the name "common".
 *
 * See Documentation/dev-tools/kcov.rst for more details.
 *
 * Internally, this function looks up the kcov device associated with the
 * provided handle, allocates an area for coverage collection, and saves the
 * pointers to kcov and area into the current task_struct to allow coverage to
 * be collected via __sanitizer_cov_trace_pc().
 * In turn, kcov_remote_stop() clears those pointers from task_struct to stop
 * collecting coverage and copies all collected coverage into the kcov area.
 */
void kcov_remote_start(u64 handle)
{
	struct kcov_remote *remote;
	struct kcov *kcov;
	void *area;
	struct task_struct *t;
	unsigned int size;
	enum kcov_mode mode;
	int sequence;

	if (WARN_ON(!kcov_check_handle(handle, true, true, true)))
		return;
	if (WARN_ON(!in_task()))
		return;
	t = current;
	/*
	 * Check that kcov_remote_start is not called twice
	 * nor called by user tasks (with enabled kcov).
	 */
	if (WARN_ON(t->kcov))
		return;

	kcov_debug("handle = %llx\n", handle);

	spin_lock(&kcov_remote_lock);
	remote = kcov_remote_find(handle);
	if (!remote) {
		spin_unlock(&kcov_remote_lock);
		return;
	}
	kcov = remote->kcov;
	/* Put in kcov_remote_stop(). */
	kcov_get(kcov);
	/*
	 * Read kcov fields before unlock to prevent races with
	 * KCOV_DISABLE / kcov_remote_reset().
	 */
	size = kcov->remote_size;
	mode = kcov->mode;
	sequence = kcov->sequence;
	area = kcov_remote_area_get(size);
	spin_unlock(&kcov_remote_lock);

	if (!area) {
		area = vmalloc(size * sizeof(unsigned long));
		if (!area) {
			kcov_put(kcov);
			return;
		}
	}
	/* Reset coverage size. */
	*(u64 *)area = 0;

	kcov_start(t, kcov, size, area, mode, sequence);

}
EXPORT_SYMBOL(kcov_remote_start);

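/*
 * For illustration only: a sketch of annotating a background worker with the
 * pair above. The handle is either a fixed subsystem handle (as in the USB
 * hub_event() case) or a common handle previously obtained from
 * kcov_common_handle() by the code that created this worker; the
 * "worker->kcov_handle" field is hypothetical.
 *
 *	while (!kthread_should_stop()) {
 *		kcov_remote_start(worker->kcov_handle);
 *		process_one_request(worker);
 *		kcov_remote_stop();
 *	}
 */
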
static void kcov_move_area(enum kcov_mode mode, void *dst_area,
				unsigned int dst_area_size, void *src_area)
{
	u64 word_size = sizeof(unsigned long);
	u64 count_size, entry_size_log;
	u64 dst_len, src_len;
	void *dst_entries, *src_entries;
	u64 dst_occupied, dst_free, bytes_to_move, entries_moved;

	kcov_debug("%px %u <= %px %lu\n",
		dst_area, dst_area_size, src_area, *(unsigned long *)src_area);

	switch (mode) {
	case KCOV_MODE_TRACE_PC:
		dst_len = READ_ONCE(*(unsigned long *)dst_area);
		src_len = *(unsigned long *)src_area;
		count_size = sizeof(unsigned long);
		entry_size_log = __ilog2_u64(sizeof(unsigned long));
		break;
	case KCOV_MODE_TRACE_CMP:
		dst_len = READ_ONCE(*(u64 *)dst_area);
		src_len = *(u64 *)src_area;
		count_size = sizeof(u64);
		BUILD_BUG_ON(!is_power_of_2(KCOV_WORDS_PER_CMP));
		entry_size_log = __ilog2_u64(sizeof(u64) * KCOV_WORDS_PER_CMP);
		break;
	default:
		WARN_ON(1);
		return;
	}

	/* As arm can't divide u64 integers use log of entry size. */
	if (dst_len > ((dst_area_size * word_size - count_size) >>
				entry_size_log))
		return;
	dst_occupied = count_size + (dst_len << entry_size_log);
	dst_free = dst_area_size * word_size - dst_occupied;
	bytes_to_move = min(dst_free, src_len << entry_size_log);
	dst_entries = dst_area + dst_occupied;
	src_entries = src_area + count_size;
	memcpy(dst_entries, src_entries, bytes_to_move);
	entries_moved = bytes_to_move >> entry_size_log;

	switch (mode) {
	case KCOV_MODE_TRACE_PC:
		WRITE_ONCE(*(unsigned long *)dst_area, dst_len + entries_moved);
		break;
	case KCOV_MODE_TRACE_CMP:
		WRITE_ONCE(*(u64 *)dst_area, dst_len + entries_moved);
		break;
	default:
		break;
	}
}

/* See the comment before kcov_remote_start() for usage details. */
void kcov_remote_stop(void)
{
	struct task_struct *t = current;
	struct kcov *kcov = t->kcov;
	void *area = t->kcov_area;
	unsigned int size = t->kcov_size;
	int sequence = t->kcov_sequence;

	if (!kcov)
		return;

	kcov_stop(t);

	spin_lock(&kcov->lock);
	/*
	 * KCOV_DISABLE could have been called between kcov_remote_start()
	 * and kcov_remote_stop(), hence the check.
	 */
	if (sequence == kcov->sequence && kcov->remote)
		kcov_move_area(kcov->mode, kcov->area, kcov->size, area);
	spin_unlock(&kcov->lock);

	spin_lock(&kcov_remote_lock);
	kcov_remote_area_put(area, size);
	spin_unlock(&kcov_remote_lock);

	kcov_put(kcov);
}
EXPORT_SYMBOL(kcov_remote_stop);

/* See the comment before kcov_remote_start() for usage details. */
u64 kcov_common_handle(void)
{
	return current->kcov_handle;
}
EXPORT_SYMBOL(kcov_common_handle);

static int __init kcov_init(void)
{
	/*
	 * The kcov debugfs file won't ever get removed and thus,
	 * there is no need to protect it against removal races. The
	 * use of debugfs_create_file_unsafe() is actually safe here.
	 */
	debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops);

	return 0;
}

device_initcall(kcov_init);