// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) "kcov: " fmt

#define DISABLE_BRANCH_PROFILING
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/hashtable.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/kcov.h>
#include <linux/refcount.h>
#include <linux/log2.h>
#include <asm/setup.h>

#define kcov_debug(fmt, ...) pr_debug("%s: " fmt, __func__, ##__VA_ARGS__)

/* Number of 64-bit words written per one comparison: */
#define KCOV_WORDS_PER_CMP 4
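
/*
 * For reference, the layout of the KCOV_TRACE_CMP buffer as written by
 * write_comp_data() below: area[0] holds the number of records written so
 * far, and each record occupies KCOV_WORDS_PER_CMP consecutive u64s:
 *
 *	area[1 + 4*n + 0] - comparison type (KCOV_CMP_SIZE(), KCOV_CMP_CONST)
 *	area[1 + 4*n + 1] - first comparison argument
 *	area[1 + 4*n + 2] - second comparison argument
 *	area[1 + 4*n + 3] - PC of the comparison instruction
 */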

/*
 * kcov descriptor (one per opened debugfs file).
 * State transitions of the descriptor:
 *  - initial state after open()
 *  - then there must be a single ioctl(KCOV_INIT_TRACE) call
 *  - then, mmap() call (several calls are allowed but not useful)
 *  - then, ioctl(KCOV_ENABLE, arg), where arg is
 *	KCOV_TRACE_PC - to trace only the PCs
 *	or
 *	KCOV_TRACE_CMP - to trace only the comparison operands
 *  - then, ioctl(KCOV_DISABLE) to disable the task.
 * Enabling/disabling ioctls can be repeated (only one task at a time allowed).
 */
struct kcov {
	/*
	 * Reference counter. We keep one for:
	 *  - opened file descriptor
	 *  - task with enabled coverage (we can't unwire it from another task)
	 *  - each code section for remote coverage collection
	 */
	refcount_t		refcount;
	/* The lock protects mode, size, area and t. */
	spinlock_t		lock;
	enum kcov_mode		mode;
	/* Size of arena (in long's). */
	unsigned int		size;
	/* Coverage buffer shared with user space. */
	void			*area;
	/* Task for which we collect coverage, or NULL. */
	struct task_struct	*t;
	/* Collecting coverage from remote (background) threads. */
	bool			remote;
	/* Size of remote area (in long's). */
	unsigned int		remote_size;
	/*
	 * Sequence is incremented each time kcov is reenabled, used by
	 * kcov_remote_stop(), see the comment there.
	 */
	int			sequence;
};
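
/*
 * A condensed userspace sketch of the state machine above, adapted from
 * Documentation/dev-tools/kcov.rst (error handling omitted; COVER_SIZE is
 * an arbitrary caller-chosen buffer size in unsigned longs):
 *
 *	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
 *	ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
 *	unsigned long *cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
 *				    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC);
 *	__atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
 *	... issue the syscall under test ...
 *	n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED); // number of PCs
 *	ioctl(fd, KCOV_DISABLE, 0);
 */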

struct kcov_remote_area {
	struct list_head	list;
	unsigned int		size;
};

struct kcov_remote {
	u64			handle;
	struct kcov		*kcov;
	struct hlist_node	hnode;
};

static DEFINE_SPINLOCK(kcov_remote_lock);
static DEFINE_HASHTABLE(kcov_remote_map, 4);
static struct list_head kcov_remote_areas = LIST_HEAD_INIT(kcov_remote_areas);

struct kcov_percpu_data {
	void			*irq_area;
	local_lock_t		lock;

	unsigned int		saved_mode;
	unsigned int		saved_size;
	void			*saved_area;
	struct kcov		*saved_kcov;
	int			saved_sequence;
};

static DEFINE_PER_CPU(struct kcov_percpu_data, kcov_percpu_data) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

/* Must be called with kcov_remote_lock locked. */
static struct kcov_remote *kcov_remote_find(u64 handle)
{
	struct kcov_remote *remote;

	hash_for_each_possible(kcov_remote_map, remote, hnode, handle) {
		if (remote->handle == handle)
			return remote;
	}
	return NULL;
}

/* Must be called with kcov_remote_lock locked. */
static struct kcov_remote *kcov_remote_add(struct kcov *kcov, u64 handle)
{
	struct kcov_remote *remote;

	if (kcov_remote_find(handle))
		return ERR_PTR(-EEXIST);
	remote = kmalloc(sizeof(*remote), GFP_ATOMIC);
	if (!remote)
		return ERR_PTR(-ENOMEM);
	remote->handle = handle;
	remote->kcov = kcov;
	hash_add(kcov_remote_map, &remote->hnode, handle);
	return remote;
}

/* Must be called with kcov_remote_lock locked. */
static struct kcov_remote_area *kcov_remote_area_get(unsigned int size)
{
	struct kcov_remote_area *area;
	struct list_head *pos;

	list_for_each(pos, &kcov_remote_areas) {
		area = list_entry(pos, struct kcov_remote_area, list);
		if (area->size == size) {
			list_del(&area->list);
			return area;
		}
	}
	return NULL;
}

/* Must be called with kcov_remote_lock locked. */
static void kcov_remote_area_put(struct kcov_remote_area *area,
				 unsigned int size)
{
	INIT_LIST_HEAD(&area->list);
	area->size = size;
	list_add(&area->list, &kcov_remote_areas);
}

static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
{
	unsigned int mode;

	/*
	 * We are interested in code coverage as a function of syscall inputs,
	 * so we ignore code executed in interrupts, unless we are in a remote
	 * coverage collection section in a softirq.
	 */
	if (!in_task() && !(in_serving_softirq() && t->kcov_softirq))
		return false;
	mode = READ_ONCE(t->kcov_mode);
	/*
	 * There is some code that runs in interrupts but for which
	 * in_interrupt() returns false (e.g. preempt_schedule_irq()).
	 * READ_ONCE()/barrier() effectively provides load-acquire wrt
	 * interrupts, there are paired barrier()/WRITE_ONCE() in
	 * kcov_start().
	 */
	barrier();
	return mode == needed_mode;
}

static notrace unsigned long canonicalize_ip(unsigned long ip)
{
#ifdef CONFIG_RANDOMIZE_BASE
	ip -= kaslr_offset();
#endif
	return ip;
}

/*
 * Entry point from instrumented code.
 * This is called once per basic-block/edge.
 */
void notrace __sanitizer_cov_trace_pc(void)
{
	struct task_struct *t;
	unsigned long *area;
	unsigned long ip = canonicalize_ip(_RET_IP_);
	unsigned long pos;

	t = current;
	if (!check_kcov_mode(KCOV_MODE_TRACE_PC, t))
		return;

	area = t->kcov_area;
	/* The first 64-bit word is the number of subsequent PCs. */
	pos = READ_ONCE(area[0]) + 1;
	if (likely(pos < t->kcov_size)) {
		area[pos] = ip;
		WRITE_ONCE(area[0], pos);
	}
}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);

#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
{
	struct task_struct *t;
	u64 *area;
	u64 count, start_index, end_pos, max_pos;

	t = current;
	if (!check_kcov_mode(KCOV_MODE_TRACE_CMP, t))
		return;

	ip = canonicalize_ip(ip);

	/*
	 * We write all comparison arguments and types as u64.
	 * The buffer was allocated for t->kcov_size unsigned longs.
	 */
	area = (u64 *)t->kcov_area;
	max_pos = t->kcov_size * sizeof(unsigned long);

	count = READ_ONCE(area[0]);

	/* Every record is KCOV_WORDS_PER_CMP 64-bit words. */
	start_index = 1 + count * KCOV_WORDS_PER_CMP;
	end_pos = (start_index + KCOV_WORDS_PER_CMP) * sizeof(u64);
	if (likely(end_pos <= max_pos)) {
		area[start_index] = type;
		area[start_index + 1] = arg1;
		area[start_index + 2] = arg2;
		area[start_index + 3] = ip;
		WRITE_ONCE(area[0], count + 1);
	}
}

void notrace __sanitizer_cov_trace_cmp1(u8 arg1, u8 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(0), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp1);

void notrace __sanitizer_cov_trace_cmp2(u16 arg1, u16 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(1), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp2);

void notrace __sanitizer_cov_trace_cmp4(u32 arg1, u32 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(2), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp4);

void notrace __sanitizer_cov_trace_cmp8(u64 arg1, u64 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(3), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp8);

void notrace __sanitizer_cov_trace_const_cmp1(u8 arg1, u8 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(0) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp1);

void notrace __sanitizer_cov_trace_const_cmp2(u16 arg1, u16 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(1) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp2);

void notrace __sanitizer_cov_trace_const_cmp4(u32 arg1, u32 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp4);

void notrace __sanitizer_cov_trace_const_cmp8(u64 arg1, u64 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(3) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp8);
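
/*
 * For reference, the cases array consumed below follows the compiler's
 * trace-switch ABI, which can be read off the code itself: cases[0] holds
 * the number of case values, cases[1] holds the size of the switched
 * operand in bits, and cases[2..] hold the case values themselves.
 */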

void notrace __sanitizer_cov_trace_switch(u64 val, u64 *cases)
{
	u64 i;
	u64 count = cases[0];
	u64 size = cases[1];
	u64 type = KCOV_CMP_CONST;

	switch (size) {
	case 8:
		type |= KCOV_CMP_SIZE(0);
		break;
	case 16:
		type |= KCOV_CMP_SIZE(1);
		break;
	case 32:
		type |= KCOV_CMP_SIZE(2);
		break;
	case 64:
		type |= KCOV_CMP_SIZE(3);
		break;
	default:
		return;
	}
	for (i = 0; i < count; i++)
		write_comp_data(type, cases[i + 2], val, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_switch);
#endif /* ifdef CONFIG_KCOV_ENABLE_COMPARISONS */

static void kcov_start(struct task_struct *t, struct kcov *kcov,
			unsigned int size, void *area, enum kcov_mode mode,
			int sequence)
{
	kcov_debug("t = %px, size = %u, area = %px\n", t, size, area);
	t->kcov = kcov;
	/* Cache in task struct for performance. */
	t->kcov_size = size;
	t->kcov_area = area;
	t->kcov_sequence = sequence;
	/* See comment in check_kcov_mode(). */
	barrier();
	WRITE_ONCE(t->kcov_mode, mode);
}

static void kcov_stop(struct task_struct *t)
{
	WRITE_ONCE(t->kcov_mode, KCOV_MODE_DISABLED);
	barrier();
	t->kcov = NULL;
	t->kcov_size = 0;
	t->kcov_area = NULL;
}

static void kcov_task_reset(struct task_struct *t)
{
	kcov_stop(t);
	t->kcov_sequence = 0;
	t->kcov_handle = 0;
}

void kcov_task_init(struct task_struct *t)
{
	kcov_task_reset(t);
	t->kcov_handle = current->kcov_handle;
}

static void kcov_reset(struct kcov *kcov)
{
	kcov->t = NULL;
	kcov->mode = KCOV_MODE_INIT;
	kcov->remote = false;
	kcov->remote_size = 0;
	kcov->sequence++;
}

static void kcov_remote_reset(struct kcov *kcov)
{
	int bkt;
	struct kcov_remote *remote;
	struct hlist_node *tmp;
	unsigned long flags;

	spin_lock_irqsave(&kcov_remote_lock, flags);
	hash_for_each_safe(kcov_remote_map, bkt, tmp, remote, hnode) {
		if (remote->kcov != kcov)
			continue;
		hash_del(&remote->hnode);
		kfree(remote);
	}
	/* Do reset before unlock to prevent races with kcov_remote_start(). */
	kcov_reset(kcov);
	spin_unlock_irqrestore(&kcov_remote_lock, flags);
}

static void kcov_disable(struct task_struct *t, struct kcov *kcov)
{
	kcov_task_reset(t);
	if (kcov->remote)
		kcov_remote_reset(kcov);
	else
		kcov_reset(kcov);
}

static void kcov_get(struct kcov *kcov)
{
	refcount_inc(&kcov->refcount);
}

static void kcov_put(struct kcov *kcov)
{
	if (refcount_dec_and_test(&kcov->refcount)) {
		kcov_remote_reset(kcov);
		vfree(kcov->area);
		kfree(kcov);
	}
}

void kcov_task_exit(struct task_struct *t)
{
	struct kcov *kcov;
	unsigned long flags;

	kcov = t->kcov;
	if (kcov == NULL)
		return;

	spin_lock_irqsave(&kcov->lock, flags);
	kcov_debug("t = %px, kcov->t = %px\n", t, kcov->t);
	/*
	 * For KCOV_ENABLE devices we want to make sure that t->kcov->t == t,
	 * which comes down to:
	 *	WARN_ON(!kcov->remote && kcov->t != t);
	 *
	 * For KCOV_REMOTE_ENABLE devices, the exiting task is either:
	 *
	 * 1. A remote task between kcov_remote_start() and kcov_remote_stop().
	 *    In this case we should print a warning right away, since a task
	 *    shouldn't be exiting when it's in a kcov coverage collection
	 *    section. Here t points to the task that is collecting remote
	 *    coverage, and t->kcov->t points to the thread that created the
	 *    kcov device. Which means that to detect this case we need to
	 *    check that t != t->kcov->t, and this gives us the following:
	 *	WARN_ON(kcov->remote && kcov->t != t);
	 *
	 * 2. The task that created kcov exiting without calling KCOV_DISABLE,
	 *    and then again we make sure that t->kcov->t == t:
	 *	WARN_ON(kcov->remote && kcov->t != t);
	 *
	 * By combining all three checks into one we get:
	 */
	if (WARN_ON(kcov->t != t)) {
		spin_unlock_irqrestore(&kcov->lock, flags);
		return;
	}
	/* Just to not leave dangling references behind. */
	kcov_disable(t, kcov);
	spin_unlock_irqrestore(&kcov->lock, flags);
	kcov_put(kcov);
}

static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
{
	int res = 0;
	struct kcov *kcov = vma->vm_file->private_data;
	unsigned long size, off;
	struct page *page;
	unsigned long flags;

	spin_lock_irqsave(&kcov->lock, flags);
	size = kcov->size * sizeof(unsigned long);
	if (kcov->area == NULL || vma->vm_pgoff != 0 ||
	    vma->vm_end - vma->vm_start != size) {
		res = -EINVAL;
		goto exit;
	}
	spin_unlock_irqrestore(&kcov->lock, flags);
	vma->vm_flags |= VM_DONTEXPAND;
	for (off = 0; off < size; off += PAGE_SIZE) {
		page = vmalloc_to_page(kcov->area + off);
		res = vm_insert_page(vma, vma->vm_start + off, page);
		if (res) {
			pr_warn_once("kcov: vm_insert_page() failed\n");
			return res;
		}
	}
	return 0;
exit:
	spin_unlock_irqrestore(&kcov->lock, flags);
	return res;
}

static int kcov_open(struct inode *inode, struct file *filep)
{
	struct kcov *kcov;

	kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
	if (!kcov)
		return -ENOMEM;
	kcov->mode = KCOV_MODE_DISABLED;
	kcov->sequence = 1;
	refcount_set(&kcov->refcount, 1);
	spin_lock_init(&kcov->lock);
	filep->private_data = kcov;
	return nonseekable_open(inode, filep);
}

static int kcov_close(struct inode *inode, struct file *filep)
{
	kcov_put(filep->private_data);
	return 0;
}

static int kcov_get_mode(unsigned long arg)
{
	if (arg == KCOV_TRACE_PC)
		return KCOV_MODE_TRACE_PC;
	else if (arg == KCOV_TRACE_CMP)
#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
		return KCOV_MODE_TRACE_CMP;
#else
		return -ENOTSUPP;
#endif
	else
		return -EINVAL;
}

/*
 * Fault in a lazily-faulted vmalloc area before it can be used by
 * __sanitizer_cov_trace_pc(), to avoid recursion issues if any code on the
 * vmalloc fault handling path is instrumented.
 */
static void kcov_fault_in_area(struct kcov *kcov)
{
	unsigned long stride = PAGE_SIZE / sizeof(unsigned long);
	unsigned long *area = kcov->area;
	unsigned long offset;

	for (offset = 0; offset < kcov->size; offset += stride)
		READ_ONCE(area[offset]);
}

static inline bool kcov_check_handle(u64 handle, bool common_valid,
				bool uncommon_valid, bool zero_valid)
{
	if (handle & ~(KCOV_SUBSYSTEM_MASK | KCOV_INSTANCE_MASK))
		return false;
	switch (handle & KCOV_SUBSYSTEM_MASK) {
	case KCOV_SUBSYSTEM_COMMON:
		return (handle & KCOV_INSTANCE_MASK) ?
			common_valid : zero_valid;
	case KCOV_SUBSYSTEM_USB:
		return uncommon_valid;
	default:
		return false;
	}
	return false;
}
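
/*
 * For reference, a handle checked above is composed by kcov_remote_handle(),
 * defined alongside the masks in the uapi kcov header: the subsystem id
 * occupies the top byte, the instance id the low four bytes, and the bytes
 * in between must be zero. A hypothetical USB handle for bus number 1 would
 * thus be built as:
 *
 *	u64 handle = kcov_remote_handle(KCOV_SUBSYSTEM_USB, 1);
 */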

static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
			     unsigned long arg)
{
	struct task_struct *t;
	unsigned long flags, unused;
	int mode, i;
	struct kcov_remote_arg *remote_arg;
	struct kcov_remote *remote;

	switch (cmd) {
	case KCOV_ENABLE:
		/*
		 * Enable coverage for the current task.
		 * At this point the user must have enabled trace mode and
		 * mmapped the file. Coverage collection is disabled only
		 * at task exit or voluntarily by KCOV_DISABLE. After that it
		 * can be enabled for another task.
		 */
		if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
			return -EINVAL;
		t = current;
		if (kcov->t != NULL || t->kcov != NULL)
			return -EBUSY;
		mode = kcov_get_mode(arg);
		if (mode < 0)
			return mode;
		kcov_fault_in_area(kcov);
		kcov->mode = mode;
		kcov_start(t, kcov, kcov->size, kcov->area, kcov->mode,
				kcov->sequence);
		kcov->t = t;
		/* Put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	case KCOV_DISABLE:
		/* Disable coverage for the current task. */
		unused = arg;
		if (unused != 0 || current->kcov != kcov)
			return -EINVAL;
		t = current;
		if (WARN_ON(kcov->t != t))
			return -EINVAL;
		kcov_disable(t, kcov);
		kcov_put(kcov);
		return 0;
	case KCOV_REMOTE_ENABLE:
		if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
			return -EINVAL;
		t = current;
		if (kcov->t != NULL || t->kcov != NULL)
			return -EBUSY;
		remote_arg = (struct kcov_remote_arg *)arg;
		mode = kcov_get_mode(remote_arg->trace_mode);
		if (mode < 0)
			return mode;
		if (remote_arg->area_size > LONG_MAX / sizeof(unsigned long))
			return -EINVAL;
		kcov->mode = mode;
		t->kcov = kcov;
		kcov->t = t;
		kcov->remote = true;
		kcov->remote_size = remote_arg->area_size;
		spin_lock_irqsave(&kcov_remote_lock, flags);
		for (i = 0; i < remote_arg->num_handles; i++) {
			if (!kcov_check_handle(remote_arg->handles[i],
						false, true, false)) {
				spin_unlock_irqrestore(&kcov_remote_lock,
							flags);
				kcov_disable(t, kcov);
				return -EINVAL;
			}
			remote = kcov_remote_add(kcov, remote_arg->handles[i]);
			if (IS_ERR(remote)) {
				spin_unlock_irqrestore(&kcov_remote_lock,
							flags);
				kcov_disable(t, kcov);
				return PTR_ERR(remote);
			}
		}
		if (remote_arg->common_handle) {
			if (!kcov_check_handle(remote_arg->common_handle,
						true, false, false)) {
				spin_unlock_irqrestore(&kcov_remote_lock,
							flags);
				kcov_disable(t, kcov);
				return -EINVAL;
			}
			remote = kcov_remote_add(kcov,
					remote_arg->common_handle);
			if (IS_ERR(remote)) {
				spin_unlock_irqrestore(&kcov_remote_lock,
							flags);
				kcov_disable(t, kcov);
				return PTR_ERR(remote);
			}
			t->kcov_handle = remote_arg->common_handle;
		}
		spin_unlock_irqrestore(&kcov_remote_lock, flags);
		/* Put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	default:
		return -ENOTTY;
	}
}

static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct kcov *kcov;
	int res;
	struct kcov_remote_arg *remote_arg = NULL;
	unsigned int remote_num_handles;
	unsigned long remote_arg_size;
	unsigned long size, flags;
	void *area;

	kcov = filep->private_data;
	switch (cmd) {
	case KCOV_INIT_TRACE:
		/*
		 * Enable kcov in trace mode and setup buffer size.
		 * Must happen before anything else.
		 *
		 * First check the size argument - it must be at least 2
		 * to hold the current position and one PC.
		 */
		size = arg;
		if (size < 2 || size > INT_MAX / sizeof(unsigned long))
			return -EINVAL;
		area = vmalloc_user(size * sizeof(unsigned long));
		if (area == NULL)
			return -ENOMEM;
		spin_lock_irqsave(&kcov->lock, flags);
		if (kcov->mode != KCOV_MODE_DISABLED) {
			spin_unlock_irqrestore(&kcov->lock, flags);
			vfree(area);
			return -EBUSY;
		}
		kcov->area = area;
		kcov->size = size;
		kcov->mode = KCOV_MODE_INIT;
		spin_unlock_irqrestore(&kcov->lock, flags);
		return 0;
	case KCOV_REMOTE_ENABLE:
		if (get_user(remote_num_handles, (unsigned __user *)(arg +
				offsetof(struct kcov_remote_arg, num_handles))))
			return -EFAULT;
		if (remote_num_handles > KCOV_REMOTE_MAX_HANDLES)
			return -EINVAL;
		remote_arg_size = struct_size(remote_arg, handles,
					remote_num_handles);
		remote_arg = memdup_user((void __user *)arg, remote_arg_size);
		if (IS_ERR(remote_arg))
			return PTR_ERR(remote_arg);
		if (remote_arg->num_handles != remote_num_handles) {
			kfree(remote_arg);
			return -EINVAL;
		}
		arg = (unsigned long)remote_arg;
		fallthrough;
	default:
		/*
		 * All other commands can be normally executed under a spin lock, so we
		 * obtain and release it here in order to simplify kcov_ioctl_locked().
		 */
		spin_lock_irqsave(&kcov->lock, flags);
		res = kcov_ioctl_locked(kcov, cmd, arg);
		spin_unlock_irqrestore(&kcov->lock, flags);
		kfree(remote_arg);
		return res;
	}
}

static const struct file_operations kcov_fops = {
	.open		= kcov_open,
	.unlocked_ioctl	= kcov_ioctl,
	.compat_ioctl	= kcov_ioctl,
	.mmap		= kcov_mmap,
	.release        = kcov_close,
};

/*
 * kcov_remote_start() and kcov_remote_stop() can be used to annotate a section
 * of code in a kernel background thread or in a softirq to allow kcov to be
 * used to collect coverage from that part of code.
 *
 * The handle argument of kcov_remote_start() identifies a code section that is
 * used for coverage collection. A userspace process passes this handle to the
 * KCOV_REMOTE_ENABLE ioctl to make the used kcov device start collecting
 * coverage for the code section identified by this handle.
 *
 * The usage of these annotations in the kernel code is different depending on
 * the type of the kernel thread whose code is being annotated.
 *
 * For global kernel threads that are spawned in a limited number of instances
 * (e.g. one USB hub_event() worker thread is spawned per USB HCD) and for
 * softirqs, each instance must be assigned a unique 4-byte instance id. The
 * instance id is then combined with a 1-byte subsystem id to get a handle via
 * kcov_remote_handle(subsystem_id, instance_id).
 *
 * For local kernel threads that are spawned from system call handlers when a
 * user interacts with some kernel interface (e.g. vhost workers), a handle is
 * passed from a userspace process as the common_handle field of the
 * kcov_remote_arg struct (note that the user must generate a handle by using
 * kcov_remote_handle() with KCOV_SUBSYSTEM_COMMON as the subsystem id and an
 * arbitrary 4-byte non-zero number as the instance id). This common handle
 * then gets saved into the task_struct of the process that issued the
 * KCOV_REMOTE_ENABLE ioctl. When this process issues system calls that spawn
 * kernel threads, the common handle must be retrieved via kcov_common_handle()
 * and passed to the spawned threads via custom annotations. Those kernel
 * threads must in turn be annotated with kcov_remote_start(common_handle) and
 * kcov_remote_stop(). All of the threads that are spawned by the same process
 * obtain the same handle, hence the name "common".
 *
 * See Documentation/dev-tools/kcov.rst for more details.
 *
 * Internally, kcov_remote_start() looks up the kcov device associated with the
 * provided handle, allocates an area for coverage collection, and saves the
 * pointers to kcov and area into the current task_struct to allow coverage to
 * be collected via __sanitizer_cov_trace_pc().
 * In turn, kcov_remote_stop() clears those pointers from task_struct to stop
 * collecting coverage and copies all collected coverage into the kcov area.
 */
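
/*
 * A userspace sketch of remote coverage collection, condensed from the
 * example in Documentation/dev-tools/kcov.rst (error handling omitted;
 * COVER_SIZE and bus_num are caller-chosen):
 *
 *	struct kcov_remote_arg *arg;
 *
 *	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
 *	ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
 *	unsigned long *cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
 *				    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	arg = calloc(1, sizeof(*arg) + sizeof(uint64_t));
 *	arg->trace_mode = KCOV_TRACE_PC;
 *	arg->area_size = COVER_SIZE;
 *	arg->num_handles = 1;
 *	arg->handles[0] = kcov_remote_handle(KCOV_SUBSYSTEM_USB, bus_num);
 *	ioctl(fd, KCOV_REMOTE_ENABLE, arg);
 *	... trigger the annotated code section, then read cover[] as in the
 *	... local-coverage sketch near the top of this file
 *	ioctl(fd, KCOV_DISABLE, 0);
 */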

static inline bool kcov_mode_enabled(unsigned int mode)
{
	return (mode & ~KCOV_IN_CTXSW) != KCOV_MODE_DISABLED;
}

static void kcov_remote_softirq_start(struct task_struct *t)
{
	struct kcov_percpu_data *data = this_cpu_ptr(&kcov_percpu_data);
	unsigned int mode;

	mode = READ_ONCE(t->kcov_mode);
	barrier();
	if (kcov_mode_enabled(mode)) {
		data->saved_mode = mode;
		data->saved_size = t->kcov_size;
		data->saved_area = t->kcov_area;
		data->saved_sequence = t->kcov_sequence;
		data->saved_kcov = t->kcov;
		kcov_stop(t);
	}
}

static void kcov_remote_softirq_stop(struct task_struct *t)
{
	struct kcov_percpu_data *data = this_cpu_ptr(&kcov_percpu_data);

	if (data->saved_kcov) {
		kcov_start(t, data->saved_kcov, data->saved_size,
				data->saved_area, data->saved_mode,
				data->saved_sequence);
		data->saved_mode = 0;
		data->saved_size = 0;
		data->saved_area = NULL;
		data->saved_sequence = 0;
		data->saved_kcov = NULL;
	}
}

void kcov_remote_start(u64 handle)
{
	struct task_struct *t = current;
	struct kcov_remote *remote;
	struct kcov *kcov;
	unsigned int mode;
	void *area;
	unsigned int size;
	int sequence;
	unsigned long flags;

	if (WARN_ON(!kcov_check_handle(handle, true, true, true)))
		return;
	if (!in_task() && !in_serving_softirq())
		return;

	local_lock_irqsave(&kcov_percpu_data.lock, flags);

	/*
	 * Check that kcov_remote_start() is not called twice in background
	 * threads nor called by user tasks (with enabled kcov).
	 */
	mode = READ_ONCE(t->kcov_mode);
	if (WARN_ON(in_task() && kcov_mode_enabled(mode))) {
		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
		return;
	}
	/*
	 * Check that kcov_remote_start() is not called twice in softirqs.
	 * Note that kcov_remote_start() can be called from a softirq that
	 * happened while collecting coverage from a background thread.
	 */
	if (WARN_ON(in_serving_softirq() && t->kcov_softirq)) {
		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
		return;
	}

	spin_lock(&kcov_remote_lock);
	remote = kcov_remote_find(handle);
	if (!remote) {
		spin_unlock(&kcov_remote_lock);
		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
		return;
	}
	kcov_debug("handle = %llx, context: %s\n", handle,
			in_task() ? "task" : "softirq");
	kcov = remote->kcov;
	/* Put in kcov_remote_stop(). */
	kcov_get(kcov);
	/*
	 * Read kcov fields before unlock to prevent races with
	 * KCOV_DISABLE / kcov_remote_reset().
	 */
	mode = kcov->mode;
	sequence = kcov->sequence;
	if (in_task()) {
		size = kcov->remote_size;
		area = kcov_remote_area_get(size);
	} else {
		size = CONFIG_KCOV_IRQ_AREA_SIZE;
		area = this_cpu_ptr(&kcov_percpu_data)->irq_area;
	}
	spin_unlock(&kcov_remote_lock);

	/* Can only happen when in_task(). */
	if (!area) {
		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
		area = vmalloc(size * sizeof(unsigned long));
		if (!area) {
			kcov_put(kcov);
			return;
		}
		local_lock_irqsave(&kcov_percpu_data.lock, flags);
	}

	/* Reset coverage size. */
	*(u64 *)area = 0;

	if (in_serving_softirq()) {
		kcov_remote_softirq_start(t);
		t->kcov_softirq = 1;
	}
	kcov_start(t, kcov, size, area, mode, sequence);

	local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
}
EXPORT_SYMBOL(kcov_remote_start);

static void kcov_move_area(enum kcov_mode mode, void *dst_area,
				unsigned int dst_area_size, void *src_area)
{
	u64 word_size = sizeof(unsigned long);
	u64 count_size, entry_size_log;
	u64 dst_len, src_len;
	void *dst_entries, *src_entries;
	u64 dst_occupied, dst_free, bytes_to_move, entries_moved;

	kcov_debug("%px %u <= %px %lu\n",
		dst_area, dst_area_size, src_area, *(unsigned long *)src_area);

	switch (mode) {
	case KCOV_MODE_TRACE_PC:
		dst_len = READ_ONCE(*(unsigned long *)dst_area);
		src_len = *(unsigned long *)src_area;
		count_size = sizeof(unsigned long);
		entry_size_log = __ilog2_u64(sizeof(unsigned long));
		break;
	case KCOV_MODE_TRACE_CMP:
		dst_len = READ_ONCE(*(u64 *)dst_area);
		src_len = *(u64 *)src_area;
		count_size = sizeof(u64);
		BUILD_BUG_ON(!is_power_of_2(KCOV_WORDS_PER_CMP));
		entry_size_log = __ilog2_u64(sizeof(u64) * KCOV_WORDS_PER_CMP);
		break;
	default:
		WARN_ON(1);
		return;
	}

	/* As ARM can't divide u64 integers, use the log of the entry size. */
	if (dst_len > ((dst_area_size * word_size - count_size) >>
				entry_size_log))
		return;
	dst_occupied = count_size + (dst_len << entry_size_log);
	dst_free = dst_area_size * word_size - dst_occupied;
	bytes_to_move = min(dst_free, src_len << entry_size_log);
	dst_entries = dst_area + dst_occupied;
	src_entries = src_area + count_size;
	memcpy(dst_entries, src_entries, bytes_to_move);
	entries_moved = bytes_to_move >> entry_size_log;

	switch (mode) {
	case KCOV_MODE_TRACE_PC:
		WRITE_ONCE(*(unsigned long *)dst_area, dst_len + entries_moved);
		break;
	case KCOV_MODE_TRACE_CMP:
		WRITE_ONCE(*(u64 *)dst_area, dst_len + entries_moved);
		break;
	default:
		break;
	}
}

/* See the comment before kcov_remote_start() for usage details. */
void kcov_remote_stop(void)
{
	struct task_struct *t = current;
	struct kcov *kcov;
	unsigned int mode;
	void *area;
	unsigned int size;
	int sequence;
	unsigned long flags;

	if (!in_task() && !in_serving_softirq())
		return;

	local_lock_irqsave(&kcov_percpu_data.lock, flags);

	mode = READ_ONCE(t->kcov_mode);
	barrier();
	if (!kcov_mode_enabled(mode)) {
		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
		return;
	}
	/*
	 * When in softirq, check if the corresponding kcov_remote_start()
	 * actually found the remote handle and started collecting coverage.
	 */
	if (in_serving_softirq() && !t->kcov_softirq) {
		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
		return;
	}
	/* Make sure that kcov_softirq is only set when in softirq. */
	if (WARN_ON(!in_serving_softirq() && t->kcov_softirq)) {
		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
		return;
	}

	kcov = t->kcov;
	area = t->kcov_area;
	size = t->kcov_size;
	sequence = t->kcov_sequence;

	kcov_stop(t);
	if (in_serving_softirq()) {
		t->kcov_softirq = 0;
		kcov_remote_softirq_stop(t);
	}

	spin_lock(&kcov->lock);
	/*
	 * KCOV_DISABLE could have been called between kcov_remote_start()
	 * and kcov_remote_stop(), hence the sequence check.
	 */
	if (sequence == kcov->sequence && kcov->remote)
		kcov_move_area(kcov->mode, kcov->area, kcov->size, area);
	spin_unlock(&kcov->lock);

	if (in_task()) {
		spin_lock(&kcov_remote_lock);
		kcov_remote_area_put(area, size);
		spin_unlock(&kcov_remote_lock);
	}

	local_unlock_irqrestore(&kcov_percpu_data.lock, flags);

	/* Get in kcov_remote_start(). */
	kcov_put(kcov);
}
EXPORT_SYMBOL(kcov_remote_stop);

/* See the comment before kcov_remote_start() for usage details. */
u64 kcov_common_handle(void)
{
	if (!in_task())
		return 0;
	return current->kcov_handle;
}
EXPORT_SYMBOL(kcov_common_handle);

static int __init kcov_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		void *area = vmalloc_node(CONFIG_KCOV_IRQ_AREA_SIZE *
				sizeof(unsigned long), cpu_to_node(cpu));
		if (!area)
			return -ENOMEM;
		per_cpu_ptr(&kcov_percpu_data, cpu)->irq_area = area;
	}

	/*
	 * The kcov debugfs file won't ever get removed and thus,
	 * there is no need to protect it against removal races. The
	 * use of debugfs_create_file_unsafe() is actually safe here.
	 */
	debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops);

	return 0;
}

device_initcall(kcov_init);