// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) "kcov: " fmt

#define DISABLE_BRANCH_PROFILING
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/hashtable.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/kcov.h>
#include <linux/refcount.h>
#include <linux/log2.h>
#include <asm/setup.h>

#define kcov_debug(fmt, ...) pr_debug("%s: " fmt, __func__, ##__VA_ARGS__)

/* Number of 64-bit words written per one comparison: */
#define KCOV_WORDS_PER_CMP 4
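
/*
 * With KCOV_TRACE_CMP, the shared buffer holds a u64 record count at
 * offset 0 followed by fixed-size records; write_comp_data() below fills
 * each record as:
 *
 *	word 0: type (KCOV_CMP_SIZE(n), optionally ORed with KCOV_CMP_CONST)
 *	word 1: arg1 (first comparison operand)
 *	word 2: arg2 (second comparison operand)
 *	word 3: caller PC
 */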

/*
 * kcov descriptor (one per opened debugfs file).
 * State transitions of the descriptor:
 *  - initial state after open()
 *  - then there must be a single ioctl(KCOV_INIT_TRACE) call
 *  - then, mmap() call (several calls are allowed but not useful)
 *  - then, ioctl(KCOV_ENABLE, arg), where arg is
 *	KCOV_TRACE_PC - to trace only the PCs
 *	or
 *	KCOV_TRACE_CMP - to trace only the comparison operands
 *  - then, ioctl(KCOV_DISABLE) to disable the task.
 * Enabling/disabling ioctls can be repeated (only one task at a time is
 * allowed).
 */
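
/*
 * A minimal userspace sketch of this sequence (mirroring the example in
 * Documentation/dev-tools/kcov.rst; error handling and the uapi ioctl
 * definitions from <linux/kcov.h> are omitted, and COVER_SIZE is an
 * arbitrary buffer size chosen by the caller):
 *
 *	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
 *	ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
 *	unsigned long *cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
 *				    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC);
 *	__atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
 *	// ... issue the syscall under test ...
 *	unsigned long n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
 *	for (unsigned long i = 0; i < n; i++)
 *		printf("0x%lx\n", cover[i + 1]);
 *	ioctl(fd, KCOV_DISABLE, 0);
 */
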
struct kcov {
	/*
	 * Reference counter. We keep one for:
	 *  - opened file descriptor
	 *  - task with enabled coverage (we can't unwire it from another task)
	 *  - each code section for remote coverage collection
	 */
	refcount_t		refcount;
	/* The lock protects mode, size, area and t. */
	spinlock_t		lock;
	enum kcov_mode		mode;
	/* Size of the arena (in longs). */
	unsigned int		size;
	/* Coverage buffer shared with user space. */
	void			*area;
	/* Task for which we collect coverage, or NULL. */
	struct task_struct	*t;
	/* Collecting coverage from remote (background) threads. */
	bool			remote;
	/* Size of the remote area (in longs). */
	unsigned int		remote_size;
	/*
	 * Sequence is incremented each time kcov is reenabled, used by
	 * kcov_remote_stop(), see the comment there.
	 */
	int			sequence;
};

struct kcov_remote_area {
	struct list_head	list;
	unsigned int		size;
};

struct kcov_remote {
	u64			handle;
	struct kcov		*kcov;
	struct hlist_node	hnode;
};

static DEFINE_SPINLOCK(kcov_remote_lock);
static DEFINE_HASHTABLE(kcov_remote_map, 4);
static struct list_head kcov_remote_areas = LIST_HEAD_INIT(kcov_remote_areas);

struct kcov_percpu_data {
	void			*irq_area;
	local_lock_t		lock;

	unsigned int		saved_mode;
	unsigned int		saved_size;
	void			*saved_area;
	struct kcov		*saved_kcov;
	int			saved_sequence;
};

static DEFINE_PER_CPU(struct kcov_percpu_data, kcov_percpu_data) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

/* Must be called with kcov_remote_lock locked. */
static struct kcov_remote *kcov_remote_find(u64 handle)
{
	struct kcov_remote *remote;

	hash_for_each_possible(kcov_remote_map, remote, hnode, handle) {
		if (remote->handle == handle)
			return remote;
	}
	return NULL;
}

/* Must be called with kcov_remote_lock locked. */
static struct kcov_remote *kcov_remote_add(struct kcov *kcov, u64 handle)
{
	struct kcov_remote *remote;

	if (kcov_remote_find(handle))
		return ERR_PTR(-EEXIST);
	remote = kmalloc(sizeof(*remote), GFP_ATOMIC);
	if (!remote)
		return ERR_PTR(-ENOMEM);
	remote->handle = handle;
	remote->kcov = kcov;
	hash_add(kcov_remote_map, &remote->hnode, handle);
	return remote;
}

/* Must be called with kcov_remote_lock locked. */
static struct kcov_remote_area *kcov_remote_area_get(unsigned int size)
{
	struct kcov_remote_area *area;
	struct list_head *pos;

	list_for_each(pos, &kcov_remote_areas) {
		area = list_entry(pos, struct kcov_remote_area, list);
		if (area->size == size) {
			list_del(&area->list);
			return area;
		}
	}
	return NULL;
}

/* Must be called with kcov_remote_lock locked. */
static void kcov_remote_area_put(struct kcov_remote_area *area,
				 unsigned int size)
{
	INIT_LIST_HEAD(&area->list);
	area->size = size;
	list_add(&area->list, &kcov_remote_areas);
}
static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
{
	unsigned int mode;

	/*
	 * We are interested in code coverage as a function of syscall inputs,
	 * so we ignore code executed in interrupts, unless we are in a remote
	 * coverage collection section in a softirq.
	 */
	if (!in_task() && !(in_serving_softirq() && t->kcov_softirq))
		return false;
	mode = READ_ONCE(t->kcov_mode);
	/*
	 * There is some code that runs in interrupts but for which
	 * in_interrupt() returns false (e.g. preempt_schedule_irq()).
	 * READ_ONCE()/barrier() effectively provides a load-acquire with
	 * respect to interrupts; it pairs with the barrier()/WRITE_ONCE()
	 * in kcov_start().
	 */
	barrier();
	return mode == needed_mode;
}

static notrace unsigned long canonicalize_ip(unsigned long ip)
{
#ifdef CONFIG_RANDOMIZE_BASE
	ip -= kaslr_offset();
#endif
	return ip;
}
/*
 * Entry point from instrumented code.
 * This is called once per basic-block/edge.
 */
void notrace __sanitizer_cov_trace_pc(void)
{
	struct task_struct *t;
	unsigned long *area;
	unsigned long ip = canonicalize_ip(_RET_IP_);
	unsigned long pos;

	t = current;
	if (!check_kcov_mode(KCOV_MODE_TRACE_PC, t))
		return;

	area = t->kcov_area;
	/* The first 64-bit word is the number of subsequent PCs. */
	pos = READ_ONCE(area[0]) + 1;
	if (likely(pos < t->kcov_size)) {
		/* Previously we wrote the pc before updating pos. However,
		 * some early interrupt code could bypass the
		 * check_kcov_mode() check and invoke
		 * __sanitizer_cov_trace_pc(). If such an interrupt is raised
		 * between writing the pc and updating pos, the pc could be
		 * overwritten by the recursive __sanitizer_cov_trace_pc().
		 * Update pos before writing the pc to avoid such
		 * interleaving.
		 */
		WRITE_ONCE(area[0], pos);
		barrier();
		area[pos] = ip;
	}
}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);

#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
{
	struct task_struct *t;
	u64 *area;
	u64 count, start_index, end_pos, max_pos;

	t = current;
	if (!check_kcov_mode(KCOV_MODE_TRACE_CMP, t))
		return;

	ip = canonicalize_ip(ip);

	/*
	 * We write all comparison arguments and types as u64.
	 * The buffer was allocated for t->kcov_size unsigned longs.
	 */
	area = (u64 *)t->kcov_area;
	max_pos = t->kcov_size * sizeof(unsigned long);

	count = READ_ONCE(area[0]);

	/* Every record is KCOV_WORDS_PER_CMP 64-bit words. */
	start_index = 1 + count * KCOV_WORDS_PER_CMP;
	end_pos = (start_index + KCOV_WORDS_PER_CMP) * sizeof(u64);
	if (likely(end_pos <= max_pos)) {
		/* See comment in __sanitizer_cov_trace_pc(). */
		WRITE_ONCE(area[0], count + 1);
		barrier();
		area[start_index] = type;
		area[start_index + 1] = arg1;
		area[start_index + 2] = arg2;
		area[start_index + 3] = ip;
	}
}

void notrace __sanitizer_cov_trace_cmp1(u8 arg1, u8 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(0), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp1);

void notrace __sanitizer_cov_trace_cmp2(u16 arg1, u16 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(1), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp2);

void notrace __sanitizer_cov_trace_cmp4(u32 arg1, u32 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(2), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp4);

void notrace __sanitizer_cov_trace_cmp8(u64 arg1, u64 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(3), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp8);

void notrace __sanitizer_cov_trace_const_cmp1(u8 arg1, u8 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(0) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp1);

void notrace __sanitizer_cov_trace_const_cmp2(u16 arg1, u16 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(1) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp2);

void notrace __sanitizer_cov_trace_const_cmp4(u32 arg1, u32 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp4);

void notrace __sanitizer_cov_trace_const_cmp8(u64 arg1, u64 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(3) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp8);

void notrace __sanitizer_cov_trace_switch(u64 val, u64 *cases)
{
	u64 i;
	u64 count = cases[0];
	u64 size = cases[1];
	u64 type = KCOV_CMP_CONST;

	switch (size) {
	case 8:
		type |= KCOV_CMP_SIZE(0);
		break;
	case 16:
		type |= KCOV_CMP_SIZE(1);
		break;
	case 32:
		type |= KCOV_CMP_SIZE(2);
		break;
	case 64:
		type |= KCOV_CMP_SIZE(3);
		break;
	default:
		return;
	}
	for (i = 0; i < count; i++)
		write_comp_data(type, cases[i + 2], val, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_switch);
#endif /* ifdef CONFIG_KCOV_ENABLE_COMPARISONS */
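
/*
 * A sketch of how userspace can decode these records after enabling the
 * device with ioctl(fd, KCOV_ENABLE, KCOV_TRACE_CMP) (adapted from
 * Documentation/dev-tools/kcov.rst; cover is the mmapped u64 buffer):
 *
 *	uint64_t n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
 *	for (uint64_t i = 0; i < n; i++) {
 *		uint64_t type = cover[i * KCOV_WORDS_PER_CMP + 1];
 *		uint64_t arg1 = cover[i * KCOV_WORDS_PER_CMP + 2];
 *		uint64_t arg2 = cover[i * KCOV_WORDS_PER_CMP + 3];
 *		uint64_t pc   = cover[i * KCOV_WORDS_PER_CMP + 4];
 *		// operand size in bytes; KCOV_CMP_SIZE(n) stores n << 1:
 *		uint64_t size = 1 << ((type & KCOV_CMP_MASK) >> 1);
 *		// true if either operand is a compile-time constant:
 *		bool is_const = type & KCOV_CMP_CONST;
 *	}
 */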

static void kcov_start(struct task_struct *t, struct kcov *kcov,
		       unsigned int size, void *area, enum kcov_mode mode,
		       int sequence)
{
	kcov_debug("t = %px, size = %u, area = %px\n", t, size, area);
	t->kcov = kcov;
	/* Cache in task struct for performance. */
	t->kcov_size = size;
	t->kcov_area = area;
	t->kcov_sequence = sequence;
	/* See comment in check_kcov_mode(). */
	barrier();
	WRITE_ONCE(t->kcov_mode, mode);
}

static void kcov_stop(struct task_struct *t)
{
	WRITE_ONCE(t->kcov_mode, KCOV_MODE_DISABLED);
	barrier();
	t->kcov = NULL;
	t->kcov_size = 0;
	t->kcov_area = NULL;
}

static void kcov_task_reset(struct task_struct *t)
{
	kcov_stop(t);
	t->kcov_sequence = 0;
	t->kcov_handle = 0;
}

void kcov_task_init(struct task_struct *t)
{
	kcov_task_reset(t);
	t->kcov_handle = current->kcov_handle;
}

static void kcov_reset(struct kcov *kcov)
{
	kcov->t = NULL;
	kcov->mode = KCOV_MODE_INIT;
	kcov->remote = false;
	kcov->remote_size = 0;
	kcov->sequence++;
}

static void kcov_remote_reset(struct kcov *kcov)
{
	int bkt;
	struct kcov_remote *remote;
	struct hlist_node *tmp;
	unsigned long flags;

	spin_lock_irqsave(&kcov_remote_lock, flags);
	hash_for_each_safe(kcov_remote_map, bkt, tmp, remote, hnode) {
		if (remote->kcov != kcov)
			continue;
		hash_del(&remote->hnode);
		kfree(remote);
	}
	/* Do reset before unlock to prevent races with kcov_remote_start(). */
	kcov_reset(kcov);
	spin_unlock_irqrestore(&kcov_remote_lock, flags);
}

static void kcov_disable(struct task_struct *t, struct kcov *kcov)
{
	kcov_task_reset(t);
	if (kcov->remote)
		kcov_remote_reset(kcov);
	else
		kcov_reset(kcov);
}

static void kcov_get(struct kcov *kcov)
{
	refcount_inc(&kcov->refcount);
}

static void kcov_put(struct kcov *kcov)
{
	if (refcount_dec_and_test(&kcov->refcount)) {
		kcov_remote_reset(kcov);
		vfree(kcov->area);
		kfree(kcov);
	}
}

void kcov_task_exit(struct task_struct *t)
{
	struct kcov *kcov;
	unsigned long flags;

	kcov = t->kcov;
	if (kcov == NULL)
		return;

	spin_lock_irqsave(&kcov->lock, flags);
	kcov_debug("t = %px, kcov->t = %px\n", t, kcov->t);
	/*
	 * For KCOV_ENABLE devices we want to make sure that t->kcov->t == t,
	 * which comes down to:
	 *	WARN_ON(!kcov->remote && kcov->t != t);
	 *
	 * For KCOV_REMOTE_ENABLE devices, the exiting task is either:
	 *
	 * 1. A remote task between kcov_remote_start() and kcov_remote_stop().
	 *    In this case we should print a warning right away, since a task
	 *    shouldn't be exiting when it's in a kcov coverage collection
	 *    section. Here t points to the task that is collecting remote
	 *    coverage, and t->kcov->t points to the thread that created the
	 *    kcov device, which means that to detect this case we need to
	 *    check that t != t->kcov->t, and this gives us the following:
	 *	WARN_ON(kcov->remote && kcov->t != t);
	 *
	 * 2. The task that created kcov exiting without calling KCOV_DISABLE,
	 *    and then again we make sure that t->kcov->t == t:
	 *	WARN_ON(kcov->remote && kcov->t != t);
	 *
	 * By combining all three checks into one we get:
	 */
	if (WARN_ON(kcov->t != t)) {
		spin_unlock_irqrestore(&kcov->lock, flags);
		return;
	}
	/* Just to not leave dangling references behind. */
	kcov_disable(t, kcov);
	spin_unlock_irqrestore(&kcov->lock, flags);
	kcov_put(kcov);
}

static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
{
	int res = 0;
	struct kcov *kcov = vma->vm_file->private_data;
	unsigned long size, off;
	struct page *page;
	unsigned long flags;

	spin_lock_irqsave(&kcov->lock, flags);
	size = kcov->size * sizeof(unsigned long);
	if (kcov->area == NULL || vma->vm_pgoff != 0 ||
	    vma->vm_end - vma->vm_start != size) {
		res = -EINVAL;
		goto exit;
	}
	spin_unlock_irqrestore(&kcov->lock, flags);
	vma->vm_flags |= VM_DONTEXPAND;
	for (off = 0; off < size; off += PAGE_SIZE) {
		page = vmalloc_to_page(kcov->area + off);
		res = vm_insert_page(vma, vma->vm_start + off, page);
		if (res) {
			/* pr_fmt already prepends the "kcov: " prefix. */
			pr_warn_once("vm_insert_page() failed\n");
			return res;
		}
	}
	return 0;
exit:
	spin_unlock_irqrestore(&kcov->lock, flags);
	return res;
}

static int kcov_open(struct inode *inode, struct file *filep)
{
	struct kcov *kcov;

	kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
	if (!kcov)
		return -ENOMEM;
	kcov->mode = KCOV_MODE_DISABLED;
	kcov->sequence = 1;
	refcount_set(&kcov->refcount, 1);
	spin_lock_init(&kcov->lock);
	filep->private_data = kcov;
	return nonseekable_open(inode, filep);
}

static int kcov_close(struct inode *inode, struct file *filep)
{
	kcov_put(filep->private_data);
	return 0;
}

static int kcov_get_mode(unsigned long arg)
{
	if (arg == KCOV_TRACE_PC)
		return KCOV_MODE_TRACE_PC;
	else if (arg == KCOV_TRACE_CMP)
#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
		return KCOV_MODE_TRACE_CMP;
#else
		return -ENOTSUPP;
#endif
	else
		return -EINVAL;
}

/*
 * Fault in a lazily-faulted vmalloc area before it can be used by
 * __sanitizer_cov_trace_pc(), to avoid recursion issues if any code on the
 * vmalloc fault handling path is instrumented.
 */
static void kcov_fault_in_area(struct kcov *kcov)
{
	unsigned long stride = PAGE_SIZE / sizeof(unsigned long);
	unsigned long *area = kcov->area;
	unsigned long offset;

	for (offset = 0; offset < kcov->size; offset += stride)
		READ_ONCE(area[offset]);
}

static inline bool kcov_check_handle(u64 handle, bool common_valid,
				     bool uncommon_valid, bool zero_valid)
{
	if (handle & ~(KCOV_SUBSYSTEM_MASK | KCOV_INSTANCE_MASK))
		return false;
	switch (handle & KCOV_SUBSYSTEM_MASK) {
	case KCOV_SUBSYSTEM_COMMON:
		return (handle & KCOV_INSTANCE_MASK) ?
			common_valid : zero_valid;
	case KCOV_SUBSYSTEM_USB:
		return uncommon_valid;
	default:
		return false;
	}
	return false;
}
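
/*
 * For illustration, assuming the uapi definitions in
 * include/uapi/linux/kcov.h (the top byte of a handle selects the
 * subsystem, the lower 4 bytes the instance; bus->busnum below stands in
 * for whatever instance id the subsystem uses):
 *
 *	// common handle, instance id must be non-zero:
 *	u64 common = kcov_remote_handle(KCOV_SUBSYSTEM_COMMON, 0x42);
 *	// USB handle, instance id is the bus number:
 *	u64 usb = kcov_remote_handle(KCOV_SUBSYSTEM_USB, bus->busnum);
 */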

static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
			     unsigned long arg)
{
	struct task_struct *t;
	unsigned long flags, unused;
	int mode, i;
	struct kcov_remote_arg *remote_arg;
	struct kcov_remote *remote;

	switch (cmd) {
	case KCOV_ENABLE:
		/*
		 * Enable coverage for the current task.
		 * At this point the user must have enabled trace mode and
		 * mmapped the file. Coverage collection is disabled only
		 * at task exit or voluntarily by KCOV_DISABLE. After that it
		 * can be enabled for another task.
		 */
		if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
			return -EINVAL;
		t = current;
		if (kcov->t != NULL || t->kcov != NULL)
			return -EBUSY;
		mode = kcov_get_mode(arg);
		if (mode < 0)
			return mode;
		kcov_fault_in_area(kcov);
		kcov->mode = mode;
		kcov_start(t, kcov, kcov->size, kcov->area, kcov->mode,
			   kcov->sequence);
		kcov->t = t;
		/* Put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	case KCOV_DISABLE:
		/* Disable coverage for the current task. */
		unused = arg;
		if (unused != 0 || current->kcov != kcov)
			return -EINVAL;
		t = current;
		if (WARN_ON(kcov->t != t))
			return -EINVAL;
		kcov_disable(t, kcov);
		kcov_put(kcov);
		return 0;
	case KCOV_REMOTE_ENABLE:
		if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
			return -EINVAL;
		t = current;
		if (kcov->t != NULL || t->kcov != NULL)
			return -EBUSY;
		remote_arg = (struct kcov_remote_arg *)arg;
		mode = kcov_get_mode(remote_arg->trace_mode);
		if (mode < 0)
			return mode;
		if (remote_arg->area_size > LONG_MAX / sizeof(unsigned long))
			return -EINVAL;
		kcov->mode = mode;
		t->kcov = kcov;
		kcov->t = t;
		kcov->remote = true;
		kcov->remote_size = remote_arg->area_size;
		spin_lock_irqsave(&kcov_remote_lock, flags);
		for (i = 0; i < remote_arg->num_handles; i++) {
			if (!kcov_check_handle(remote_arg->handles[i],
					       false, true, false)) {
				spin_unlock_irqrestore(&kcov_remote_lock,
						       flags);
				kcov_disable(t, kcov);
				return -EINVAL;
			}
			remote = kcov_remote_add(kcov, remote_arg->handles[i]);
			if (IS_ERR(remote)) {
				spin_unlock_irqrestore(&kcov_remote_lock,
						       flags);
				kcov_disable(t, kcov);
				return PTR_ERR(remote);
			}
		}
		if (remote_arg->common_handle) {
			if (!kcov_check_handle(remote_arg->common_handle,
					       true, false, false)) {
				spin_unlock_irqrestore(&kcov_remote_lock,
						       flags);
				kcov_disable(t, kcov);
				return -EINVAL;
			}
			remote = kcov_remote_add(kcov,
					remote_arg->common_handle);
			if (IS_ERR(remote)) {
				spin_unlock_irqrestore(&kcov_remote_lock,
						       flags);
				kcov_disable(t, kcov);
				return PTR_ERR(remote);
			}
			t->kcov_handle = remote_arg->common_handle;
		}
		spin_unlock_irqrestore(&kcov_remote_lock, flags);
		/* Put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	default:
		return -ENOTTY;
	}
}

static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct kcov *kcov;
	int res;
	struct kcov_remote_arg *remote_arg = NULL;
	unsigned int remote_num_handles;
	unsigned long remote_arg_size;
	unsigned long size, flags;
	void *area;

	kcov = filep->private_data;
	switch (cmd) {
	case KCOV_INIT_TRACE:
		/*
		 * Enable kcov in trace mode and set up the buffer size.
		 * Must happen before anything else.
		 *
		 * First check the size argument - it must be at least 2
		 * to hold the current position and one PC.
		 */
		size = arg;
		if (size < 2 || size > INT_MAX / sizeof(unsigned long))
			return -EINVAL;
		area = vmalloc_user(size * sizeof(unsigned long));
		if (area == NULL)
			return -ENOMEM;
		spin_lock_irqsave(&kcov->lock, flags);
		if (kcov->mode != KCOV_MODE_DISABLED) {
			spin_unlock_irqrestore(&kcov->lock, flags);
			vfree(area);
			return -EBUSY;
		}
		kcov->area = area;
		kcov->size = size;
		kcov->mode = KCOV_MODE_INIT;
		spin_unlock_irqrestore(&kcov->lock, flags);
		return 0;
	case KCOV_REMOTE_ENABLE:
		if (get_user(remote_num_handles, (unsigned __user *)(arg +
				offsetof(struct kcov_remote_arg, num_handles))))
			return -EFAULT;
		if (remote_num_handles > KCOV_REMOTE_MAX_HANDLES)
			return -EINVAL;
		remote_arg_size = struct_size(remote_arg, handles,
					      remote_num_handles);
		remote_arg = memdup_user((void __user *)arg, remote_arg_size);
		if (IS_ERR(remote_arg))
			return PTR_ERR(remote_arg);
		if (remote_arg->num_handles != remote_num_handles) {
			kfree(remote_arg);
			return -EINVAL;
		}
		arg = (unsigned long)remote_arg;
		fallthrough;
	default:
		/*
		 * All other commands can be normally executed under a spin
		 * lock, so we obtain and release it here in order to simplify
		 * kcov_ioctl_locked().
		 */
		spin_lock_irqsave(&kcov->lock, flags);
		res = kcov_ioctl_locked(kcov, cmd, arg);
		spin_unlock_irqrestore(&kcov->lock, flags);
		kfree(remote_arg);
		return res;
	}
}

static const struct file_operations kcov_fops = {
	.open		= kcov_open,
	.unlocked_ioctl	= kcov_ioctl,
	.compat_ioctl	= kcov_ioctl,
	.mmap		= kcov_mmap,
	.release	= kcov_close,
};

/*
 * kcov_remote_start() and kcov_remote_stop() can be used to annotate a section
 * of code in a kernel background thread or in a softirq to allow kcov to be
 * used to collect coverage from that part of code.
 *
 * The handle argument of kcov_remote_start() identifies a code section that is
 * used for coverage collection. A userspace process passes this handle to the
 * KCOV_REMOTE_ENABLE ioctl to make the used kcov device start collecting
 * coverage for the code section identified by this handle.
 *
 * The usage of these annotations in the kernel code is different depending on
 * the type of the kernel thread whose code is being annotated.
 *
 * For global kernel threads that are spawned in a limited number of instances
 * (e.g. one USB hub_event() worker thread is spawned per USB HCD) and for
 * softirqs, each instance must be assigned a unique 4-byte instance id. The
 * instance id is then combined with a 1-byte subsystem id to get a handle via
 * kcov_remote_handle(subsystem_id, instance_id).
 *
 * For local kernel threads that are spawned from a system call handler when a
 * user interacts with some kernel interface (e.g. vhost workers), a handle is
 * passed from a userspace process as the common_handle field of the
 * kcov_remote_arg struct (note that the user must generate a handle by using
 * kcov_remote_handle() with KCOV_SUBSYSTEM_COMMON as the subsystem id and an
 * arbitrary 4-byte non-zero number as the instance id). This common handle
 * then gets saved into the task_struct of the process that issued the
 * KCOV_REMOTE_ENABLE ioctl. When this process issues system calls that spawn
 * kernel threads, the common handle must be retrieved via kcov_common_handle()
 * and passed to the spawned threads via custom annotations. Those kernel
 * threads must in turn be annotated with kcov_remote_start(common_handle) and
 * kcov_remote_stop(). All of the threads that are spawned by the same process
 * obtain the same handle, hence the name "common". A sketch of both sides
 * follows this comment.
 *
 * See Documentation/dev-tools/kcov.rst for more details.
 *
 * Internally, kcov_remote_start() looks up the kcov device associated with the
 * provided handle, allocates an area for coverage collection, and saves the
 * pointers to kcov and area into the current task_struct to allow coverage to
 * be collected via __sanitizer_cov_trace_pc().
 * In turn, kcov_remote_stop() clears those pointers from task_struct to stop
 * collecting coverage and copies all collected coverage into the kcov area.
 */
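
/*
 * A sketch of the common-handle case (adapted from
 * Documentation/dev-tools/kcov.rst; the worker struct and its kcov_handle
 * field are hypothetical driver state). Userspace enables remote
 * collection on an initialized and mmapped kcov device:
 *
 *	struct kcov_remote_arg *arg = calloc(1, sizeof(*arg));
 *	arg->trace_mode = KCOV_TRACE_PC;
 *	arg->area_size = COVER_SIZE;
 *	arg->common_handle = kcov_remote_handle(KCOV_SUBSYSTEM_COMMON, 0x42);
 *	ioctl(fd, KCOV_REMOTE_ENABLE, arg);
 *
 * The syscall handler that spawns the worker saves the handle:
 *
 *	worker->kcov_handle = kcov_common_handle();
 *
 * And the worker annotates the code section of interest:
 *
 *	kcov_remote_start_common(worker->kcov_handle);
 *	// ... code whose coverage is collected ...
 *	kcov_remote_stop();
 */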

static inline bool kcov_mode_enabled(unsigned int mode)
{
	return (mode & ~KCOV_IN_CTXSW) != KCOV_MODE_DISABLED;
}

static void kcov_remote_softirq_start(struct task_struct *t)
{
	struct kcov_percpu_data *data = this_cpu_ptr(&kcov_percpu_data);
	unsigned int mode;

	mode = READ_ONCE(t->kcov_mode);
	barrier();
	if (kcov_mode_enabled(mode)) {
		data->saved_mode = mode;
		data->saved_size = t->kcov_size;
		data->saved_area = t->kcov_area;
		data->saved_sequence = t->kcov_sequence;
		data->saved_kcov = t->kcov;
		kcov_stop(t);
	}
}

static void kcov_remote_softirq_stop(struct task_struct *t)
{
	struct kcov_percpu_data *data = this_cpu_ptr(&kcov_percpu_data);

	if (data->saved_kcov) {
		kcov_start(t, data->saved_kcov, data->saved_size,
			   data->saved_area, data->saved_mode,
			   data->saved_sequence);
		data->saved_mode = 0;
		data->saved_size = 0;
		data->saved_area = NULL;
		data->saved_sequence = 0;
		data->saved_kcov = NULL;
	}
}

void kcov_remote_start(u64 handle)
{
	struct task_struct *t = current;
	struct kcov_remote *remote;
	struct kcov *kcov;
	unsigned int mode;
	void *area;
	unsigned int size;
	int sequence;
	unsigned long flags;

	if (WARN_ON(!kcov_check_handle(handle, true, true, true)))
		return;
	if (!in_task() && !in_serving_softirq())
		return;

	local_lock_irqsave(&kcov_percpu_data.lock, flags);

	/*
	 * Check that kcov_remote_start() is not called twice in background
	 * threads nor called by user tasks (with kcov enabled).
	 */
	mode = READ_ONCE(t->kcov_mode);
	if (WARN_ON(in_task() && kcov_mode_enabled(mode))) {
		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
		return;
	}
	/*
	 * Check that kcov_remote_start() is not called twice in softirqs.
	 * Note that kcov_remote_start() can be called from a softirq that
	 * happened while collecting coverage from a background thread.
	 */
	if (WARN_ON(in_serving_softirq() && t->kcov_softirq)) {
		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
		return;
	}

	spin_lock(&kcov_remote_lock);
	remote = kcov_remote_find(handle);
	if (!remote) {
		spin_unlock(&kcov_remote_lock);
		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
		return;
	}
	kcov_debug("handle = %llx, context: %s\n", handle,
		   in_task() ? "task" : "softirq");
	kcov = remote->kcov;
	/* Put in kcov_remote_stop(). */
	kcov_get(kcov);
	/*
	 * Read kcov fields before unlock to prevent races with
	 * KCOV_DISABLE / kcov_remote_reset().
	 */
	mode = kcov->mode;
	sequence = kcov->sequence;
	if (in_task()) {
		size = kcov->remote_size;
		area = kcov_remote_area_get(size);
	} else {
		size = CONFIG_KCOV_IRQ_AREA_SIZE;
		area = this_cpu_ptr(&kcov_percpu_data)->irq_area;
	}
	spin_unlock(&kcov_remote_lock);

	/* Can only happen when in_task(). */
	if (!area) {
		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
		area = vmalloc(size * sizeof(unsigned long));
		if (!area) {
			kcov_put(kcov);
			return;
		}
		local_lock_irqsave(&kcov_percpu_data.lock, flags);
	}

	/* Reset coverage size. */
	*(u64 *)area = 0;

	if (in_serving_softirq()) {
		kcov_remote_softirq_start(t);
		t->kcov_softirq = 1;
	}
	kcov_start(t, kcov, size, area, mode, sequence);

	local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
}
EXPORT_SYMBOL(kcov_remote_start);

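/*
 * Append the entries collected in src_area to the kcov area mmapped by
 * userspace (dst_area). Both buffers start with an entry count; entries
 * that do not fit into the destination are dropped.
 */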
static void kcov_move_area(enum kcov_mode mode, void *dst_area,
			   unsigned int dst_area_size, void *src_area)
{
	u64 word_size = sizeof(unsigned long);
	u64 count_size, entry_size_log;
	u64 dst_len, src_len;
	void *dst_entries, *src_entries;
	u64 dst_occupied, dst_free, bytes_to_move, entries_moved;

	kcov_debug("%px %u <= %px %lu\n",
		   dst_area, dst_area_size, src_area,
		   *(unsigned long *)src_area);

	switch (mode) {
	case KCOV_MODE_TRACE_PC:
		dst_len = READ_ONCE(*(unsigned long *)dst_area);
		src_len = *(unsigned long *)src_area;
		count_size = sizeof(unsigned long);
		entry_size_log = __ilog2_u64(sizeof(unsigned long));
		break;
	case KCOV_MODE_TRACE_CMP:
		dst_len = READ_ONCE(*(u64 *)dst_area);
		src_len = *(u64 *)src_area;
		count_size = sizeof(u64);
		BUILD_BUG_ON(!is_power_of_2(KCOV_WORDS_PER_CMP));
		entry_size_log = __ilog2_u64(sizeof(u64) * KCOV_WORDS_PER_CMP);
		break;
	default:
		WARN_ON(1);
		return;
	}

	/* As arm can't divide u64 integers, use the log of the entry size. */
	if (dst_len > ((dst_area_size * word_size - count_size) >>
				entry_size_log))
		return;
	dst_occupied = count_size + (dst_len << entry_size_log);
	dst_free = dst_area_size * word_size - dst_occupied;
	bytes_to_move = min(dst_free, src_len << entry_size_log);
	dst_entries = dst_area + dst_occupied;
	src_entries = src_area + count_size;
	memcpy(dst_entries, src_entries, bytes_to_move);
	entries_moved = bytes_to_move >> entry_size_log;

	switch (mode) {
	case KCOV_MODE_TRACE_PC:
		WRITE_ONCE(*(unsigned long *)dst_area, dst_len + entries_moved);
		break;
	case KCOV_MODE_TRACE_CMP:
		WRITE_ONCE(*(u64 *)dst_area, dst_len + entries_moved);
		break;
	default:
		break;
	}
}

/* See the comment before kcov_remote_start() for usage details. */
void kcov_remote_stop(void)
{
	struct task_struct *t = current;
	struct kcov *kcov;
	unsigned int mode;
	void *area;
	unsigned int size;
	int sequence;
	unsigned long flags;

	if (!in_task() && !in_serving_softirq())
		return;

	local_lock_irqsave(&kcov_percpu_data.lock, flags);

	mode = READ_ONCE(t->kcov_mode);
	barrier();
	if (!kcov_mode_enabled(mode)) {
		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
		return;
	}
	/*
	 * When in softirq, check if the corresponding kcov_remote_start()
	 * actually found the remote handle and started collecting coverage.
	 */
	if (in_serving_softirq() && !t->kcov_softirq) {
		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
		return;
	}
	/* Make sure that kcov_softirq is only set when in softirq. */
	if (WARN_ON(!in_serving_softirq() && t->kcov_softirq)) {
		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
		return;
	}

	kcov = t->kcov;
	area = t->kcov_area;
	size = t->kcov_size;
	sequence = t->kcov_sequence;

	kcov_stop(t);
	if (in_serving_softirq()) {
		t->kcov_softirq = 0;
		kcov_remote_softirq_stop(t);
	}

	spin_lock(&kcov->lock);
	/*
	 * KCOV_DISABLE could have been called between kcov_remote_start()
	 * and kcov_remote_stop(), hence the sequence check.
	 */
	if (sequence == kcov->sequence && kcov->remote)
		kcov_move_area(kcov->mode, kcov->area, kcov->size, area);
	spin_unlock(&kcov->lock);

	if (in_task()) {
		spin_lock(&kcov_remote_lock);
		kcov_remote_area_put(area, size);
		spin_unlock(&kcov_remote_lock);
	}

	local_unlock_irqrestore(&kcov_percpu_data.lock, flags);

	/* Get in kcov_remote_start(). */
	kcov_put(kcov);
}
EXPORT_SYMBOL(kcov_remote_stop);

/* See the comment before kcov_remote_start() for usage details. */
u64 kcov_common_handle(void)
{
	if (!in_task())
		return 0;
	return current->kcov_handle;
}
EXPORT_SYMBOL(kcov_common_handle);

static int __init kcov_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		void *area = vmalloc_node(CONFIG_KCOV_IRQ_AREA_SIZE *
				sizeof(unsigned long), cpu_to_node(cpu));
		if (!area)
			return -ENOMEM;
		per_cpu_ptr(&kcov_percpu_data, cpu)->irq_area = area;
	}

	/*
	 * The kcov debugfs file won't ever get removed and thus,
	 * there is no need to protect it against removal races. The
	 * use of debugfs_create_file_unsafe() is actually safe here.
	 */
	debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops);

	return 0;
}

device_initcall(kcov_init);