xref: /netbsd-src/sys/kern/subr_kcov.c (revision 6ce322d076f3bf9050d6879387c9764006e3d9ca)
1 /*	$NetBSD: subr_kcov.c,v 1.18 2022/10/26 23:24:21 riastradh Exp $	*/
2 
3 /*
4  * Copyright (c) 2019-2020 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Siddharth Muralee.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 
34 #include <sys/module.h>
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
38 
39 #include <sys/conf.h>
40 #include <sys/condvar.h>
41 #include <sys/file.h>
42 #include <sys/filedesc.h>
43 #include <sys/kmem.h>
44 #include <sys/mman.h>
45 #include <sys/mutex.h>
46 #include <sys/queue.h>
47 
48 #include <uvm/uvm_extern.h>
49 #include <sys/kcov.h>
50 
51 #define KCOV_BUF_MAX_ENTRIES	(256 << 10)
52 
53 #define KCOV_CMP_CONST		1
54 #define KCOV_CMP_SIZE(x)	((x) << 1)
55 
/* /dev/kcov implements open() only; everything else goes via fileops. */
static dev_type_open(kcov_open);

/*
 * Character device switch for /dev/kcov.  Only d_open is provided: the
 * open routine clones a per-descriptor file (see kcov_open), so all
 * subsequent operations are dispatched through kcov_fileops rather than
 * through this cdevsw.
 */
const struct cdevsw kcov_cdevsw = {
	.d_open = kcov_open,
	.d_close = noclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = noioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE
};
72 
static int kcov_fops_ioctl(file_t *, u_long, void *);
static int kcov_fops_close(file_t *);
static int kcov_fops_mmap(file_t *, off_t *, size_t, int, int *, int *,
    struct uvm_object **, int *);

/*
 * File operations for a cloned kcov descriptor.  Only ioctl (control),
 * close (teardown) and mmap (buffer export) carry real behavior; the
 * rest are the standard no-op/bad-op stubs.
 */
const struct fileops kcov_fileops = {
	.fo_read = fbadop_read,
	.fo_write = fbadop_write,
	.fo_ioctl = kcov_fops_ioctl,
	.fo_fcntl = fnullop_fcntl,
	.fo_poll = fnullop_poll,
	.fo_stat = fbadop_stat,
	.fo_close = kcov_fops_close,
	.fo_kqfilter = fnullop_kqfilter,
	.fo_restart = fnullop_restart,
	.fo_mmap = kcov_fops_mmap,
};
90 
91 /*
92  * The KCOV descriptors (KD) are allocated during open(), and are associated
93  * with a file descriptor.
94  *
95  * An LWP can 'enable' a KD. When this happens, this LWP becomes the owner of
96  * the KD, and no LWP can 'disable' this KD except the owner.
97  *
98  * A KD is freed when its file descriptor is closed _iff_ the KD is not active
99  * on an LWP. If it is, we ask the LWP to free it when it exits.
100  *
101  * The buffers mmapped are in a dedicated uobj, therefore there is no risk
102  * that the kernel frees a buffer still mmapped in a process: the uobj
103  * refcount will be non-zero, so the backing is not freed until an munmap
104  * occurs on said process.
105  */
106 
/*
 * A KCOV descriptor.  One is allocated per open() of /dev/kcov, and one
 * is embedded in each registered remote (kcov_remote_t).  The fields in
 * the second group may effectively live on the remote descriptor when
 * 'remote' is non-NULL: the ioctl/mmap handlers forward to it.
 */
typedef struct kcov_desc {
	/* Local only */
	kmutex_t lock;		/* serializes ioctl/close on this KD */
	bool lwpfree;		/* close() ran while enabled; owner LWP frees */
	bool silenced;		/* temporarily suppress tracing (kcov_silence_*) */

	/* Pointer to the end of the structure, if any */
	struct kcov_desc *remote;	/* remote KD this descriptor attached to */

	/* Can be remote */
	kcov_int_t *buf;	/* coverage buffer; buf[0] is the entry count */
	struct uvm_object *uobj;	/* anonymous object backing 'buf' */
	size_t bufnent;		/* usable entries (allocation size - 1) */
	size_t bufsize;		/* byte size of the mapping, page-rounded */
	int mode;		/* KCOV_MODE_* currently selected */
	bool enabled;		/* tracing active on some LWP */
} kcov_t;
124 
125 /* -------------------------------------------------------------------------- */
126 
/* Acquire the per-descriptor lock. */
static void
kcov_lock(kcov_t *kd)
{

	mutex_enter(&kd->lock);
}
133 
/* Release the per-descriptor lock. */
static void
kcov_unlock(kcov_t *kd)
{

	mutex_exit(&kd->lock);
}
140 
141 static bool
kcov_mode_is_valid(int mode)142 kcov_mode_is_valid(int mode)
143 {
144 	switch (mode) {
145 	case KCOV_MODE_NONE:
146 	case KCOV_MODE_TRACE_PC:
147 	case KCOV_MODE_TRACE_CMP:
148 		return true;
149 	default:
150 		return false;
151 	}
152 }
153 
154 /* -------------------------------------------------------------------------- */
155 
/*
 * Release all resources held by a KCOV descriptor: drop the wired
 * kernel mapping of the buffer (which releases the uobj reference taken
 * at allocation time; user mmaps keep their own references), destroy
 * the lock and free the descriptor itself.
 */
static void
kcov_free(kcov_t *kd)
{

	KASSERT(kd != NULL);
	if (kd->buf != NULL) {
		uvm_deallocate(kernel_map, (vaddr_t)kd->buf, kd->bufsize);
	}
	mutex_destroy(&kd->lock);
	kmem_free(kd, sizeof(*kd));
}
167 
/*
 * Called when an LWP exits.  Disable tracing on the KD owned by the
 * LWP, if any.  If the KD's file descriptor was already closed
 * (kcov_fops_close set 'lwpfree'), we are the last user and free it.
 */
void
kcov_lwp_free(struct lwp *l)
{
	kcov_t *kd = (kcov_t *)l->l_kcov;

	if (kd == NULL) {
		return;
	}
	kcov_lock(kd);
	kd->enabled = false;
	kcov_unlock(kd);
	if (kd->lwpfree) {
		/* close() already ran; nobody else references this KD. */
		kcov_free(kd);
	}
}
183 
184 static int
kcov_allocbuf(kcov_t * kd,uint64_t nent)185 kcov_allocbuf(kcov_t *kd, uint64_t nent)
186 {
187 	size_t size;
188 	int error;
189 
190 	if (nent < 2 || nent > KCOV_BUF_MAX_ENTRIES)
191 		return EINVAL;
192 	if (kd->buf != NULL)
193 		return EEXIST;
194 
195 	size = roundup(nent * KCOV_ENTRY_SIZE, PAGE_SIZE);
196 	kd->bufnent = nent - 1;
197 	kd->bufsize = size;
198 	kd->uobj = uao_create(kd->bufsize, 0);
199 
200 	/* Map the uobj into the kernel address space, as wired. */
201 	kd->buf = NULL;
202 	error = uvm_map(kernel_map, (vaddr_t *)&kd->buf, kd->bufsize, kd->uobj,
203 	    0, 0, UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_SHARE,
204 	    UVM_ADV_RANDOM, 0));
205 	if (error) {
206 		uao_detach(kd->uobj);
207 		return error;
208 	}
209 	error = uvm_map_pageable(kernel_map, (vaddr_t)kd->buf,
210 	    (vaddr_t)kd->buf + size, false, 0);
211 	if (error) {
212 		uvm_deallocate(kernel_map, (vaddr_t)kd->buf, size);
213 		return error;
214 	}
215 
216 	return 0;
217 }
218 
219 /* -------------------------------------------------------------------------- */
220 
/*
 * A remote coverage unit: a KD owned by a kernel subsystem, identified
 * by a (subsystem, id) pair, rather than enabled directly on an LWP.
 * User descriptors bind to it with KCOV_IOC_REMOTE_ATTACH, after which
 * their kd->remote points to the embedded 'kd' below.
 */
typedef struct kcov_remote {
	LIST_ENTRY(kcov_remote) list;	/* entry on kcov_remote_list */
	uint64_t subsystem;		/* subsystem identifier */
	uint64_t id;			/* instance within the subsystem */
	u_int refcount;			/* LWPs currently inside the section */
	kcov_t kd;			/* embedded descriptor + buffer */
} kcov_remote_t;

typedef LIST_HEAD(, kcov_remote) kcov_remote_list_t;

/*
 * Global list of registered remotes.  NOTE(review): no lock appears to
 * guard traversal/insertion here — presumably registration only happens
 * during early, effectively single-threaded bring-up; confirm.
 */
static kcov_remote_list_t kcov_remote_list;
232 
233 static kcov_remote_t *
kcov_remote_find(uint64_t subsystem,uint64_t id)234 kcov_remote_find(uint64_t subsystem, uint64_t id)
235 {
236 	kcov_remote_t *kr;
237 
238 	LIST_FOREACH(kr, &kcov_remote_list, list) {
239 		if (kr->subsystem == subsystem && kr->id == id)
240 			return kr;
241 	}
242 
243 	return NULL;
244 }
245 
/*
 * Register a new remote coverage unit for (subsystem, id).  Allocates
 * the remote with a full-size coverage buffer and inserts it on the
 * global list.  Panics on duplicate registration or on allocation
 * failure, as callers are kernel subsystems registering at init time.
 */
void
kcov_remote_register(uint64_t subsystem, uint64_t id)
{
	kcov_remote_t *kr;
	kcov_t *kd;
	int error;

	if (kcov_remote_find(subsystem, id) != NULL) {
		panic("%s: kr already exists", __func__);
	}

	kr = kmem_zalloc(sizeof(*kr), KM_SLEEP);
	kr->subsystem = subsystem;
	kr->id = id;
	kr->refcount = 0;
	kd = &kr->kd;

	mutex_init(&kd->lock, MUTEX_DEFAULT, IPL_NONE);
	error = kcov_allocbuf(kd, KCOV_BUF_MAX_ENTRIES);
	if (error != 0)
		panic("%s: failed to allocate buffer", __func__);

	LIST_INSERT_HEAD(&kcov_remote_list, kr, list);
}
270 
/*
 * Enter a remote coverage section on the current LWP.  If tracing was
 * enabled on the remote KD (via KCOV_IOC_ENABLE on an attached user
 * descriptor), the LWP starts recording into it through l_kcov.
 *
 * The refcount KASSERT enforces that at most one LWP is inside a given
 * remote section at any time; the subsystem is responsible for that.
 */
void
kcov_remote_enter(uint64_t subsystem, uint64_t id)
{
	struct lwp *l = curlwp;
	kcov_remote_t *kr;
	kcov_t *kd;
	u_int refs __diagused;

	kr = kcov_remote_find(subsystem, id);
	if (__predict_false(kr == NULL)) {
		panic("%s: unable to find kr", __func__);
	}

	refs = atomic_inc_uint_nv(&kr->refcount);
	KASSERT(refs == 1);

	KASSERT(l->l_kcov == NULL);
	kd = &kr->kd;
	/* Pairs with the atomic stores in kcov_remote_{en,dis}able(). */
	if (atomic_load_relaxed(&kd->enabled)) {
		l->l_kcov = kd;
	}
}
293 
/*
 * Leave a remote coverage section on the current LWP: drop the refcount
 * back to zero and stop recording into the remote KD.  Must be paired
 * with a preceding kcov_remote_enter() on the same LWP.
 */
void
kcov_remote_leave(uint64_t subsystem, uint64_t id)
{
	struct lwp *l = curlwp;
	kcov_remote_t *kr;
	u_int refs __diagused;

	kr = kcov_remote_find(subsystem, id);
	if (__predict_false(kr == NULL)) {
		panic("%s: unable to find kr", __func__);
	}

	refs = atomic_dec_uint_nv(&kr->refcount);
	KASSERT(refs == 0);

	l->l_kcov = NULL;
}
311 
312 static int
kcov_remote_enable(kcov_t * kd,int mode)313 kcov_remote_enable(kcov_t *kd, int mode)
314 {
315 	kcov_lock(kd);
316 	if (kd->enabled) {
317 		kcov_unlock(kd);
318 		return EBUSY;
319 	}
320 	kd->mode = mode;
321 	atomic_store_relaxed(&kd->enabled, true);
322 	kcov_unlock(kd);
323 
324 	return 0;
325 }
326 
327 static int
kcov_remote_disable(kcov_t * kd)328 kcov_remote_disable(kcov_t *kd)
329 {
330 	kcov_lock(kd);
331 	if (!kd->enabled) {
332 		kcov_unlock(kd);
333 		return ENOENT;
334 	}
335 	atomic_store_relaxed(&kd->enabled, false);
336 	kcov_unlock(kd);
337 
338 	return 0;
339 }
340 
/*
 * KCOV_IOC_REMOTE_ATTACH: bind this user descriptor to a registered
 * remote.  From then on, enable/disable and mmap act on the remote KD.
 * Called with kd->lock held.  Fails with EEXIST if tracing is already
 * enabled on this descriptor, ENOENT if (subsystem, id) is unknown.
 */
static int
kcov_remote_attach(kcov_t *kd, struct kcov_ioc_remote_attach *args)
{
	kcov_remote_t *kr;

	if (kd->enabled)
		return EEXIST;

	kr = kcov_remote_find(args->subsystem, args->id);
	if (kr == NULL)
		return ENOENT;
	kd->remote = &kr->kd;

	return 0;
}
356 
357 static int
kcov_remote_detach(kcov_t * kd)358 kcov_remote_detach(kcov_t *kd)
359 {
360 	if (kd->enabled)
361 		return EEXIST;
362 	if (kd->remote == NULL)
363 		return ENOENT;
364 	(void)kcov_remote_disable(kd->remote);
365 	kd->remote = NULL;
366 	return 0;
367 }
368 
369 /* -------------------------------------------------------------------------- */
370 
371 static int
kcov_setbufsize(kcov_t * kd,uint64_t * args)372 kcov_setbufsize(kcov_t *kd, uint64_t *args)
373 {
374 	if (kd->remote != NULL)
375 		return 0; /* buffer allocated remotely */
376 	if (kd->enabled)
377 		return EBUSY;
378 	return kcov_allocbuf(kd, *((uint64_t *)args));
379 }
380 
/*
 * KCOV_IOC_ENABLE: start tracing into this descriptor (or its remote)
 * in the requested mode, making the current LWP the KD's owner.
 * Called with kd->lock held.
 *
 * NOTE(review): 'args' points to a uint64_t copied in from userland but
 * is dereferenced through (int *) here — on a big-endian port that
 * reads the high word; confirm the intended ioctl ABI.
 */
static int
kcov_enable(kcov_t *kd, uint64_t *args)
{
	struct lwp *l = curlwp;
	int mode;

	mode = *((int *)args);
	if (!kcov_mode_is_valid(mode))
		return EINVAL;

	if (kd->remote != NULL)
		return kcov_remote_enable(kd->remote, mode);

	if (kd->enabled)
		return EBUSY;
	if (l->l_kcov != NULL)
		return EBUSY;	/* LWP already traces another KD */
	if (kd->buf == NULL)
		return ENOBUFS;	/* KCOV_IOC_SETBUFSIZE not done yet */

	l->l_kcov = kd;
	kd->mode = mode;
	kd->enabled = true;
	return 0;
}
406 
/*
 * KCOV_IOC_DISABLE: stop tracing.  Only the owner LWP (the one that
 * enabled the KD) may disable it.  Called with kd->lock held.
 */
static int
kcov_disable(kcov_t *kd)
{
	struct lwp *l = curlwp;

	if (kd->remote != NULL)
		return kcov_remote_disable(kd->remote);

	if (!kd->enabled)
		return ENOENT;
	if (l->l_kcov != kd)
		return ENOENT;	/* caller is not the owner */

	l->l_kcov = NULL;
	kd->enabled = false;
	return 0;
}
424 
425 /* -------------------------------------------------------------------------- */
426 
/*
 * Temporarily suppress coverage recording on the current LWP, e.g.
 * around code whose coverage would only add noise.  Note this is a
 * plain flag, not a nesting counter: the first kcov_silence_leave()
 * unsilences regardless of how many enters preceded it.
 */
void
kcov_silence_enter(void)
{
	kcov_t *kd = curlwp->l_kcov;

	if (kd != NULL)
		kd->silenced = true;
}
435 
/* Resume coverage recording on the current LWP; see kcov_silence_enter(). */
void
kcov_silence_leave(void)
{
	kcov_t *kd = curlwp->l_kcov;

	if (kd != NULL)
		kd->silenced = false;
}
444 
445 /* -------------------------------------------------------------------------- */
446 
/*
 * open(2) on /dev/kcov: allocate a fresh zeroed KD and clone a file
 * descriptor around it, so every open() gets an independent descriptor
 * served by kcov_fileops.  fd_clone() takes ownership of 'kd' via
 * fp->f_data.
 */
static int
kcov_open(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct file *fp;
	int error, fd;
	kcov_t *kd;

	error = fd_allocfile(&fp, &fd);
	if (error)
		return error;

	kd = kmem_zalloc(sizeof(*kd), KM_SLEEP);
	mutex_init(&kd->lock, MUTEX_DEFAULT, IPL_NONE);

	return fd_clone(fp, fd, flag, &kcov_fileops, kd);
}
463 
/*
 * close(2) on a kcov descriptor.  If the KD is still enabled on some
 * LWP we cannot free it here; set 'lwpfree' and let kcov_lwp_free()
 * release it when that LWP exits.  Otherwise free it immediately.  An
 * attached remote is disabled (best-effort) but never freed: remotes
 * are owned by their registering subsystem.
 */
static int
kcov_fops_close(file_t *fp)
{
	kcov_t *kd = fp->f_data;

	kcov_lock(kd);
	if (kd->remote != NULL)
		(void)kcov_remote_disable(kd->remote);
	if (kd->enabled) {
		/* Hand ownership of the KD to the tracing LWP. */
		kd->lwpfree = true;
		kcov_unlock(kd);
	} else {
		kcov_unlock(kd);
		kcov_free(kd);
	}
	fp->f_data = NULL;

	return 0;
}
483 
/*
 * ioctl(2) dispatch for a kcov descriptor.  All handlers run with
 * kd->lock held, serializing control operations against each other
 * (but not against the lock-free tracing hot path).
 */
static int
kcov_fops_ioctl(file_t *fp, u_long cmd, void *addr)
{
	kcov_t *kd;
	int error;

	kd = fp->f_data;
	if (kd == NULL)
		return ENXIO;
	kcov_lock(kd);

	switch (cmd) {
	case KCOV_IOC_SETBUFSIZE:
		error = kcov_setbufsize(kd, addr);
		break;
	case KCOV_IOC_ENABLE:
		error = kcov_enable(kd, addr);
		break;
	case KCOV_IOC_DISABLE:
		error = kcov_disable(kd);
		break;
	case KCOV_IOC_REMOTE_ATTACH:
		error = kcov_remote_attach(kd, addr);
		break;
	case KCOV_IOC_REMOTE_DETACH:
		error = kcov_remote_detach(kd);
		break;
	default:
		error = EINVAL;
	}

	kcov_unlock(kd);
	return error;
}
518 
/*
 * mmap(2) on a kcov descriptor: export the coverage buffer's uobj to
 * userland.  For attached descriptors the remote KD's buffer is mapped
 * instead.  The extra uao_reference() keeps the backing alive for the
 * lifetime of the user mapping, independently of kcov_free().
 *
 * The early bounds checks cap both 'size' and 'off' at the maximum
 * buffer size, so the 'size + off' sum below cannot overflow.
 */
static int
kcov_fops_mmap(file_t *fp, off_t *offp, size_t size, int prot, int *flagsp,
    int *advicep, struct uvm_object **uobjp, int *maxprotp)
{
	off_t off = *offp;
	kcov_t *kd, *kdbuf;
	int error = 0;

	KASSERT(size > 0);

	if (prot & PROT_EXEC)
		return EACCES;	/* never executable */
	if (off < 0)
		return EINVAL;
	if (size > KCOV_BUF_MAX_ENTRIES * KCOV_ENTRY_SIZE)
		return EINVAL;
	if (off > KCOV_BUF_MAX_ENTRIES * KCOV_ENTRY_SIZE)
		return EINVAL;

	kd = fp->f_data;
	if (kd == NULL)
		return ENXIO;
	kcov_lock(kd);

	/* Map the remote buffer when attached, our own otherwise. */
	if (kd->remote != NULL)
		kdbuf = kd->remote;
	else
		kdbuf = kd;

	if ((size + off) > kdbuf->bufsize) {
		error = ENOMEM;
		goto out;
	}

	/* Extra reference for the user mapping; dropped at munmap. */
	uao_reference(kdbuf->uobj);

	*uobjp = kdbuf->uobj;
	*maxprotp = prot;
	*advicep = UVM_ADV_RANDOM;

out:
	kcov_unlock(kd);
	return error;
}
563 
564 /* -------------------------------------------------------------------------- */
565 
566 /*
567  * Constraints on the functions here: they must be marked with __nomsan, and
568  * must not make any external call.
569  */
570 
/*
 * True if the current CPU is running in interrupt context.
 * NOTE(review): this presumes ci_idepth is -1 at base (thread) level
 * and >= 0 inside an interrupt frame — confirm against the MD code.
 */
static inline bool __nomsan
in_interrupt(void)
{
	return curcpu()->ci_idepth >= 0;
}
576 
void __sanitizer_cov_trace_pc(void);

/*
 * Compiler-inserted hook, invoked on every coverage edge when built
 * with -fsanitize-coverage=trace-pc.  When PC tracing is active on the
 * current LWP, append the caller's return address to the buffer:
 * buf[0] holds the entry count, entries live at buf[1..bufnent].
 * Per the constraint above: __nomsan and no external calls.
 */
void __nomsan
__sanitizer_cov_trace_pc(void)
{
	uint64_t idx;
	kcov_t *kd;

	if (__predict_false(cold)) {
		/* Do not trace during boot. */
		return;
	}

	if (in_interrupt()) {
		/* Do not trace in interrupts. */
		return;
	}

	kd = curlwp->l_kcov;
	if (__predict_true(kd == NULL)) {
		/* Not traced. */
		return;
	}

	if (!kd->enabled) {
		/* Tracing not enabled */
		return;
	}

	if (__predict_false(kd->silenced)) {
		/* Silenced. */
		return;
	}

	if (kd->mode != KCOV_MODE_TRACE_PC) {
		/* PC tracing mode not enabled */
		return;
	}
	KASSERT(kd->remote == NULL);

	/* Lock-free append; the KD is private to this LWP here. */
	idx = kd->buf[0];
	if (idx < kd->bufnent) {
		kd->buf[idx+1] =
		    (intptr_t)__builtin_return_address(0);
		kd->buf[0] = idx + 1;
	}
}
624 
/*
 * Common helper for the comparison hooks below.  When CMP tracing is
 * active on the current LWP, append one 4-word record to the buffer:
 * (type, arg1, arg2, pc), where 'type' encodes the operand width and
 * the KCOV_CMP_CONST flag, and 'pc' is the comparison site.  buf[0]
 * counts records; record i occupies buf[4i+1 .. 4i+4].
 * Per the constraint above: __nomsan and no external calls.
 */
static void __nomsan
trace_cmp(uint64_t type, uint64_t arg1, uint64_t arg2, intptr_t pc)
{
	uint64_t idx;
	kcov_t *kd;

	if (__predict_false(cold)) {
		/* Do not trace during boot. */
		return;
	}

	if (in_interrupt()) {
		/* Do not trace in interrupts. */
		return;
	}

	kd = curlwp->l_kcov;
	if (__predict_true(kd == NULL)) {
		/* Not traced. */
		return;
	}

	if (!kd->enabled) {
		/* Tracing not enabled */
		return;
	}

	if (__predict_false(kd->silenced)) {
		/* Silenced. */
		return;
	}

	if (kd->mode != KCOV_MODE_TRACE_CMP) {
		/* CMP tracing mode not enabled */
		return;
	}
	KASSERT(kd->remote == NULL);

	/* Lock-free append; the KD is private to this LWP here. */
	idx = kd->buf[0];
	if ((idx * 4 + 4) <= kd->bufnent) {
		kd->buf[idx * 4 + 1] = type;
		kd->buf[idx * 4 + 2] = arg1;
		kd->buf[idx * 4 + 3] = arg2;
		kd->buf[idx * 4 + 4] = pc;
		kd->buf[0] = idx + 1;
	}
}
672 
void __sanitizer_cov_trace_cmp1(uint8_t arg1, uint8_t arg2);

/*
 * 8-bit compare hook.  __builtin_return_address(0) must be evaluated
 * here, in the wrapper, so the recorded PC is the comparison site.
 */
void __nomsan
__sanitizer_cov_trace_cmp1(uint8_t arg1, uint8_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(0), arg1, arg2,
	    (intptr_t)__builtin_return_address(0));
}
682 
void __sanitizer_cov_trace_cmp2(uint16_t arg1, uint16_t arg2);

/* 16-bit compare hook; see __sanitizer_cov_trace_cmp1. */
void __nomsan
__sanitizer_cov_trace_cmp2(uint16_t arg1, uint16_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(1), arg1, arg2,
	    (intptr_t)__builtin_return_address(0));
}
692 
void __sanitizer_cov_trace_cmp4(uint32_t arg1, uint32_t arg2);

/* 32-bit compare hook; see __sanitizer_cov_trace_cmp1. */
void __nomsan
__sanitizer_cov_trace_cmp4(uint32_t arg1, uint32_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(2), arg1, arg2,
	    (intptr_t)__builtin_return_address(0));
}
702 
void __sanitizer_cov_trace_cmp8(uint64_t arg1, uint64_t arg2);

/* 64-bit compare hook; see __sanitizer_cov_trace_cmp1. */
void __nomsan
__sanitizer_cov_trace_cmp8(uint64_t arg1, uint64_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(3), arg1, arg2,
	    (intptr_t)__builtin_return_address(0));
}
712 
void __sanitizer_cov_trace_const_cmp1(uint8_t arg1, uint8_t arg2);

/*
 * 8-bit compare-with-constant hook (one operand is a compile-time
 * constant); see __sanitizer_cov_trace_cmp1.
 */
void __nomsan
__sanitizer_cov_trace_const_cmp1(uint8_t arg1, uint8_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(0) | KCOV_CMP_CONST, arg1, arg2,
	    (intptr_t)__builtin_return_address(0));
}
722 
void __sanitizer_cov_trace_const_cmp2(uint16_t arg1, uint16_t arg2);

/* 16-bit compare-with-constant hook; see __sanitizer_cov_trace_const_cmp1. */
void __nomsan
__sanitizer_cov_trace_const_cmp2(uint16_t arg1, uint16_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(1) | KCOV_CMP_CONST, arg1, arg2,
	    (intptr_t)__builtin_return_address(0));
}
732 
void __sanitizer_cov_trace_const_cmp4(uint32_t arg1, uint32_t arg2);

/* 32-bit compare-with-constant hook; see __sanitizer_cov_trace_const_cmp1. */
void __nomsan
__sanitizer_cov_trace_const_cmp4(uint32_t arg1, uint32_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2,
	    (intptr_t)__builtin_return_address(0));
}
742 
void __sanitizer_cov_trace_const_cmp8(uint64_t arg1, uint64_t arg2);

/* 64-bit compare-with-constant hook; see __sanitizer_cov_trace_const_cmp1. */
void __nomsan
__sanitizer_cov_trace_const_cmp8(uint64_t arg1, uint64_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(3) | KCOV_CMP_CONST, arg1, arg2,
	    (intptr_t)__builtin_return_address(0));
}
752 
void __sanitizer_cov_trace_switch(uint64_t val, uint64_t *cases);

/*
 * Switch-statement hook.  'cases' is compiler-generated: cases[0] is
 * the number of case labels, cases[1] the operand width in bits, and
 * the label values start at cases[2].  Each label is reported as a
 * compare-with-constant against 'val'.  Unknown widths are ignored.
 * The cold/interrupt/enabled checks are done by trace_cmp() per label.
 */
void __nomsan
__sanitizer_cov_trace_switch(uint64_t val, uint64_t *cases)
{
	uint64_t i, nbits, ncases, type;
	intptr_t pc;

	pc = (intptr_t)__builtin_return_address(0);
	ncases = cases[0];
	nbits = cases[1];

	switch (nbits) {
	case 8:
		type = KCOV_CMP_SIZE(0);
		break;
	case 16:
		type = KCOV_CMP_SIZE(1);
		break;
	case 32:
		type = KCOV_CMP_SIZE(2);
		break;
	case 64:
		type = KCOV_CMP_SIZE(3);
		break;
	default:
		return;
	}
	type |= KCOV_CMP_CONST;

	for (i = 0; i < ncases; i++)
		trace_cmp(type, cases[i + 2], val, pc);
}
786 
787 /* -------------------------------------------------------------------------- */
788 
789 MODULE(MODULE_CLASS_MISC, kcov, NULL);
790 
791 static int
kcov_modcmd(modcmd_t cmd,void * arg)792 kcov_modcmd(modcmd_t cmd, void *arg)
793 {
794 
795    	switch (cmd) {
796 	case MODULE_CMD_INIT:
797 		return 0;
798 	case MODULE_CMD_FINI:
799 		return EINVAL;
800 	default:
801 		return ENOTTY;
802 	}
803 }
804