/*	$OpenBSD: kcov.c,v 1.48 2022/01/19 06:46:55 anton Exp $	*/

/*
 * Copyright (c) 2018 Anton Lindqvist <anton@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kcov.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pool.h>
#include <sys/stdint.h>
#include <sys/queue.h>

/* kcov_vnode() */
#include <sys/conf.h>
#include <sys/vnode.h>
#include <sys/specdev.h>

#include <uvm/uvm_extern.h>

#define KCOV_BUF_MEMB_SIZE	sizeof(uintptr_t)
#define KCOV_BUF_MAX_NMEMB	(256 << 10)

#define KCOV_CMP_CONST		0x1
#define KCOV_CMP_SIZE(x)	((x) << 1)

#define KCOV_STATE_NONE		0
#define KCOV_STATE_READY	1
#define KCOV_STATE_TRACE	2
#define KCOV_STATE_DYING	3

#define KCOV_STRIDE_TRACE_PC	1
#define KCOV_STRIDE_TRACE_CMP	4

/*
 * Coverage structure.
 *
 * Locking:
 *	I	immutable after creation
 *	M	kcov_mtx
 *	a	atomic operations
 */
struct kcov_dev {
	int		 kd_state;	/* [M] */
	int		 kd_mode;	/* [M] */
	int		 kd_unit;	/* [I] D_CLONE unique device minor */
	int		 kd_intr;	/* [M] currently used in interrupt */
	uintptr_t	*kd_buf;	/* [a] traced coverage */
	size_t		 kd_nmemb;	/* [I] */
	size_t		 kd_size;	/* [I] */

	struct kcov_remote *kd_kr;	/* [M] */

	TAILQ_ENTRY(kcov_dev)	kd_entry;	/* [M] */
};

/*
 * Remote coverage structure.
 *
 * Locking:
 *	I	immutable after creation
 *	M	kcov_mtx
 */
struct kcov_remote {
	struct kcov_dev *kr_kd;	/* [M] */
	void *kr_id;		/* [I] */
	int kr_subsystem;	/* [I] */
	int kr_nsections;	/* [M] # threads in remote section */
	int kr_state;		/* [M] */

	TAILQ_ENTRY(kcov_remote) kr_entry;	/* [M] */
};

/*
 * Per CPU coverage structure used to track coverage when executing in a remote
 * interrupt context.
 *
 * Locking:
 *	I	immutable after creation
 *	M	kcov_mtx
 */
struct kcov_cpu {
	struct kcov_dev  kc_kd;
	struct kcov_dev *kc_kd_save;	/* [M] previous kcov_dev */
	int kc_cpuid;			/* [I] cpu number */

	TAILQ_ENTRY(kcov_cpu) kc_entry;	/* [I] */
};

void kcovattach(int);

int kd_init(struct kcov_dev *, unsigned long);
void kd_free(struct kcov_dev *);
struct kcov_dev *kd_lookup(int);
void kd_copy(struct kcov_dev *, struct kcov_dev *);

struct kcov_remote *kcov_remote_register_locked(int, void *);
int kcov_remote_attach(struct kcov_dev *, struct kio_remote_attach *);
void kcov_remote_detach(struct kcov_dev *, struct kcov_remote *);
void kr_free(struct kcov_remote *);
void kr_barrier(struct kcov_remote *);
struct kcov_remote *kr_lookup(int, void *);

static struct kcov_dev *kd_curproc(int);
static struct kcov_cpu *kd_curcpu(void);
static uint64_t kd_claim(struct kcov_dev *, int, int);
static inline int inintr(void);

TAILQ_HEAD(, kcov_dev) kd_list = TAILQ_HEAD_INITIALIZER(kd_list);
TAILQ_HEAD(, kcov_remote) kr_list = TAILQ_HEAD_INITIALIZER(kr_list);
TAILQ_HEAD(, kcov_cpu) kc_list = TAILQ_HEAD_INITIALIZER(kc_list);

int kcov_cold = 1;
int kr_cold = 1;
struct mutex kcov_mtx = MUTEX_INITIALIZER(IPL_MPFLOOR);
struct pool kr_pool;

/*
 * Compiling the kernel with the `-fsanitize-coverage=trace-pc' option will
 * cause the following function to be called upon function entry and before
 * each block of instructions that maps to a single line in the original
 * source code.
 *
 * If kcov is enabled for the current thread, the kernel program counter will
 * be stored in its corresponding coverage buffer.
 */
void
__sanitizer_cov_trace_pc(void)
{
	struct kcov_dev *kd;
	uint64_t idx;

	kd = kd_curproc(KCOV_MODE_TRACE_PC);
	if (kd == NULL)
		return;

	if ((idx = kd_claim(kd, KCOV_STRIDE_TRACE_PC, 1)))
		kd->kd_buf[idx] = (uintptr_t)__builtin_return_address(0);
}
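
/*
 * Illustrative sketch, not part of the driver: with trace-pc instrumentation
 * the compiler inserts, at every coverage point, a call equivalent to
 *
 *	__sanitizer_cov_trace_pc();
 *
 * so a function with an if/else branch yields one recorded program counter
 * for the function entry and one for whichever branch was taken.
 */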

/*
 * Compiling the kernel with the `-fsanitize-coverage=trace-cmp' option will
 * cause the functions below to be called upon integer comparisons and switch
 * statements.
 *
 * If kcov is enabled for the current thread, the comparison will be stored in
 * its corresponding coverage buffer.
 */
void
trace_cmp(struct kcov_dev *kd, uint64_t type, uint64_t arg1, uint64_t arg2,
    uintptr_t pc)
{
	uint64_t idx;

	if ((idx = kd_claim(kd, KCOV_STRIDE_TRACE_CMP, 1))) {
		kd->kd_buf[idx] = type;
		kd->kd_buf[idx + 1] = arg1;
		kd->kd_buf[idx + 2] = arg2;
		kd->kd_buf[idx + 3] = pc;
	}
}
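
/*
 * Each recorded comparison thus occupies four consecutive buffer elements:
 * { type, arg1, arg2, pc }, where type encodes the operand width via
 * KCOV_CMP_SIZE() (0 for 8-bit operands up to 3 for 64-bit operands) and has
 * KCOV_CMP_CONST set when one operand is a compile-time constant.
 */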

#define TRACE_CMP(type, arg1, arg2) do {				\
	struct kcov_dev *kd;						\
	if ((kd = kd_curproc(KCOV_MODE_TRACE_CMP)) == NULL)		\
		return;							\
	trace_cmp(kd, (type), (arg1), (arg2),				\
	    (uintptr_t)__builtin_return_address(0));			\
} while (0)

void
__sanitizer_cov_trace_cmp1(uint8_t arg1, uint8_t arg2)
{
	TRACE_CMP(KCOV_CMP_SIZE(0), arg1, arg2);
}

void
__sanitizer_cov_trace_cmp2(uint16_t arg1, uint16_t arg2)
{
	TRACE_CMP(KCOV_CMP_SIZE(1), arg1, arg2);
}

void
__sanitizer_cov_trace_cmp4(uint32_t arg1, uint32_t arg2)
{
	TRACE_CMP(KCOV_CMP_SIZE(2), arg1, arg2);
}

void
__sanitizer_cov_trace_cmp8(uint64_t arg1, uint64_t arg2)
{
	TRACE_CMP(KCOV_CMP_SIZE(3), arg1, arg2);
}

void
__sanitizer_cov_trace_const_cmp1(uint8_t arg1, uint8_t arg2)
{
	TRACE_CMP(KCOV_CMP_SIZE(0) | KCOV_CMP_CONST, arg1, arg2);
}

void
__sanitizer_cov_trace_const_cmp2(uint16_t arg1, uint16_t arg2)
{
	TRACE_CMP(KCOV_CMP_SIZE(1) | KCOV_CMP_CONST, arg1, arg2);
}

void
__sanitizer_cov_trace_const_cmp4(uint32_t arg1, uint32_t arg2)
{
	TRACE_CMP(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2);
}

void
__sanitizer_cov_trace_const_cmp8(uint64_t arg1, uint64_t arg2)
{
	TRACE_CMP(KCOV_CMP_SIZE(3) | KCOV_CMP_CONST, arg1, arg2);
}

void
__sanitizer_cov_trace_switch(uint64_t val, uint64_t *cases)
{
	struct kcov_dev *kd;
	uint64_t i, nbits, ncases, type;
	uintptr_t pc;

	kd = kd_curproc(KCOV_MODE_TRACE_CMP);
	if (kd == NULL)
		return;

	pc = (uintptr_t)__builtin_return_address(0);
	ncases = cases[0];
	nbits = cases[1];

	switch (nbits) {
	case 8:
		type = KCOV_CMP_SIZE(0);
		break;
	case 16:
		type = KCOV_CMP_SIZE(1);
		break;
	case 32:
		type = KCOV_CMP_SIZE(2);
		break;
	case 64:
		type = KCOV_CMP_SIZE(3);
		break;
	default:
		return;
	}
	type |= KCOV_CMP_CONST;

	for (i = 0; i < ncases; i++)
		trace_cmp(kd, type, cases[i + 2], val, pc);
}
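
/*
 * Driver attach routine: create the pool backing remote registrations and set
 * up one preallocated kcov_dev per CPU, used to collect remote coverage while
 * running in interrupt context.
 */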

void
kcovattach(int count)
{
	struct kcov_cpu *kc;
	int error, i;

	pool_init(&kr_pool, sizeof(struct kcov_remote), 0, IPL_MPFLOOR, PR_WAITOK,
	    "kcovpl", NULL);

	kc = mallocarray(ncpusfound, sizeof(*kc), M_DEVBUF, M_WAITOK | M_ZERO);
	mtx_enter(&kcov_mtx);
	for (i = 0; i < ncpusfound; i++) {
		kc[i].kc_cpuid = i;
		error = kd_init(&kc[i].kc_kd, KCOV_BUF_MAX_NMEMB);
		KASSERT(error == 0);
		TAILQ_INSERT_TAIL(&kc_list, &kc[i], kc_entry);
	}
	mtx_leave(&kcov_mtx);

	kr_cold = 0;
}
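
/*
 * Open routine for the cloning kcov device: allocate a kcov descriptor for
 * the new minor and put it on the global list.
 */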

int
kcovopen(dev_t dev, int flag, int mode, struct proc *p)
{
	struct kcov_dev *kd;

	kd = malloc(sizeof(*kd), M_SUBPROC, M_WAITOK | M_ZERO);
	kd->kd_unit = minor(dev);
	mtx_enter(&kcov_mtx);
	KASSERT(kd_lookup(kd->kd_unit) == NULL);
	TAILQ_INSERT_TAIL(&kd_list, kd, kd_entry);
	if (kcov_cold)
		kcov_cold = 0;
	mtx_leave(&kcov_mtx);
	return (0);
}

int
kcovclose(dev_t dev, int flag, int mode, struct proc *p)
{
	struct kcov_dev *kd;

	mtx_enter(&kcov_mtx);

	kd = kd_lookup(minor(dev));
	if (kd == NULL) {
		mtx_leave(&kcov_mtx);
		return (ENXIO);
	}

	TAILQ_REMOVE(&kd_list, kd, kd_entry);
	if (kd->kd_state == KCOV_STATE_TRACE && kd->kd_kr == NULL) {
		/*
		 * Another thread is currently using the kcov descriptor,
		 * postpone freeing to kcov_exit().
		 */
		kd->kd_state = KCOV_STATE_DYING;
		kd->kd_mode = KCOV_MODE_NONE;
	} else {
		kd_free(kd);
	}

	mtx_leave(&kcov_mtx);
	return (0);
}

int
kcovioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	struct kcov_dev *kd;
	int mode;
	int error = 0;

	mtx_enter(&kcov_mtx);

	kd = kd_lookup(minor(dev));
	if (kd == NULL) {
		mtx_leave(&kcov_mtx);
		return (ENXIO);
	}

	switch (cmd) {
	case KIOSETBUFSIZE:
		error = kd_init(kd, *((unsigned long *)data));
		break;
	case KIOENABLE:
		/* Only one kcov descriptor can be enabled per thread. */
		if (p->p_kd != NULL) {
			error = EBUSY;
			break;
		}
		if (kd->kd_state != KCOV_STATE_READY) {
			error = ENXIO;
			break;
		}
		mode = *((int *)data);
		if (mode != KCOV_MODE_TRACE_PC && mode != KCOV_MODE_TRACE_CMP) {
			error = EINVAL;
			break;
		}
		kd->kd_state = KCOV_STATE_TRACE;
		kd->kd_mode = mode;
		/* Remote coverage is mutually exclusive. */
		if (kd->kd_kr == NULL)
			p->p_kd = kd;
		break;
	case KIODISABLE:
		/* Only the enabled thread may disable itself. */
		if (p->p_kd != kd && kd->kd_kr == NULL) {
			error = EPERM;
			break;
		}
		if (kd->kd_state != KCOV_STATE_TRACE) {
			error = ENXIO;
			break;
		}
		kd->kd_state = KCOV_STATE_READY;
		kd->kd_mode = KCOV_MODE_NONE;
		if (kd->kd_kr != NULL)
			kr_barrier(kd->kd_kr);
		p->p_kd = NULL;
		break;
	case KIOREMOTEATTACH:
		error = kcov_remote_attach(kd,
		    (struct kio_remote_attach *)data);
		break;
	default:
		error = ENOTTY;
	}
	mtx_leave(&kcov_mtx);

	return (error);
}
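
/*
 * Minimal userland usage sketch (illustrative only, not compiled into the
 * kernel) showing how the ioctl interface above is typically driven, along
 * the lines of the kcov(4) manual page:
 *
 *	unsigned long size = 4096, *cover, i;
 *	int fd, mode = KCOV_MODE_TRACE_PC;
 *
 *	fd = open("/dev/kcov", O_RDWR);
 *	ioctl(fd, KIOSETBUFSIZE, &size);
 *	cover = mmap(NULL, size * sizeof(unsigned long),
 *	    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	ioctl(fd, KIOENABLE, &mode);
 *	... perform the syscalls of interest ...
 *	ioctl(fd, KIODISABLE, 0);
 *	for (i = 0; i < cover[0]; i++)
 *		printf("%p\n", (void *)cover[i + 1]);
 *
 * Error handling is omitted for brevity.
 */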

paddr_t
kcovmmap(dev_t dev, off_t offset, int prot)
{
	struct kcov_dev *kd;
	paddr_t pa = -1;
	vaddr_t va;

	mtx_enter(&kcov_mtx);

	kd = kd_lookup(minor(dev));
	if (kd == NULL)
		goto out;

	if (offset < 0 || offset >= kd->kd_nmemb * KCOV_BUF_MEMB_SIZE)
		goto out;

	va = (vaddr_t)kd->kd_buf + offset;
	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
		pa = -1;

out:
	mtx_leave(&kcov_mtx);
	return (pa);
}
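
/*
 * Called when a thread exits: drop its reference to the kcov descriptor and
 * free descriptors whose device has already been closed.
 */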

void
kcov_exit(struct proc *p)
{
	struct kcov_dev *kd;

	mtx_enter(&kcov_mtx);

	kd = p->p_kd;
	if (kd == NULL) {
		mtx_leave(&kcov_mtx);
		return;
	}

	if (kd->kd_state == KCOV_STATE_DYING) {
		p->p_kd = NULL;
		kd_free(kd);
	} else {
		kd->kd_state = KCOV_STATE_READY;
		kd->kd_mode = KCOV_MODE_NONE;
		if (kd->kd_kr != NULL)
			kr_barrier(kd->kd_kr);
		p->p_kd = NULL;
	}

	mtx_leave(&kcov_mtx);
}

/*
 * Returns non-zero if the given vnode refers to a kcov device.
 */
int
kcov_vnode(struct vnode *vp)
{
	return (vp->v_type == VCHR &&
	    cdevsw[major(vp->v_rdev)].d_open == kcovopen);
}

struct kcov_dev *
kd_lookup(int unit)
{
	struct kcov_dev *kd;

	MUTEX_ASSERT_LOCKED(&kcov_mtx);

	TAILQ_FOREACH(kd, &kd_list, kd_entry) {
		if (kd->kd_unit == unit)
			return (kd);
	}
	return (NULL);
}
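
/*
 * Copy the coverage recorded in src into dst, claiming room for all of src's
 * entries at once. Used when leaving a remote interrupt section to merge the
 * per-CPU buffer into the attached descriptor.
 */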

void
kd_copy(struct kcov_dev *dst, struct kcov_dev *src)
{
	uint64_t idx, nmemb;
	int stride;

	MUTEX_ASSERT_LOCKED(&kcov_mtx);
	KASSERT(dst->kd_mode == src->kd_mode);

	nmemb = src->kd_buf[0];
	if (nmemb == 0)
		return;
	stride = src->kd_mode == KCOV_MODE_TRACE_CMP ? KCOV_STRIDE_TRACE_CMP :
	    KCOV_STRIDE_TRACE_PC;
	idx = kd_claim(dst, stride, nmemb);
	if (idx == 0)
		return;
	memcpy(&dst->kd_buf[idx], &src->kd_buf[1],
	    stride * nmemb * KCOV_BUF_MEMB_SIZE);
}
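
/*
 * Allocate the coverage buffer for a kcov descriptor; nmemb is the number of
 * uintptr_t sized elements requested by the user. The mutex is dropped while
 * allocating, so the descriptor state is re-checked afterwards.
 */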

int
kd_init(struct kcov_dev *kd, unsigned long nmemb)
{
	void *buf;
	size_t size;
	int error;

	KASSERT(kd->kd_buf == NULL);

	if (kd->kd_state != KCOV_STATE_NONE)
		return (EBUSY);

	if (nmemb == 0 || nmemb > KCOV_BUF_MAX_NMEMB)
		return (EINVAL);

	size = roundup(nmemb * KCOV_BUF_MEMB_SIZE, PAGE_SIZE);
	mtx_leave(&kcov_mtx);
	buf = km_alloc(size, &kv_any, &kp_zero, &kd_waitok);
	if (buf == NULL) {
		error = ENOMEM;
		goto err;
	}
	/* km_alloc() can sleep, ensure the race was won. */
	if (kd->kd_state != KCOV_STATE_NONE) {
		error = EBUSY;
		goto err;
	}
	mtx_enter(&kcov_mtx);
	kd->kd_buf = buf;
	/* The first element is reserved to hold the number of used elements. */
	kd->kd_nmemb = nmemb - 1;
	kd->kd_size = size;
	kd->kd_state = KCOV_STATE_READY;
	return (0);

err:
	if (buf != NULL)
		km_free(buf, size, &kv_any, &kp_zero);
	mtx_enter(&kcov_mtx);
	return (error);
}

void
kd_free(struct kcov_dev *kd)
{
	struct kcov_remote *kr;

	MUTEX_ASSERT_LOCKED(&kcov_mtx);

	kr = kd->kd_kr;
	if (kr != NULL)
		kcov_remote_detach(kd, kr);

	if (kd->kd_buf != NULL) {
		mtx_leave(&kcov_mtx);
		km_free(kd->kd_buf, kd->kd_size, &kv_any, &kp_zero);
		mtx_enter(&kcov_mtx);
	}
	free(kd, M_SUBPROC, sizeof(*kd));
}
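
/*
 * Return the kcov descriptor of the current thread if tracing in the given
 * mode is enabled and it is currently safe to record coverage, otherwise
 * NULL.
 */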

static struct kcov_dev *
kd_curproc(int mode)
{
	struct kcov_dev *kd;

	/*
	 * Do not trace before kcovopen() has been called at least once.
	 * At this point, all secondary CPUs have booted and accessing curcpu()
	 * is safe.
	 */
	if (__predict_false(kcov_cold))
		return (NULL);

	kd = curproc->p_kd;
	if (__predict_true(kd == NULL) || kd->kd_mode != mode)
		return (NULL);

	/*
	 * Do not trace if the kernel has panicked. This could happen if curproc
	 * had kcov enabled while panicking.
	 */
	if (__predict_false(panicstr || db_active))
		return (NULL);

	/* Do not trace in interrupt context unless this is a remote section. */
	if (inintr() && kd->kd_intr == 0)
		return (NULL);

	return (kd);
}

static struct kcov_cpu *
kd_curcpu(void)
{
	struct kcov_cpu *kc;
	unsigned int cpuid = cpu_number();

	TAILQ_FOREACH(kc, &kc_list, kc_entry) {
		if (kc->kc_cpuid == cpuid)
			return (kc);
	}
	return (NULL);
}

/*
 * Claim stride * nmemb elements in the coverage buffer and return the index
 * of the first claimed element. If the claim cannot be fulfilled, zero is
 * returned. For instance, a single KCOV_MODE_TRACE_CMP record claims four
 * consecutive elements.
 */
static uint64_t
kd_claim(struct kcov_dev *kd, int stride, int nmemb)
{
	uint64_t idx, was;

	idx = kd->kd_buf[0];
	for (;;) {
		if (stride * (idx + nmemb) > kd->kd_nmemb)
			return (0);

		was = atomic_cas_ulong(&kd->kd_buf[0], idx, idx + nmemb);
		if (was == idx)
			return (idx * stride + 1);
		idx = was;
	}
}
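
/*
 * Return non-zero when executing in interrupt context, on the architectures
 * where the interrupt depth is tracked in cpu_info.
 */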

static inline int
inintr(void)
{
#if defined(__amd64__) || defined(__arm__) || defined(__arm64__) || \
    defined(__i386__)
	return (curcpu()->ci_idepth > 0);
#else
	return (0);
#endif
}
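
/*
 * Enter a remote section for the given subsystem/id pair: if a traced kcov
 * descriptor is attached to the matching remote, start recording coverage on
 * behalf of it, either directly or through the per-CPU buffer when called
 * from interrupt context.
 */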

void
kcov_remote_enter(int subsystem, void *id)
{
	struct kcov_cpu *kc;
	struct kcov_dev *kd;
	struct kcov_remote *kr;
	struct proc *p;

	mtx_enter(&kcov_mtx);
	kr = kr_lookup(subsystem, id);
	if (kr == NULL || kr->kr_state != KCOV_STATE_READY)
		goto out;
	kd = kr->kr_kd;
	if (kd == NULL || kd->kd_state != KCOV_STATE_TRACE)
		goto out;
	p = curproc;
	if (inintr()) {
		/*
		 * XXX we only expect to be called from softclock interrupts at
		 * this point.
		 */
		kc = kd_curcpu();
		if (kc == NULL || kc->kc_kd.kd_intr == 1)
			goto out;
		kc->kc_kd.kd_state = KCOV_STATE_TRACE;
		kc->kc_kd.kd_mode = kd->kd_mode;
		kc->kc_kd.kd_intr = 1;
		kc->kc_kd_save = p->p_kd;
		kd = &kc->kc_kd;
		/* Reset coverage buffer. */
		kd->kd_buf[0] = 0;
	} else {
		KASSERT(p->p_kd == NULL);
	}
	kr->kr_nsections++;
	p->p_kd = kd;

out:
	mtx_leave(&kcov_mtx);
}
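
/*
 * Leave a remote section: stop recording, merge any per-CPU interrupt
 * coverage into the attached descriptor and wake up threads waiting in
 * kr_barrier().
 */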

void
kcov_remote_leave(int subsystem, void *id)
{
	struct kcov_cpu *kc;
	struct kcov_remote *kr;
	struct proc *p;

	mtx_enter(&kcov_mtx);
	p = curproc;
	if (p->p_kd == NULL)
		goto out;
	kr = kr_lookup(subsystem, id);
	if (kr == NULL)
		goto out;
	if (inintr()) {
		kc = kd_curcpu();
		if (kc == NULL || kc->kc_kd.kd_intr == 0)
			goto out;

		/*
		 * Stop writing to the coverage buffer associated with this CPU
		 * before copying its contents.
		 */
		p->p_kd = kc->kc_kd_save;
		kc->kc_kd_save = NULL;

		kd_copy(kr->kr_kd, &kc->kc_kd);
		kc->kc_kd.kd_state = KCOV_STATE_READY;
		kc->kc_kd.kd_mode = KCOV_MODE_NONE;
		kc->kc_kd.kd_intr = 0;
	} else {
		KASSERT(p->p_kd == kr->kr_kd);
		p->p_kd = NULL;
	}
	if (--kr->kr_nsections == 0)
		wakeup(kr);
out:
	mtx_leave(&kcov_mtx);
}

void
kcov_remote_register(int subsystem, void *id)
{
	mtx_enter(&kcov_mtx);
	kcov_remote_register_locked(subsystem, id);
	mtx_leave(&kcov_mtx);
}

void
kcov_remote_unregister(int subsystem, void *id)
{
	struct kcov_remote *kr;

	mtx_enter(&kcov_mtx);
	kr = kr_lookup(subsystem, id);
	if (kr != NULL)
		kr_free(kr);
	mtx_leave(&kcov_mtx);
}
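
/*
 * Register a remote for the given subsystem/id pair with kcov_mtx held. The
 * allocation may sleep; if the pair is already registered, NULL is returned,
 * after first waiting for any dying registration to go away.
 */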

struct kcov_remote *
kcov_remote_register_locked(int subsystem, void *id)
{
	struct kcov_remote *kr, *tmp;

	/* Do not allow registrations before the pool is initialized. */
	KASSERT(kr_cold == 0);

	/*
	 * Temporarily release the mutex since the allocation could end up
	 * sleeping.
	 */
	mtx_leave(&kcov_mtx);
	kr = pool_get(&kr_pool, PR_WAITOK | PR_ZERO);
	kr->kr_subsystem = subsystem;
	kr->kr_id = id;
	kr->kr_state = KCOV_STATE_NONE;
	mtx_enter(&kcov_mtx);

	for (;;) {
		tmp = kr_lookup(subsystem, id);
		if (tmp == NULL)
			break;
		if (tmp->kr_state != KCOV_STATE_DYING) {
			pool_put(&kr_pool, kr);
			return (NULL);
		}
		/*
		 * The remote could already be deregistered while another
		 * thread is currently inside a kcov remote section.
		 */
		msleep_nsec(tmp, &kcov_mtx, PWAIT, "kcov", INFSLP);
	}
	TAILQ_INSERT_TAIL(&kr_list, kr, kr_entry);
	return (kr);
}

int
kcov_remote_attach(struct kcov_dev *kd, struct kio_remote_attach *arg)
{
	struct kcov_remote *kr = NULL;

	MUTEX_ASSERT_LOCKED(&kcov_mtx);

	if (kd->kd_state != KCOV_STATE_READY)
		return (ENXIO);

	if (arg->subsystem == KCOV_REMOTE_COMMON) {
		kr = kcov_remote_register_locked(KCOV_REMOTE_COMMON,
		    curproc->p_p);
		if (kr == NULL)
			return (EBUSY);
	} else {
		return (EINVAL);
	}

	kr->kr_state = KCOV_STATE_READY;
	kr->kr_kd = kd;
	kd->kd_kr = kr;
	return (0);
}

void
kcov_remote_detach(struct kcov_dev *kd, struct kcov_remote *kr)
{
	MUTEX_ASSERT_LOCKED(&kcov_mtx);

	KASSERT(kd == kr->kr_kd);
	if (kr->kr_subsystem == KCOV_REMOTE_COMMON) {
		kr_free(kr);
	} else {
		kr->kr_state = KCOV_STATE_NONE;
		kr_barrier(kr);
		kd->kd_kr = NULL;
		kr->kr_kd = NULL;
	}
}

void
kr_free(struct kcov_remote *kr)
{
	MUTEX_ASSERT_LOCKED(&kcov_mtx);

	kr->kr_state = KCOV_STATE_DYING;
	kr_barrier(kr);
	if (kr->kr_kd != NULL)
		kr->kr_kd->kd_kr = NULL;
	kr->kr_kd = NULL;
	TAILQ_REMOVE(&kr_list, kr, kr_entry);
	/* Notify thread(s) waiting in kcov_remote_register(). */
	wakeup(kr);
	pool_put(&kr_pool, kr);
}

void
kr_barrier(struct kcov_remote *kr)
{
	MUTEX_ASSERT_LOCKED(&kcov_mtx);

	while (kr->kr_nsections > 0)
		msleep_nsec(kr, &kcov_mtx, PWAIT, "kcovbar", INFSLP);
}

struct kcov_remote *
kr_lookup(int subsystem, void *id)
{
	struct kcov_remote *kr;

	MUTEX_ASSERT_LOCKED(&kcov_mtx);

	TAILQ_FOREACH(kr, &kr_list, kr_entry) {
		if (kr->kr_subsystem == subsystem && kr->kr_id == id)
			return (kr);
	}
	return (NULL);
}
859