xref: /openbsd-src/sys/dev/kcov.c (revision ba619c7d8844389722886c02e67214cc60ac7d17)
1*ba619c7dSjsg /*	$OpenBSD: kcov.c,v 1.50 2024/11/10 10:04:33 jsg Exp $	*/
2af589a78Santon 
3af589a78Santon /*
4af589a78Santon  * Copyright (c) 2018 Anton Lindqvist <anton@openbsd.org>
5af589a78Santon  *
6af589a78Santon  * Permission to use, copy, modify, and distribute this software for any
7af589a78Santon  * purpose with or without fee is hereby granted, provided that the above
8af589a78Santon  * copyright notice and this permission notice appear in all copies.
9af589a78Santon  *
10af589a78Santon  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11af589a78Santon  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12af589a78Santon  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13af589a78Santon  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14af589a78Santon  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15af589a78Santon  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16af589a78Santon  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17af589a78Santon  */
18af589a78Santon 
19af589a78Santon #include <sys/param.h>
20af589a78Santon #include <sys/systm.h>
21af589a78Santon #include <sys/proc.h>
22af589a78Santon #include <sys/kcov.h>
23af589a78Santon #include <sys/malloc.h>
248430bc4bSanton #include <sys/mutex.h>
258430bc4bSanton #include <sys/pool.h>
26af589a78Santon #include <sys/stdint.h>
27af589a78Santon #include <sys/queue.h>
28af589a78Santon 
29982627fcSanton /* kcov_vnode() */
30982627fcSanton #include <sys/conf.h>
31982627fcSanton #include <sys/vnode.h>
32982627fcSanton #include <sys/specdev.h>
33982627fcSanton 
34af589a78Santon #include <uvm/uvm_extern.h>
35af589a78Santon 
3621f0e712Santon #define KCOV_BUF_MEMB_SIZE	sizeof(uintptr_t)
37b98c2012Santon #define KCOV_BUF_MAX_NMEMB	(256 << 10)
3821f0e712Santon 
39c204a25dSanton #define KCOV_CMP_CONST		0x1
40c204a25dSanton #define KCOV_CMP_SIZE(x)	((x) << 1)
41c204a25dSanton 
427a9d832aSanton #define KCOV_STATE_NONE		0
437a9d832aSanton #define KCOV_STATE_READY	1
447a9d832aSanton #define KCOV_STATE_TRACE	2
457a9d832aSanton #define KCOV_STATE_DYING	3
467a9d832aSanton 
47f8487ce0Santon #define KCOV_STRIDE_TRACE_PC	1
48f8487ce0Santon #define KCOV_STRIDE_TRACE_CMP	4
49f8487ce0Santon 
50ece33e2fSanton /*
51ece33e2fSanton  * Coverage structure.
52ece33e2fSanton  *
53ece33e2fSanton  * Locking:
54ece33e2fSanton  * 	I	immutable after creation
55ece33e2fSanton  *	M	kcov_mtx
56ece33e2fSanton  *	a	atomic operations
57ece33e2fSanton  */
58222039efSanton struct kcov_dev {
59ece33e2fSanton 	int		 kd_state;	/* [M] */
60ece33e2fSanton 	int		 kd_mode;	/* [M] */
6181a83fa0Santon 	int		 kd_unit;	/* [I] D_CLONE unique device minor */
6284cad3c2Santon 	int		 kd_intr;	/* [M] currently used in interrupt */
63ece33e2fSanton 	uintptr_t	*kd_buf;	/* [a] traced coverage */
64ece33e2fSanton 	size_t		 kd_nmemb;	/* [I] */
65ece33e2fSanton 	size_t		 kd_size;	/* [I] */
66af589a78Santon 
67ece33e2fSanton 	struct kcov_remote *kd_kr;	/* [M] */
688430bc4bSanton 
69ece33e2fSanton 	TAILQ_ENTRY(kcov_dev)	kd_entry;	/* [M] */
70af589a78Santon };
71af589a78Santon 
728430bc4bSanton /*
738430bc4bSanton  * Remote coverage structure.
748430bc4bSanton  *
758430bc4bSanton  * Locking:
768430bc4bSanton  * 	I	immutable after creation
7763a956acSanton  *	M	kcov_mtx
788430bc4bSanton  */
798430bc4bSanton struct kcov_remote {
808430bc4bSanton 	struct kcov_dev *kr_kd;	/* [M] */
818430bc4bSanton 	void *kr_id;		/* [I] */
828430bc4bSanton 	int kr_subsystem;	/* [I] */
838430bc4bSanton 	int kr_nsections;	/* [M] # threads in remote section */
848430bc4bSanton 	int kr_state;		/* [M] */
858430bc4bSanton 
868430bc4bSanton 	TAILQ_ENTRY(kcov_remote) kr_entry;	/* [M] */
878430bc4bSanton };
888430bc4bSanton 
8984cad3c2Santon /*
9084cad3c2Santon  * Per CPU coverage structure used to track coverage when executing in a remote
9184cad3c2Santon  * interrupt context.
9284cad3c2Santon  *
9384cad3c2Santon  * Locking:
9484cad3c2Santon  * 	I	immutable after creation
9584cad3c2Santon  *	M	kcov_mtx
9684cad3c2Santon  */
9784cad3c2Santon struct kcov_cpu {
9884cad3c2Santon 	struct kcov_dev  kc_kd;
9984cad3c2Santon 	struct kcov_dev *kc_kd_save;	/* [M] previous kcov_dev */
10084cad3c2Santon 	int kc_cpuid;			/* [I] cpu number */
10184cad3c2Santon 
10284cad3c2Santon 	TAILQ_ENTRY(kcov_cpu) kc_entry;	/* [I] */
10384cad3c2Santon };
10484cad3c2Santon 
105af589a78Santon void kcovattach(int);
106af589a78Santon 
1078180eaf2Santon int kd_init(struct kcov_dev *, unsigned long);
108222039efSanton void kd_free(struct kcov_dev *);
109222039efSanton struct kcov_dev *kd_lookup(int);
110e882eb89Santon void kd_copy(struct kcov_dev *, struct kcov_dev *);
111af589a78Santon 
1128430bc4bSanton struct kcov_remote *kcov_remote_register_locked(int, void *);
1138430bc4bSanton int kcov_remote_attach(struct kcov_dev *, struct kio_remote_attach *);
1148430bc4bSanton void kcov_remote_detach(struct kcov_dev *, struct kcov_remote *);
1158430bc4bSanton void kr_free(struct kcov_remote *);
116ece33e2fSanton void kr_barrier(struct kcov_remote *);
1178430bc4bSanton struct kcov_remote *kr_lookup(int, void *);
1188430bc4bSanton 
119865e2ca8Santon static struct kcov_dev *kd_curproc(int);
12084cad3c2Santon static struct kcov_cpu *kd_curcpu(void);
121f8487ce0Santon static uint64_t kd_claim(struct kcov_dev *, int, int);
122af589a78Santon 
123222039efSanton TAILQ_HEAD(, kcov_dev) kd_list = TAILQ_HEAD_INITIALIZER(kd_list);
1248430bc4bSanton TAILQ_HEAD(, kcov_remote) kr_list = TAILQ_HEAD_INITIALIZER(kr_list);
12584cad3c2Santon TAILQ_HEAD(, kcov_cpu) kc_list = TAILQ_HEAD_INITIALIZER(kc_list);
126af589a78Santon 
1274b7fdf29Santon int kcov_cold = 1;
1288430bc4bSanton int kr_cold = 1;
12963a956acSanton struct mutex kcov_mtx = MUTEX_INITIALIZER(IPL_MPFLOOR);
1308430bc4bSanton struct pool kr_pool;
1314b7fdf29Santon 
132aec2f735Santon static inline int
133aec2f735Santon inintr(struct cpu_info *ci)
134aec2f735Santon {
135aec2f735Santon 	return (ci->ci_idepth > 0);
136aec2f735Santon }
137aec2f735Santon 
138af589a78Santon /*
139af589a78Santon  * Compiling the kernel with the `-fsanitize-coverage=trace-pc' option will
140af589a78Santon  * cause the following function to be called upon function entry and before
141aec2f735Santon  * each block of instructions that maps to a single line in the original source
142af589a78Santon  * code.
143af589a78Santon  *
1449f082b73Santon  * If kcov is enabled for the current thread, the kernel program counter will
1459f082b73Santon  * be stored in its corresponding coverage buffer.
146af589a78Santon  */
147af589a78Santon void
148af589a78Santon __sanitizer_cov_trace_pc(void)
149af589a78Santon {
150222039efSanton 	struct kcov_dev *kd;
151af589a78Santon 	uint64_t idx;
152af589a78Santon 
153865e2ca8Santon 	kd = kd_curproc(KCOV_MODE_TRACE_PC);
154865e2ca8Santon 	if (kd == NULL)
155af589a78Santon 		return;
156af589a78Santon 
157f8487ce0Santon 	if ((idx = kd_claim(kd, KCOV_STRIDE_TRACE_PC, 1)))
158a187908aSanton 		kd->kd_buf[idx] = (uintptr_t)__builtin_return_address(0);
159af589a78Santon }
160af589a78Santon 
161c204a25dSanton /*
162c204a25dSanton  * Compiling the kernel with the `-fsanitize-coverage=trace-cmp' option will
163c204a25dSanton  * cause the following function to be called upon integer comparisons and switch
164c204a25dSanton  * statements.
165c204a25dSanton  *
166c204a25dSanton  * If kcov is enabled for the current thread, the comparison will be stored in
167c204a25dSanton  * its corresponding coverage buffer.
168c204a25dSanton  */
169c204a25dSanton void
170d3edd1e5Santon trace_cmp(struct kcov_dev *kd, uint64_t type, uint64_t arg1, uint64_t arg2,
171d3edd1e5Santon     uintptr_t pc)
172c204a25dSanton {
173c204a25dSanton 	uint64_t idx;
174c204a25dSanton 
175f8487ce0Santon 	if ((idx = kd_claim(kd, KCOV_STRIDE_TRACE_CMP, 1))) {
176a187908aSanton 		kd->kd_buf[idx] = type;
177a187908aSanton 		kd->kd_buf[idx + 1] = arg1;
178a187908aSanton 		kd->kd_buf[idx + 2] = arg2;
179a187908aSanton 		kd->kd_buf[idx + 3] = pc;
180c204a25dSanton 	}
181c204a25dSanton }
182c204a25dSanton 
/*
 * Helper for the __sanitizer_cov_trace_*cmp*() hooks below: bail out of the
 * calling hook unless the current thread traces comparisons, otherwise
 * record one comparison along with the hook caller's program counter.
 */
#define TRACE_CMP(type, arg1, arg2) do {				\
	struct kcov_dev *kd;						\
	if ((kd = kd_curproc(KCOV_MODE_TRACE_CMP)) == NULL)		\
		return;							\
	trace_cmp(kd, (type), (arg1), (arg2),				\
	    (uintptr_t)__builtin_return_address(0));			\
} while (0)
190d3edd1e5Santon 
/* Hook for 8-bit integer comparisons. */
void
__sanitizer_cov_trace_cmp1(uint8_t arg1, uint8_t arg2)
{
	TRACE_CMP(KCOV_CMP_SIZE(0), arg1, arg2);
}
196c204a25dSanton 
/* Hook for 16-bit integer comparisons. */
void
__sanitizer_cov_trace_cmp2(uint16_t arg1, uint16_t arg2)
{
	TRACE_CMP(KCOV_CMP_SIZE(1), arg1, arg2);
}
202c204a25dSanton 
/* Hook for 32-bit integer comparisons. */
void
__sanitizer_cov_trace_cmp4(uint32_t arg1, uint32_t arg2)
{
	TRACE_CMP(KCOV_CMP_SIZE(2), arg1, arg2);
}
208c204a25dSanton 
/* Hook for 64-bit integer comparisons. */
void
__sanitizer_cov_trace_cmp8(uint64_t arg1, uint64_t arg2)
{
	TRACE_CMP(KCOV_CMP_SIZE(3), arg1, arg2);
}
214c204a25dSanton 
/* Hook for 8-bit comparisons against a compile-time constant. */
void
__sanitizer_cov_trace_const_cmp1(uint8_t arg1, uint8_t arg2)
{
	TRACE_CMP(KCOV_CMP_SIZE(0) | KCOV_CMP_CONST, arg1, arg2);
}
220c204a25dSanton 
/* Hook for 16-bit comparisons against a compile-time constant. */
void
__sanitizer_cov_trace_const_cmp2(uint16_t arg1, uint16_t arg2)
{
	TRACE_CMP(KCOV_CMP_SIZE(1) | KCOV_CMP_CONST, arg1, arg2);
}
226c204a25dSanton 
/* Hook for 32-bit comparisons against a compile-time constant. */
void
__sanitizer_cov_trace_const_cmp4(uint32_t arg1, uint32_t arg2)
{
	TRACE_CMP(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2);
}
232c204a25dSanton 
/* Hook for 64-bit comparisons against a compile-time constant. */
void
__sanitizer_cov_trace_const_cmp8(uint64_t arg1, uint64_t arg2)
{
	TRACE_CMP(KCOV_CMP_SIZE(3) | KCOV_CMP_CONST, arg1, arg2);
}
238c204a25dSanton 
239c204a25dSanton void
240c204a25dSanton __sanitizer_cov_trace_switch(uint64_t val, uint64_t *cases)
241c204a25dSanton {
242d3edd1e5Santon 	struct kcov_dev *kd;
243c204a25dSanton 	uint64_t i, nbits, ncases, type;
244c204a25dSanton 	uintptr_t pc;
245c204a25dSanton 
246d3edd1e5Santon 	kd = kd_curproc(KCOV_MODE_TRACE_CMP);
247d3edd1e5Santon 	if (kd == NULL)
248d3edd1e5Santon 		return;
249d3edd1e5Santon 
250c204a25dSanton 	pc = (uintptr_t)__builtin_return_address(0);
251c204a25dSanton 	ncases = cases[0];
252c204a25dSanton 	nbits = cases[1];
253c204a25dSanton 
254c204a25dSanton 	switch (nbits) {
255c204a25dSanton 	case 8:
256c204a25dSanton 		type = KCOV_CMP_SIZE(0);
257c204a25dSanton 		break;
258c204a25dSanton 	case 16:
259c204a25dSanton 		type = KCOV_CMP_SIZE(1);
260c204a25dSanton 		break;
261c204a25dSanton 	case 32:
262c204a25dSanton 		type = KCOV_CMP_SIZE(2);
263c204a25dSanton 		break;
264c204a25dSanton 	case 64:
265c204a25dSanton 		type = KCOV_CMP_SIZE(3);
266c204a25dSanton 		break;
267c204a25dSanton 	default:
268c204a25dSanton 		return;
269c204a25dSanton 	}
270c204a25dSanton 	type |= KCOV_CMP_CONST;
271c204a25dSanton 
272c204a25dSanton 	for (i = 0; i < ncases; i++)
273d3edd1e5Santon 		trace_cmp(kd, type, cases[i + 2], val, pc);
274c204a25dSanton }
275c204a25dSanton 
/*
 * Device attach routine, invoked once during autoconf; count is unused.
 *
 * Initializes the remote registration pool and allocates one maximum sized
 * coverage buffer per CPU, used to track coverage while executing in remote
 * interrupt context.
 */
void
kcovattach(int count)
{
	struct kcov_cpu *kc;
	int error, i;

	pool_init(&kr_pool, sizeof(struct kcov_remote), 0, IPL_MPFLOOR, PR_WAITOK,
	    "kcovpl", NULL);

	kc = mallocarray(ncpusfound, sizeof(*kc), M_DEVBUF, M_WAITOK | M_ZERO);
	mtx_enter(&kcov_mtx);
	for (i = 0; i < ncpusfound; i++) {
		kc[i].kc_cpuid = i;
		/* kd_init() waits for memory, so it is expected to succeed. */
		error = kd_init(&kc[i].kc_kd, KCOV_BUF_MAX_NMEMB);
		KASSERT(error == 0);
		TAILQ_INSERT_TAIL(&kc_list, &kc[i], kc_entry);
	}
	mtx_leave(&kcov_mtx);

	/* Remote registrations are allowed from now on, see kr_cold. */
	kr_cold = 0;
}
297af589a78Santon 
298af589a78Santon int
299af589a78Santon kcovopen(dev_t dev, int flag, int mode, struct proc *p)
300af589a78Santon {
301222039efSanton 	struct kcov_dev *kd;
302af589a78Santon 
303af589a78Santon 	kd = malloc(sizeof(*kd), M_SUBPROC, M_WAITOK | M_ZERO);
304af589a78Santon 	kd->kd_unit = minor(dev);
305ece33e2fSanton 	mtx_enter(&kcov_mtx);
30681a83fa0Santon 	KASSERT(kd_lookup(kd->kd_unit) == NULL);
307af589a78Santon 	TAILQ_INSERT_TAIL(&kd_list, kd, kd_entry);
30881a83fa0Santon 	if (kcov_cold)
30981a83fa0Santon 		kcov_cold = 0;
310ece33e2fSanton 	mtx_leave(&kcov_mtx);
311af589a78Santon 	return (0);
312af589a78Santon }
313af589a78Santon 
/*
 * Device close routine: remove the descriptor from the global list and free
 * it. If a thread is still being traced by the descriptor, freeing is
 * deferred to kcov_exit() by marking the descriptor as dying.
 */
int
kcovclose(dev_t dev, int flag, int mode, struct proc *p)
{
	struct kcov_dev *kd;

	mtx_enter(&kcov_mtx);

	kd = kd_lookup(minor(dev));
	if (kd == NULL) {
		mtx_leave(&kcov_mtx);
		return (ENXIO);
	}

	TAILQ_REMOVE(&kd_list, kd, kd_entry);
	if (kd->kd_state == KCOV_STATE_TRACE && kd->kd_kr == NULL) {
		/*
		 * Another thread is currently using the kcov descriptor,
		 * postpone freeing to kcov_exit().
		 */
		kd->kd_state = KCOV_STATE_DYING;
		kd->kd_mode = KCOV_MODE_NONE;
	} else {
		kd_free(kd);
	}

	mtx_leave(&kcov_mtx);
	return (0);
}
342af589a78Santon 
/*
 * Device ioctl routine:
 *
 *	KIOSETBUFSIZE	allocate the coverage buffer.
 *	KIOENABLE	start tracing the calling thread in the given mode.
 *	KIODISABLE	stop tracing the calling thread.
 *	KIOREMOTEATTACH	attach the descriptor to a remote subsystem.
 */
int
kcovioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	struct kcov_dev *kd;
	int mode;
	int error = 0;

	mtx_enter(&kcov_mtx);

	kd = kd_lookup(minor(dev));
	if (kd == NULL) {
		mtx_leave(&kcov_mtx);
		return (ENXIO);
	}

	switch (cmd) {
	case KIOSETBUFSIZE:
		error = kd_init(kd, *((unsigned long *)data));
		break;
	case KIOENABLE:
		/* Only one kcov descriptor can be enabled per thread. */
		if (p->p_kd != NULL) {
			error = EBUSY;
			break;
		}
		if (kd->kd_state != KCOV_STATE_READY) {
			error = ENXIO;
			break;
		}
		mode = *((int *)data);
		if (mode != KCOV_MODE_TRACE_PC && mode != KCOV_MODE_TRACE_CMP) {
			error = EINVAL;
			break;
		}
		kd->kd_state = KCOV_STATE_TRACE;
		kd->kd_mode = mode;
		/* Remote coverage is mutually exclusive. */
		if (kd->kd_kr == NULL)
			p->p_kd = kd;
		break;
	case KIODISABLE:
		/* Only the enabled thread may disable itself. */
		if ((p->p_kd != kd && kd->kd_kr == NULL)) {
			error = EPERM;
			break;
		}
		if (kd->kd_state != KCOV_STATE_TRACE) {
			error = ENXIO;
			break;
		}
		kd->kd_state = KCOV_STATE_READY;
		kd->kd_mode = KCOV_MODE_NONE;
		/* Wait for remote sections to drain before returning. */
		if (kd->kd_kr != NULL)
			kr_barrier(kd->kd_kr);
		p->p_kd = NULL;
		break;
	case KIOREMOTEATTACH:
		error = kcov_remote_attach(kd,
		    (struct kio_remote_attach *)data);
		break;
	default:
		error = ENOTTY;
	}
	mtx_leave(&kcov_mtx);

	return (error);
}
410af589a78Santon 
/*
 * Device mmap routine: translate an offset within the coverage buffer into
 * the physical address backing it, allowing userland to map the buffer.
 * Returns -1 on any error, as expected by the d_mmap interface.
 */
paddr_t
kcovmmap(dev_t dev, off_t offset, int prot)
{
	struct kcov_dev *kd;
	paddr_t pa = -1;
	vaddr_t va;

	mtx_enter(&kcov_mtx);

	kd = kd_lookup(minor(dev));
	if (kd == NULL)
		goto out;

	/* Reject offsets outside the usable part of the coverage buffer. */
	if (offset < 0 || offset >= kd->kd_nmemb * KCOV_BUF_MEMB_SIZE)
		goto out;

	va = (vaddr_t)kd->kd_buf + offset;
	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
		pa = -1;

out:
	mtx_leave(&kcov_mtx);
	return (pa);
}
435af589a78Santon 
/*
 * Called from thread exit: stop tracing for the exiting thread. If
 * kcovclose() already marked the descriptor as dying, perform the deferred
 * free here; otherwise put the descriptor back in the ready state.
 */
void
kcov_exit(struct proc *p)
{
	struct kcov_dev *kd;

	mtx_enter(&kcov_mtx);

	kd = p->p_kd;
	if (kd == NULL) {
		mtx_leave(&kcov_mtx);
		return;
	}

	if (kd->kd_state == KCOV_STATE_DYING) {
		p->p_kd = NULL;
		kd_free(kd);
	} else {
		kd->kd_state = KCOV_STATE_READY;
		kd->kd_mode = KCOV_MODE_NONE;
		/* Wait for remote sections to drain before releasing. */
		if (kd->kd_kr != NULL)
			kr_barrier(kd->kd_kr);
		p->p_kd = NULL;
	}

	mtx_leave(&kcov_mtx);
}
462af589a78Santon 
463982627fcSanton /*
464982627fcSanton  * Returns non-zero if the given vnode refers to a kcov device.
465982627fcSanton  */
466982627fcSanton int
467982627fcSanton kcov_vnode(struct vnode *vp)
468982627fcSanton {
469982627fcSanton 	return (vp->v_type == VCHR &&
470982627fcSanton 	    cdevsw[major(vp->v_rdev)].d_open == kcovopen);
471982627fcSanton }
472982627fcSanton 
473222039efSanton struct kcov_dev *
474af589a78Santon kd_lookup(int unit)
475af589a78Santon {
476222039efSanton 	struct kcov_dev *kd;
477af589a78Santon 
478ece33e2fSanton 	MUTEX_ASSERT_LOCKED(&kcov_mtx);
479ece33e2fSanton 
480af589a78Santon 	TAILQ_FOREACH(kd, &kd_list, kd_entry) {
481af589a78Santon 		if (kd->kd_unit == unit)
482af589a78Santon 			return (kd);
483af589a78Santon 	}
484af589a78Santon 	return (NULL);
485af589a78Santon }
486af589a78Santon 
/*
 * Copy the coverage collected in src into dst, claiming the required number
 * of elements in the destination buffer first. Used to flush the per CPU
 * interrupt coverage into the attached remote descriptor. Both descriptors
 * must trace in the same mode.
 */
void
kd_copy(struct kcov_dev *dst, struct kcov_dev *src)
{
	uint64_t idx, nmemb;
	int stride;

	MUTEX_ASSERT_LOCKED(&kcov_mtx);
	KASSERT(dst->kd_mode == src->kd_mode);

	/* Element zero holds the number of used elements. */
	nmemb = src->kd_buf[0];
	if (nmemb == 0)
		return;
	stride = src->kd_mode == KCOV_MODE_TRACE_CMP ? KCOV_STRIDE_TRACE_CMP :
	    KCOV_STRIDE_TRACE_PC;
	/* Silently drop the coverage if dst lacks room. */
	idx = kd_claim(dst, stride, nmemb);
	if (idx == 0)
		return;
	memcpy(&dst->kd_buf[idx], &src->kd_buf[1],
	    stride * nmemb * KCOV_BUF_MEMB_SIZE);
}
50784cad3c2Santon 
508af589a78Santon int
5098180eaf2Santon kd_init(struct kcov_dev *kd, unsigned long nmemb)
510af589a78Santon {
5118180eaf2Santon 	void *buf;
512af589a78Santon 	size_t size;
513ece33e2fSanton 	int error;
514af589a78Santon 
515af589a78Santon 	KASSERT(kd->kd_buf == NULL);
516af589a78Santon 
517070323f8Santon 	if (kd->kd_state != KCOV_STATE_NONE)
5188180eaf2Santon 		return (EBUSY);
5198180eaf2Santon 
520af589a78Santon 	if (nmemb == 0 || nmemb > KCOV_BUF_MAX_NMEMB)
521af589a78Santon 		return (EINVAL);
522af589a78Santon 
52321f0e712Santon 	size = roundup(nmemb * KCOV_BUF_MEMB_SIZE, PAGE_SIZE);
524ece33e2fSanton 	mtx_leave(&kcov_mtx);
52521f0e712Santon 	buf = km_alloc(size, &kv_any, &kp_zero, &kd_waitok);
526ece33e2fSanton 	if (buf == NULL) {
527ece33e2fSanton 		error = ENOMEM;
528ece33e2fSanton 		goto err;
529ece33e2fSanton 	}
53021f0e712Santon 	/* km_malloc() can sleep, ensure the race was won. */
531070323f8Santon 	if (kd->kd_state != KCOV_STATE_NONE) {
532ece33e2fSanton 		error = EBUSY;
533ece33e2fSanton 		goto err;
5348180eaf2Santon 	}
535ece33e2fSanton 	mtx_enter(&kcov_mtx);
5368180eaf2Santon 	kd->kd_buf = buf;
537af589a78Santon 	/* The first element is reserved to hold the number of used elements. */
538af589a78Santon 	kd->kd_nmemb = nmemb - 1;
539af589a78Santon 	kd->kd_size = size;
540070323f8Santon 	kd->kd_state = KCOV_STATE_READY;
541af589a78Santon 	return (0);
542ece33e2fSanton 
543ece33e2fSanton err:
544ece33e2fSanton 	if (buf != NULL)
545ece33e2fSanton 		km_free(buf, size, &kv_any, &kp_zero);
546ece33e2fSanton 	mtx_enter(&kcov_mtx);
547ece33e2fSanton 	return (error);
548af589a78Santon }
549af589a78Santon 
/*
 * Free the given kcov descriptor: detach any attached remote, release the
 * coverage buffer and the descriptor itself.
 *
 * Called with kcov_mtx held; the mutex is temporarily released while the
 * buffer is freed since km_free() can sleep.
 */
void
kd_free(struct kcov_dev *kd)
{
	struct kcov_remote *kr;

	MUTEX_ASSERT_LOCKED(&kcov_mtx);

	kr = kd->kd_kr;
	if (kr != NULL)
		kcov_remote_detach(kd, kr);

	if (kd->kd_buf != NULL) {
		mtx_leave(&kcov_mtx);
		km_free(kd->kd_buf, kd->kd_size, &kv_any, &kp_zero);
		mtx_enter(&kcov_mtx);
	}
	free(kd, M_SUBPROC, sizeof(*kd));
}
568af589a78Santon 
/*
 * Return the kcov descriptor, if any, tracing the current thread in the
 * given mode. Returns NULL whenever tracing must be suppressed: before the
 * first kcovopen(), while panicking or inside ddb, and in interrupt context
 * unless a remote interrupt section is active.
 */
static struct kcov_dev *
kd_curproc(int mode)
{
	struct cpu_info *ci;
	struct kcov_dev *kd;

	/*
	 * Do not trace before kcovopen() has been called at least once.
	 * At this point, all secondary CPUs have booted and accessing curcpu()
	 * is safe.
	 */
	if (__predict_false(kcov_cold))
		return (NULL);

	ci = curcpu();
	kd = ci->ci_curproc->p_kd;
	if (__predict_true(kd == NULL) || kd->kd_mode != mode)
		return (NULL);

	/*
	 * Do not trace if the kernel has panicked. This could happen if curproc
	 * had kcov enabled while panicking.
	 */
	if (__predict_false(panicstr || db_active))
		return (NULL);

	/* Do not trace in interrupt context unless this is a remote section. */
	if (inintr(ci) && kd->kd_intr == 0)
		return (NULL);

	return (kd);

}
602a187908aSanton 
60384cad3c2Santon static struct kcov_cpu *
60484cad3c2Santon kd_curcpu(void)
60584cad3c2Santon {
60684cad3c2Santon 	struct kcov_cpu *kc;
60784cad3c2Santon 	unsigned int cpuid = cpu_number();
60884cad3c2Santon 
60984cad3c2Santon 	TAILQ_FOREACH(kc, &kc_list, kc_entry) {
61084cad3c2Santon 		if (kc->kc_cpuid == cpuid)
61184cad3c2Santon 			return (kc);
61284cad3c2Santon 	}
61384cad3c2Santon 	return (NULL);
61484cad3c2Santon }
61584cad3c2Santon 
616a187908aSanton /*
617f8487ce0Santon  * Claim stride times nmemb number of elements in the coverage buffer. Returns
618f8487ce0Santon  * the index of the first claimed element. If the claim cannot be fulfilled,
619f8487ce0Santon  * zero is returned.
620a187908aSanton  */
static uint64_t
kd_claim(struct kcov_dev *kd, int stride, int nmemb)
{
	uint64_t idx, was;

	/* Lock-free reservation: kd_buf[0] is the running element count. */
	idx = kd->kd_buf[0];
	for (;;) {
		/* Not enough room left in the buffer to fulfill the claim. */
		if (stride * (idx + nmemb) > kd->kd_nmemb)
			return (0);

		was = atomic_cas_ulong(&kd->kd_buf[0], idx, idx + nmemb);
		if (was == idx)
			return (idx * stride + 1);
		/* Lost the race, retry with the observed count. */
		idx = was;
	}
}
6378430bc4bSanton 
/*
 * Called by an instrumented subsystem upon entering a remote section. If a
 * kcov descriptor is attached to the section, coverage is traced on behalf
 * of the thread that enabled remote tracing. In interrupt context, coverage
 * is collected into the per CPU buffer and flushed in kcov_remote_leave().
 */
void
kcov_remote_enter(int subsystem, void *id)
{
	struct cpu_info *ci;
	struct kcov_cpu *kc;
	struct kcov_dev *kd;
	struct kcov_remote *kr;
	struct proc *p;

	mtx_enter(&kcov_mtx);
	kr = kr_lookup(subsystem, id);
	if (kr == NULL || kr->kr_state != KCOV_STATE_READY)
		goto out;
	kd = kr->kr_kd;
	if (kd == NULL || kd->kd_state != KCOV_STATE_TRACE)
		goto out;
	ci = curcpu();
	p = ci->ci_curproc;
	if (inintr(ci)) {
		/*
		 * XXX we only expect to be called from softclock interrupts at
		 * this point.
		 */
		kc = kd_curcpu();
		if (kc == NULL || kc->kc_kd.kd_intr == 1)
			goto out;
		kc->kc_kd.kd_state = KCOV_STATE_TRACE;
		kc->kc_kd.kd_mode = kd->kd_mode;
		kc->kc_kd.kd_intr = 1;
		/* Remember the interrupted thread's descriptor. */
		kc->kc_kd_save = p->p_kd;
		kd = &kc->kc_kd;
		/* Reset coverage buffer. */
		kd->kd_buf[0] = 0;
	} else {
		KASSERT(p->p_kd == NULL);
	}
	kr->kr_nsections++;
	p->p_kd = kd;

out:
	mtx_leave(&kcov_mtx);
}
6808430bc4bSanton 
/*
 * Called upon leaving a remote section. Restores the previous tracing state
 * and, in interrupt context, flushes the per CPU coverage into the attached
 * descriptor. Wakes up any thread waiting in kr_barrier() once the last
 * section is left.
 */
void
kcov_remote_leave(int subsystem, void *id)
{
	struct cpu_info *ci;
	struct kcov_cpu *kc;
	struct kcov_remote *kr;
	struct proc *p;

	mtx_enter(&kcov_mtx);
	ci = curcpu();
	p = ci->ci_curproc;
	if (p->p_kd == NULL)
		goto out;
	kr = kr_lookup(subsystem, id);
	if (kr == NULL)
		goto out;
	if (inintr(ci)) {
		kc = kd_curcpu();
		if (kc == NULL || kc->kc_kd.kd_intr == 0)
			goto out;

		/*
		 * Stop writing to the coverage buffer associated with this CPU
		 * before copying its contents.
		 */
		p->p_kd = kc->kc_kd_save;
		kc->kc_kd_save = NULL;

		kd_copy(kr->kr_kd, &kc->kc_kd);
		kc->kc_kd.kd_state = KCOV_STATE_READY;
		kc->kc_kd.kd_mode = KCOV_MODE_NONE;
		kc->kc_kd.kd_intr = 0;
	} else {
		KASSERT(p->p_kd == kr->kr_kd);
		p->p_kd = NULL;
	}
	/* Notify any thread waiting in kr_barrier(). */
	if (--kr->kr_nsections == 0)
		wakeup(kr);
out:
	mtx_leave(&kcov_mtx);
}
7228430bc4bSanton 
7238430bc4bSanton void
7248430bc4bSanton kcov_remote_register(int subsystem, void *id)
7258430bc4bSanton {
72663a956acSanton 	mtx_enter(&kcov_mtx);
7278430bc4bSanton 	kcov_remote_register_locked(subsystem, id);
72863a956acSanton 	mtx_leave(&kcov_mtx);
7298430bc4bSanton }
7308430bc4bSanton 
7318430bc4bSanton void
7328430bc4bSanton kcov_remote_unregister(int subsystem, void *id)
7338430bc4bSanton {
7348430bc4bSanton 	struct kcov_remote *kr;
7358430bc4bSanton 
73663a956acSanton 	mtx_enter(&kcov_mtx);
7378430bc4bSanton 	kr = kr_lookup(subsystem, id);
7388430bc4bSanton 	if (kr != NULL)
7398430bc4bSanton 		kr_free(kr);
74063a956acSanton 	mtx_leave(&kcov_mtx);
7418430bc4bSanton }
7428430bc4bSanton 
/*
 * Register a remote for the given subsystem and identifier, returning the
 * new remote or NULL if one already exists. If an existing remote is dying,
 * sleep until its remote sections have drained, then retry.
 *
 * Called with kcov_mtx held; the mutex is temporarily released since the
 * pool allocation can sleep.
 */
struct kcov_remote *
kcov_remote_register_locked(int subsystem, void *id)
{
	struct kcov_remote *kr, *tmp;

	/* Do not allow registrations before the pool is initialized. */
	KASSERT(kr_cold == 0);

	/*
	 * Temporarily release the mutex since the allocation could end up
	 * sleeping.
	 */
	mtx_leave(&kcov_mtx);
	kr = pool_get(&kr_pool, PR_WAITOK | PR_ZERO);
	kr->kr_subsystem = subsystem;
	kr->kr_id = id;
	kr->kr_state = KCOV_STATE_NONE;
	mtx_enter(&kcov_mtx);

	for (;;) {
		tmp = kr_lookup(subsystem, id);
		if (tmp == NULL)
			break;
		if (tmp->kr_state != KCOV_STATE_DYING) {
			/* Already registered, give back the allocation. */
			pool_put(&kr_pool, kr);
			return (NULL);
		}
		/*
		 * The remote could already be deregistered while another
		 * thread is currently inside a kcov remote section.
		 */
		msleep_nsec(tmp, &kcov_mtx, PWAIT, "kcov", INFSLP);
	}
	TAILQ_INSERT_TAIL(&kr_list, kr, kr_entry);
	return (kr);
}
7798430bc4bSanton 
/*
 * Attach the given kcov descriptor to a remote, making the descriptor trace
 * remote sections instead of the owning thread. Only the common subsystem,
 * keyed on the calling process, is currently supported.
 */
int
kcov_remote_attach(struct kcov_dev *kd, struct kio_remote_attach *arg)
{
	struct kcov_remote *kr = NULL;

	MUTEX_ASSERT_LOCKED(&kcov_mtx);

	if (kd->kd_state != KCOV_STATE_READY)
		return (ENXIO);

	if (arg->subsystem == KCOV_REMOTE_COMMON) {
		kr = kcov_remote_register_locked(KCOV_REMOTE_COMMON,
		    curproc->p_p);
		if (kr == NULL)
			return (EBUSY);
	} else {
		return (EINVAL);
	}

	kr->kr_state = KCOV_STATE_READY;
	kr->kr_kd = kd;
	kd->kd_kr = kr;
	return (0);
}
8048430bc4bSanton 
/*
 * Detach the given kcov descriptor from its remote. A common remote is owned
 * by the attached process and is freed outright; other remotes are kept
 * registered and only unlinked once all remote sections have drained.
 */
void
kcov_remote_detach(struct kcov_dev *kd, struct kcov_remote *kr)
{
	MUTEX_ASSERT_LOCKED(&kcov_mtx);

	KASSERT(kd == kr->kr_kd);
	if (kr->kr_subsystem == KCOV_REMOTE_COMMON) {
		kr_free(kr);
	} else {
		kr->kr_state = KCOV_STATE_NONE;
		kr_barrier(kr);
		kd->kd_kr = NULL;
		kr->kr_kd = NULL;
	}
}
8208430bc4bSanton 
/*
 * Deregister and free the given remote once all threads have left its remote
 * sections, waking up any thread waiting for the registration to go away.
 */
void
kr_free(struct kcov_remote *kr)
{
	MUTEX_ASSERT_LOCKED(&kcov_mtx);

	/* Prevent new remote sections from starting. */
	kr->kr_state = KCOV_STATE_DYING;
	kr_barrier(kr);
	if (kr->kr_kd != NULL)
		kr->kr_kd->kd_kr = NULL;
	kr->kr_kd = NULL;
	TAILQ_REMOVE(&kr_list, kr, kr_entry);
	/* Notify thread(s) waiting in kcov_remote_register(). */
	wakeup(kr);
	pool_put(&kr_pool, kr);
}
8368430bc4bSanton 
837ece33e2fSanton void
838ece33e2fSanton kr_barrier(struct kcov_remote *kr)
839ece33e2fSanton {
840ece33e2fSanton 	MUTEX_ASSERT_LOCKED(&kcov_mtx);
841ece33e2fSanton 
842ece33e2fSanton 	while (kr->kr_nsections > 0)
8434d0124bdSanton 		msleep_nsec(kr, &kcov_mtx, PWAIT, "kcovbar", INFSLP);
844ece33e2fSanton }
845ece33e2fSanton 
8468430bc4bSanton struct kcov_remote *
8478430bc4bSanton kr_lookup(int subsystem, void *id)
8488430bc4bSanton {
8498430bc4bSanton 	struct kcov_remote *kr;
8508430bc4bSanton 
85163a956acSanton 	MUTEX_ASSERT_LOCKED(&kcov_mtx);
8528430bc4bSanton 
8538430bc4bSanton 	TAILQ_FOREACH(kr, &kr_list, kr_entry) {
8548430bc4bSanton 		if (kr->kr_subsystem == subsystem && kr->kr_id == id)
8558430bc4bSanton 			return (kr);
8568430bc4bSanton 	}
8578430bc4bSanton 	return (NULL);
8588430bc4bSanton }
859