/*	$OpenBSD: kcov.c,v 1.10 2019/01/16 19:27:07 anton Exp $	*/

/*
 * Copyright (c) 2018 Anton Lindqvist <anton@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kcov.h>
#include <sys/malloc.h>
#include <sys/stdint.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>

/* #define KCOV_DEBUG */
#ifdef KCOV_DEBUG
#define DPRINTF(x...) do { if (kcov_debug) printf(x); } while (0)
#else
#define DPRINTF(x...)
#endif

struct kcov_dev {
	enum {
		KCOV_STATE_NONE,
		KCOV_STATE_READY,
		KCOV_STATE_TRACE,
		KCOV_STATE_DYING,
	}		 kd_state;
	int		 kd_mode;
	int		 kd_unit;	/* device minor */
	uintptr_t	*kd_buf;	/* traced coverage */
	size_t		 kd_nmemb;
	size_t		 kd_size;

	TAILQ_ENTRY(kcov_dev)	kd_entry;
};

void kcovattach(int);

int kd_init(struct kcov_dev *, unsigned long);
void kd_free(struct kcov_dev *);
struct kcov_dev *kd_lookup(int);

static inline int inintr(void);

TAILQ_HEAD(, kcov_dev) kd_list = TAILQ_HEAD_INITIALIZER(kd_list);

int kcov_cold = 1;

#ifdef KCOV_DEBUG
int kcov_debug = 1;
#endif

/*
 * Compiling the kernel with the `-fsanitize-coverage=trace-pc' option will
 * cause the following function to be called upon function entry and before
 * each block of instructions that maps to a single line in the original
 * source code.
 *
 * If kcov is enabled for the current thread, the kernel program counter will
 * be stored in its corresponding coverage buffer.
 * The first element in the coverage buffer holds the index of the next
 * available element.
 */
void
__sanitizer_cov_trace_pc(void)
{
	struct kcov_dev *kd;
	uint64_t idx;

	/*
	 * Do not trace before kcovopen() has been called at least once.
	 * At this point, all secondary CPUs have booted and accessing curcpu()
	 * is safe.
	 */
	if (kcov_cold)
		return;

	/* Do not trace in interrupts to prevent noisy coverage. */
	if (inintr())
		return;

	kd = curproc->p_kd;
	if (kd == NULL || kd->kd_mode != KCOV_MODE_TRACE_PC)
		return;

	idx = kd->kd_buf[0];
	if (idx < kd->kd_nmemb) {
		kd->kd_buf[idx + 1] = (uintptr_t)__builtin_return_address(0);
		kd->kd_buf[0] = idx + 1;
	}
}
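
/*
 * Illustration of the buffer layout produced above: kd_buf[0] is the
 * running entry count and the recorded program counters follow it, so
 * after n traced blocks:
 *
 *	kd_buf[0] == n
 *	kd_buf[1] == PC of the first block traced after KIOENABLE
 *	...
 *	kd_buf[n] == PC of the most recently traced block
 *
 * Once kd_buf[0] reaches kd_nmemb, further hits are silently dropped.
 */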

void
kcovattach(int count)
{
}

int
kcovopen(dev_t dev, int flag, int mode, struct proc *p)
{
	struct kcov_dev *kd;

	if (kd_lookup(minor(dev)) != NULL)
		return (EBUSY);

	if (kcov_cold)
		kcov_cold = 0;

	DPRINTF("%s: unit=%d\n", __func__, minor(dev));

	kd = malloc(sizeof(*kd), M_SUBPROC, M_WAITOK | M_ZERO);
	kd->kd_unit = minor(dev);
	TAILQ_INSERT_TAIL(&kd_list, kd, kd_entry);
	return (0);
}

int
kcovclose(dev_t dev, int flag, int mode, struct proc *p)
{
	struct kcov_dev *kd;

	kd = kd_lookup(minor(dev));
	if (kd == NULL)
		return (EINVAL);

	DPRINTF("%s: unit=%d, state=%d, mode=%d\n",
	    __func__, kd->kd_unit, kd->kd_state, kd->kd_mode);

	if (kd->kd_state == KCOV_STATE_TRACE) {
		kd->kd_state = KCOV_STATE_DYING;
		kd->kd_mode = KCOV_MODE_NONE;
	} else {
		kd_free(kd);
	}

	return (0);
}

int
kcovioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	struct kcov_dev *kd;
	int mode;
	int error = 0;

	kd = kd_lookup(minor(dev));
	if (kd == NULL)
		return (ENXIO);

	switch (cmd) {
	case KIOSETBUFSIZE:
		error = kd_init(kd, *((unsigned long *)data));
		break;
	case KIOENABLE:
		/* Only one kcov descriptor can be enabled per thread. */
		if (p->p_kd != NULL || kd->kd_state != KCOV_STATE_READY) {
			error = EBUSY;
			break;
		}
		mode = *((int *)data);
		if (mode != KCOV_MODE_TRACE_PC) {
			error = EINVAL;
			break;
		}
		kd->kd_state = KCOV_STATE_TRACE;
		kd->kd_mode = mode;
		p->p_kd = kd;
		break;
	case KIODISABLE:
		/* Only the thread that enabled tracing may disable it. */
		if (p->p_kd != kd || kd->kd_state != KCOV_STATE_TRACE) {
			error = EBUSY;
			break;
		}
		kd->kd_state = KCOV_STATE_READY;
		kd->kd_mode = KCOV_MODE_NONE;
		p->p_kd = NULL;
		break;
	default:
		error = ENOTTY;
	}

	DPRINTF("%s: unit=%d, state=%d, mode=%d, error=%d\n",
	    __func__, kd->kd_unit, kd->kd_state, kd->kd_mode, error);

	return (error);
}
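
/*
 * A minimal userland sketch of the call order enforced above:
 * KIOSETBUFSIZE, mmap(), KIOENABLE, then KIODISABLE. Error handling is
 * elided for brevity and getpid() stands in for whatever code should be
 * traced.
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/kcov.h>
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		unsigned long nmemb = 1024;
 *		uintptr_t *buf, i;
 *		int fd, mode = KCOV_MODE_TRACE_PC;
 *
 *		fd = open("/dev/kcov", O_RDWR);
 *		ioctl(fd, KIOSETBUFSIZE, &nmemb);
 *		buf = mmap(NULL, nmemb * sizeof(uintptr_t),
 *		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *		ioctl(fd, KIOENABLE, &mode);
 *		getpid();
 *		ioctl(fd, KIODISABLE);
 *		for (i = 0; i < buf[0]; i++)
 *			printf("%p\n", (void *)buf[i + 1]);
 *		close(fd);
 *		return (0);
 *	}
 */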

paddr_t
kcovmmap(dev_t dev, off_t offset, int prot)
{
	struct kcov_dev *kd;
	paddr_t pa;
	vaddr_t va;

	kd = kd_lookup(minor(dev));
	if (kd == NULL || kd->kd_state != KCOV_STATE_READY)
		return (paddr_t)(-1);

	if (offset < 0 || offset >= kd->kd_nmemb * sizeof(uintptr_t))
		return (paddr_t)(-1);

	va = (vaddr_t)kd->kd_buf + offset;
	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
		return (paddr_t)(-1);
	return (pa);
}

void
kcov_exit(struct proc *p)
{
	struct kcov_dev *kd;

	kd = p->p_kd;
	if (kd == NULL)
		return;

	DPRINTF("%s: unit=%d, state=%d, mode=%d\n",
	    __func__, kd->kd_unit, kd->kd_state, kd->kd_mode);

	if (kd->kd_state == KCOV_STATE_DYING) {
		kd_free(kd);
	} else {
		kd->kd_state = KCOV_STATE_READY;
		kd->kd_mode = KCOV_MODE_NONE;
	}
	p->p_kd = NULL;
}

struct kcov_dev *
kd_lookup(int unit)
{
	struct kcov_dev *kd;

	TAILQ_FOREACH(kd, &kd_list, kd_entry) {
		if (kd->kd_unit == unit)
			return (kd);
	}
	return (NULL);
}

int
kd_init(struct kcov_dev *kd, unsigned long nmemb)
{
	void *buf;
	size_t size;

	KASSERT(kd->kd_buf == NULL);

	if (kd->kd_state != KCOV_STATE_NONE)
		return (EBUSY);

	if (nmemb == 0 || nmemb > KCOV_BUF_MAX_NMEMB)
		return (EINVAL);

	size = roundup(nmemb * sizeof(uintptr_t), PAGE_SIZE);
	buf = malloc(size, M_SUBPROC, M_WAITOK | M_ZERO);
	/*
	 * malloc() can sleep; make sure no other thread initialized the
	 * device while we slept.
	 */
	if (kd->kd_state != KCOV_STATE_NONE) {
		free(buf, M_SUBPROC, size);
		return (EBUSY);
	}
	kd->kd_buf = buf;
	/* The first element is reserved to hold the number of used elements. */
	kd->kd_nmemb = nmemb - 1;
	kd->kd_size = size;
	kd->kd_state = KCOV_STATE_READY;
	return (0);
}
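
/*
 * Sizing example, assuming an 8-byte uintptr_t and a PAGE_SIZE of 4096:
 * nmemb = 1024 gives size = roundup(8192, 4096) = 8192 bytes, of which
 * kd_nmemb = 1023 slots can hold program counters, since kd_buf[0] is
 * reserved for the entry count.
 */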

void
kd_free(struct kcov_dev *kd)
{
	DPRINTF("%s: unit=%d, state=%d, mode=%d\n",
	    __func__, kd->kd_unit, kd->kd_state, kd->kd_mode);

	TAILQ_REMOVE(&kd_list, kd, kd_entry);
	free(kd->kd_buf, M_SUBPROC, kd->kd_size);
	free(kd, M_SUBPROC, sizeof(*kd));
}

/*
 * Return non-zero if the CPU is currently servicing an interrupt.
 * Only implemented on architectures with an interrupt depth counter;
 * elsewhere, interrupt context is not detected and interrupt coverage
 * is not filtered out.
 */
static inline int
inintr(void)
{
#if defined(__amd64__) || defined(__i386__)
	return (curcpu()->ci_idepth > 0);
#else
	return (0);
#endif
}