/*	$OpenBSD: kcov.c,v 1.20 2020/06/07 19:23:33 anton Exp $	*/

/*
 * Copyright (c) 2018 Anton Lindqvist <anton@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kcov.h>
#include <sys/malloc.h>
#include <sys/stdint.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>

#define KCOV_BUF_MEMB_SIZE	sizeof(uintptr_t)

#define KCOV_CMP_CONST		0x1
#define KCOV_CMP_SIZE(x)	((x) << 1)

#define KCOV_STATE_NONE		0
#define KCOV_STATE_READY	1
#define KCOV_STATE_TRACE	2
#define KCOV_STATE_DYING	3

struct kcov_dev {
	int		 kd_state;
	int		 kd_mode;
	int		 kd_unit;	/* device minor */
	uintptr_t	*kd_buf;	/* traced coverage */
	size_t		 kd_nmemb;
	size_t		 kd_size;

	TAILQ_ENTRY(kcov_dev)	kd_entry;
};

void kcovattach(int);

int kd_init(struct kcov_dev *, unsigned long);
void kd_free(struct kcov_dev *);
struct kcov_dev *kd_lookup(int);

static struct kcov_dev *kd_curproc(int);

TAILQ_HEAD(, kcov_dev) kd_list = TAILQ_HEAD_INITIALIZER(kd_list);

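/*
 * Set to zero once kcovopen() has been called at least once; tracing is
 * suppressed while it remains set, see kd_curproc().
 */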
int kcov_cold = 1;

/*
 * Compiling the kernel with the `-fsanitize-coverage=trace-pc' option will
 * cause the following function to be called upon function entry and before
 * each block of instructions that maps to a single line in the original
 * source code.
 *
 * If kcov is enabled for the current thread, the kernel program counter will
 * be stored in its corresponding coverage buffer.
 * The first element in the coverage buffer holds the number of recorded
 * program counters.
 */
void
__sanitizer_cov_trace_pc(void)
{
	struct kcov_dev *kd;
	uint64_t idx;

	kd = kd_curproc(KCOV_MODE_TRACE_PC);
	if (kd == NULL)
		return;

	idx = kd->kd_buf[0];
	if (idx + 1 <= kd->kd_nmemb) {
		kd->kd_buf[idx + 1] = (uintptr_t)__builtin_return_address(0);
		kd->kd_buf[0] = idx + 1;
	}
}

/*
 * Compiling the kernel with the `-fsanitize-coverage=trace-cmp' option will
 * cause the following function to be called upon integer comparisons and
 * switch statements.
 *
 * If kcov is enabled for the current thread, the comparison will be stored in
 * its corresponding coverage buffer as a record of four consecutive elements:
 * the comparison type, both operands and the program counter.
 * The first element in the coverage buffer holds the number of recorded
 * comparisons.
 */
void
trace_cmp(uint64_t type, uint64_t arg1, uint64_t arg2, uintptr_t pc)
{
	struct kcov_dev *kd;
	uint64_t idx;

	kd = kd_curproc(KCOV_MODE_TRACE_CMP);
	if (kd == NULL)
		return;

	idx = kd->kd_buf[0];
	if (idx * 4 + 4 <= kd->kd_nmemb) {
		kd->kd_buf[idx * 4 + 1] = type;
		kd->kd_buf[idx * 4 + 2] = arg1;
		kd->kd_buf[idx * 4 + 3] = arg2;
		kd->kd_buf[idx * 4 + 4] = pc;
		kd->kd_buf[0] = idx + 1;
	}
}

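/*
 * Compiler-emitted hooks for 1, 2, 4 and 8 byte integer comparisons. Each
 * wrapper encodes the operand width and hands off to trace_cmp(); the
 * `const' variants additionally set KCOV_CMP_CONST, signalling that one of
 * the operands is a compile-time constant.
 */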
void
__sanitizer_cov_trace_cmp1(uint8_t arg1, uint8_t arg2)
{
	trace_cmp(KCOV_CMP_SIZE(0), arg1, arg2,
	    (uintptr_t)__builtin_return_address(0));
}

void
__sanitizer_cov_trace_cmp2(uint16_t arg1, uint16_t arg2)
{
	trace_cmp(KCOV_CMP_SIZE(1), arg1, arg2,
	    (uintptr_t)__builtin_return_address(0));
}

void
__sanitizer_cov_trace_cmp4(uint32_t arg1, uint32_t arg2)
{
	trace_cmp(KCOV_CMP_SIZE(2), arg1, arg2,
	    (uintptr_t)__builtin_return_address(0));
}

void
__sanitizer_cov_trace_cmp8(uint64_t arg1, uint64_t arg2)
{
	trace_cmp(KCOV_CMP_SIZE(3), arg1, arg2,
	    (uintptr_t)__builtin_return_address(0));
}

void
__sanitizer_cov_trace_const_cmp1(uint8_t arg1, uint8_t arg2)
{
	trace_cmp(KCOV_CMP_SIZE(0) | KCOV_CMP_CONST, arg1, arg2,
	    (uintptr_t)__builtin_return_address(0));
}

void
__sanitizer_cov_trace_const_cmp2(uint16_t arg1, uint16_t arg2)
{
	trace_cmp(KCOV_CMP_SIZE(1) | KCOV_CMP_CONST, arg1, arg2,
	    (uintptr_t)__builtin_return_address(0));
}

void
__sanitizer_cov_trace_const_cmp4(uint32_t arg1, uint32_t arg2)
{
	trace_cmp(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2,
	    (uintptr_t)__builtin_return_address(0));
}

void
__sanitizer_cov_trace_const_cmp8(uint64_t arg1, uint64_t arg2)
{
	trace_cmp(KCOV_CMP_SIZE(3) | KCOV_CMP_CONST, arg1, arg2,
	    (uintptr_t)__builtin_return_address(0));
}

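/*
 * Compiler-emitted hook for switch statements. The cases array is laid out
 * as follows: cases[0] holds the number of case values, cases[1] the bit
 * width of the switched-on value and cases[2] onwards the case values
 * themselves. Each case value is recorded as a constant comparison against
 * the switched-on value.
 */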
void
__sanitizer_cov_trace_switch(uint64_t val, uint64_t *cases)
{
	uint64_t i, nbits, ncases, type;
	uintptr_t pc;

	pc = (uintptr_t)__builtin_return_address(0);
	ncases = cases[0];
	nbits = cases[1];

	switch (nbits) {
	case 8:
		type = KCOV_CMP_SIZE(0);
		break;
	case 16:
		type = KCOV_CMP_SIZE(1);
		break;
	case 32:
		type = KCOV_CMP_SIZE(2);
		break;
	case 64:
		type = KCOV_CMP_SIZE(3);
		break;
	default:
		return;
	}
	type |= KCOV_CMP_CONST;

	for (i = 0; i < ncases; i++)
		trace_cmp(type, cases[i + 2], val, pc);
}

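/*
 * Pseudo-device attach routine, nothing to do; per-descriptor state is
 * allocated lazily in kcovopen().
 */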
void
kcovattach(int count)
{
}

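/*
 * Open a kcov descriptor: allocate per-minor state and insert it on the
 * global list. Only one descriptor per minor can exist at a time.
 */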
int
kcovopen(dev_t dev, int flag, int mode, struct proc *p)
{
	struct kcov_dev *kd;

	if (kd_lookup(minor(dev)) != NULL)
		return (EBUSY);

	if (kcov_cold)
		kcov_cold = 0;

	kd = malloc(sizeof(*kd), M_SUBPROC, M_WAITOK | M_ZERO);
	kd->kd_unit = minor(dev);
	TAILQ_INSERT_TAIL(&kd_list, kd, kd_entry);
	return (0);
}

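/*
 * Close a kcov descriptor. If a thread still has tracing enabled, freeing
 * is deferred to kcov_exit(); otherwise the descriptor is released here.
 */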
int
kcovclose(dev_t dev, int flag, int mode, struct proc *p)
{
	struct kcov_dev *kd;

	kd = kd_lookup(minor(dev));
	if (kd == NULL)
		return (EINVAL);

	if (kd->kd_state == KCOV_STATE_TRACE) {
		/*
		 * Another thread is currently using the kcov descriptor,
		 * postpone freeing to kcov_exit().
		 */
		kd->kd_state = KCOV_STATE_DYING;
		kd->kd_mode = KCOV_MODE_NONE;
	} else {
		kd_free(kd);
	}

	return (0);
}

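/*
 * Device ioctl handler: KIOSETBUFSIZE allocates the coverage buffer,
 * KIOENABLE enables tracing for the calling thread and KIODISABLE turns it
 * off again.
 *
 * Rough sketch of the expected userland sequence, for illustration only;
 * see kcov(4) for the authoritative interface:
 *
 *	int fd = open("/dev/kcov", O_RDWR);
 *	unsigned long nmemb = KCOV_BUF_MAX_NMEMB;
 *	ioctl(fd, KIOSETBUFSIZE, &nmemb);
 *	uintptr_t *buf = mmap(NULL, nmemb * sizeof(uintptr_t),
 *	    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	int mode = KCOV_MODE_TRACE_PC;
 *	ioctl(fd, KIOENABLE, &mode);
 *	... exercise the kernel; buf[0] holds the number of recorded PCs ...
 *	ioctl(fd, KIODISABLE);
 */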
int
kcovioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	struct kcov_dev *kd;
	int mode;
	int error = 0;

	kd = kd_lookup(minor(dev));
	if (kd == NULL)
		return (ENXIO);

	switch (cmd) {
	case KIOSETBUFSIZE:
		error = kd_init(kd, *((unsigned long *)data));
		break;
	case KIOENABLE:
		/* Only one kcov descriptor can be enabled per thread. */
		if (p->p_kd != NULL || kd->kd_state != KCOV_STATE_READY) {
			error = EBUSY;
			break;
		}
		mode = *((int *)data);
		if (mode != KCOV_MODE_TRACE_PC && mode != KCOV_MODE_TRACE_CMP) {
			error = EINVAL;
			break;
		}
		kd->kd_state = KCOV_STATE_TRACE;
		kd->kd_mode = mode;
		p->p_kd = kd;
		break;
	case KIODISABLE:
		/* Only the enabled thread may disable itself. */
		if (p->p_kd != kd || kd->kd_state != KCOV_STATE_TRACE) {
			error = EBUSY;
			break;
		}
		kd->kd_state = KCOV_STATE_READY;
		kd->kd_mode = KCOV_MODE_NONE;
		p->p_kd = NULL;
		break;
	default:
		error = ENOTTY;
	}

	return (error);
}

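/*
 * Translate an offset into the coverage buffer to the physical address
 * backing it, allowing userland to map the buffer. Returns -1 for an
 * invalid offset or a missing mapping.
 */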
paddr_t
kcovmmap(dev_t dev, off_t offset, int prot)
{
	struct kcov_dev *kd;
	paddr_t pa;
	vaddr_t va;

	kd = kd_lookup(minor(dev));
	if (kd == NULL)
		return (paddr_t)(-1);

	if (offset < 0 || offset >= kd->kd_nmemb * KCOV_BUF_MEMB_SIZE)
		return (paddr_t)(-1);

	va = (vaddr_t)kd->kd_buf + offset;
	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
		return (paddr_t)(-1);
	return (pa);
}

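/*
 * Thread exit hook: if the exiting thread had kcov enabled, either free the
 * descriptor (when its device has already been closed) or make it ready to
 * be enabled again.
 */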
void
kcov_exit(struct proc *p)
{
	struct kcov_dev *kd;

	kd = p->p_kd;
	if (kd == NULL)
		return;

	if (kd->kd_state == KCOV_STATE_DYING) {
		kd_free(kd);
	} else {
		kd->kd_state = KCOV_STATE_READY;
		kd->kd_mode = KCOV_MODE_NONE;
	}
	p->p_kd = NULL;
}

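/*
 * Find the kcov descriptor associated with the given device minor,
 * or NULL if none exists.
 */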
struct kcov_dev *
kd_lookup(int unit)
{
	struct kcov_dev *kd;

	TAILQ_FOREACH(kd, &kd_list, kd_entry) {
		if (kd->kd_unit == unit)
			return (kd);
	}
	return (NULL);
}

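/*
 * Allocate the coverage buffer for a descriptor; nmemb is the requested
 * number of uintptr_t sized elements, including the reserved counter
 * element. May only be done once per descriptor.
 */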
int
kd_init(struct kcov_dev *kd, unsigned long nmemb)
{
	void *buf;
	size_t size;

	KASSERT(kd->kd_buf == NULL);

	if (kd->kd_state != KCOV_STATE_NONE)
		return (EBUSY);

	if (nmemb == 0 || nmemb > KCOV_BUF_MAX_NMEMB)
		return (EINVAL);

	size = roundup(nmemb * KCOV_BUF_MEMB_SIZE, PAGE_SIZE);
	buf = km_alloc(size, &kv_any, &kp_zero, &kd_waitok);
	if (buf == NULL)
		return (ENOMEM);
	/* km_alloc() can sleep, ensure no other thread won the race. */
	if (kd->kd_state != KCOV_STATE_NONE) {
		km_free(buf, size, &kv_any, &kp_zero);
		return (EBUSY);
	}
	kd->kd_buf = buf;
	/* The first element is reserved to hold the number of used elements. */
	kd->kd_nmemb = nmemb - 1;
	kd->kd_size = size;
	kd->kd_state = KCOV_STATE_READY;
	return (0);
}

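/*
 * Remove the descriptor from the global list and release its coverage
 * buffer and backing memory.
 */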
void
kd_free(struct kcov_dev *kd)
{
	TAILQ_REMOVE(&kd_list, kd, kd_entry);
	if (kd->kd_buf != NULL)
		km_free(kd->kd_buf, kd->kd_size, &kv_any, &kp_zero);
	free(kd, M_SUBPROC, sizeof(*kd));
}

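/*
 * Return the kcov descriptor enabled for the current thread in the given
 * mode, or NULL if tracing must not happen right now.
 */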
static struct kcov_dev *
kd_curproc(int mode)
{
	struct kcov_dev *kd;

	/*
	 * Do not trace if the kernel has panicked. This could happen if curproc
	 * had kcov enabled while panicking.
	 */
	if (__predict_false(panicstr || db_active))
		return (NULL);

	/*
	 * Do not trace before kcovopen() has been called at least once.
	 * At this point, all secondary CPUs have booted and accessing curcpu()
	 * is safe.
	 */
	if (__predict_false(kcov_cold))
		return (NULL);

	/* Do not trace in interrupts to prevent noisy coverage. */
#if defined(__amd64__) || defined(__arm__) || defined(__arm64__) || \
    defined(__i386__)
	if (curcpu()->ci_idepth > 0)
		return (NULL);
#endif

	kd = curproc->p_kd;
	if (__predict_true(kd == NULL) || kd->kd_mode != mode)
		return (NULL);
	return (kd);
}