xref: /openbsd-src/sys/dev/kcov.c (revision d1df930ffab53da22f3324c32bed7ac5709915e6)
1 /*	$OpenBSD: kcov.c,v 1.4 2018/08/27 15:57:39 anton Exp $	*/
2 
3 /*
4  * Copyright (c) 2018 Anton Lindqvist <anton@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <sys/param.h>
20 #include <sys/systm.h>
21 #include <sys/proc.h>
22 #include <sys/kcov.h>
23 #include <sys/malloc.h>
24 #include <sys/stdint.h>
25 #include <sys/queue.h>
26 
27 #include <uvm/uvm_extern.h>
28 
29 /* #define KCOV_DEBUG */
30 #ifdef KCOV_DEBUG
31 #define DPRINTF(x...) do { if (kcov_debug) printf(x); } while (0)
32 #else
33 #define DPRINTF(x...)
34 #endif
35 
struct kcov_dev {
	enum {
		KCOV_MODE_DISABLED,	/* no buffer allocated yet */
		KCOV_MODE_INIT,		/* buffer allocated, tracing disabled */
		KCOV_MODE_TRACE_PC,	/* tracing program counters */
		KCOV_MODE_DYING,	/* closed while enabled; freed on thread exit */
	}		 kd_mode;
	int		 kd_unit;	/* device minor */
	uintptr_t	*kd_buf;	/* traced coverage */
	size_t		 kd_nmemb;	/* usable elements, excluding counter slot 0 */
	size_t		 kd_size;	/* allocation size of kd_buf in bytes */

	TAILQ_ENTRY(kcov_dev)	kd_entry;	/* kd_list linkage */
};
50 
51 void kcovattach(int);
52 
53 int kd_alloc(struct kcov_dev *, unsigned long);
54 void kd_free(struct kcov_dev *);
55 struct kcov_dev *kd_lookup(int);
56 
57 static inline int inintr(void);
58 
59 TAILQ_HEAD(, kcov_dev) kd_list = TAILQ_HEAD_INITIALIZER(kd_list);
60 
61 #ifdef KCOV_DEBUG
62 int kcov_debug = 1;
63 #endif
64 
/*
 * Compiling the kernel with the `-fsanitize-coverage=trace-pc' option will
 * cause the following function to be called upon function entry and before
 * each block of instructions that maps to a single line in the original
 * source code.
 *
 * If kcov is enabled for the current thread, the kernel program counter will
 * be stored in its corresponding coverage buffer.
 * The first element in the coverage buffer holds the index of next available
 * element.
 */
void
__sanitizer_cov_trace_pc(void)
{
	extern int cold;
	struct kcov_dev *kd;
	uint64_t idx;

	/* Do not trace during boot. */
	if (cold)
		return;

	/* Do not trace in interrupts to prevent noisy coverage. */
	if (inintr())
		return;

	kd = curproc->p_kd;
	if (kd == NULL || kd->kd_mode != KCOV_MODE_TRACE_PC)
		return;

	/*
	 * kd_buf[0] is the element counter and lives in memory shared with
	 * userland (see kcovmmap()), so it must be bounds-checked before
	 * being used as an index.
	 */
	idx = kd->kd_buf[0];
	if (idx < kd->kd_nmemb) {
		kd->kd_buf[idx + 1] = (uintptr_t)__builtin_return_address(0);
		kd->kd_buf[0] = idx + 1;
	}
}
101 
/*
 * Autoconf attach hook; kcov keeps no global state that needs
 * initialization at attach time, so this is a no-op.
 */
void
kcovattach(int count)
{
}
106 
107 int
108 kcovopen(dev_t dev, int flag, int mode, struct proc *p)
109 {
110 	struct kcov_dev *kd;
111 
112 	if (kd_lookup(minor(dev)) != NULL)
113 		return (EBUSY);
114 
115 	DPRINTF("%s: unit=%d\n", __func__, minor(dev));
116 
117 	kd = malloc(sizeof(*kd), M_SUBPROC, M_WAITOK | M_ZERO);
118 	kd->kd_unit = minor(dev);
119 	TAILQ_INSERT_TAIL(&kd_list, kd, kd_entry);
120 	return (0);
121 }
122 
123 int
124 kcovclose(dev_t dev, int flag, int mode, struct proc *p)
125 {
126 	struct kcov_dev *kd;
127 
128 	kd = kd_lookup(minor(dev));
129 	if (kd == NULL)
130 		return (EINVAL);
131 
132 	DPRINTF("%s: unit=%d\n", __func__, minor(dev));
133 
134 	if (kd->kd_mode == KCOV_MODE_TRACE_PC)
135 		kd->kd_mode = KCOV_MODE_DYING;
136 	else
137 		kd_free(kd);
138 
139 	return (0);
140 }
141 
142 int
143 kcovioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
144 {
145 	struct kcov_dev *kd;
146 	int error = 0;
147 
148 	kd = kd_lookup(minor(dev));
149 	if (kd == NULL)
150 		return (ENXIO);
151 
152 	switch (cmd) {
153 	case KIOSETBUFSIZE:
154 		if (kd->kd_mode != KCOV_MODE_DISABLED) {
155 			error = EBUSY;
156 			break;
157 		}
158 		error = kd_alloc(kd, *((unsigned long *)data));
159 		if (error == 0)
160 			kd->kd_mode = KCOV_MODE_INIT;
161 		break;
162 	case KIOENABLE:
163 		/* Only one kcov descriptor can be enabled per thread. */
164 		if (p->p_kd != NULL || kd->kd_mode != KCOV_MODE_INIT) {
165 			error = EBUSY;
166 			break;
167 		}
168 		kd->kd_mode = KCOV_MODE_TRACE_PC;
169 		p->p_kd = kd;
170 		break;
171 	case KIODISABLE:
172 		/* Only the enabled thread may disable itself. */
173 		if (p->p_kd != kd || kd->kd_mode != KCOV_MODE_TRACE_PC) {
174 			error = EBUSY;
175 			break;
176 		}
177 		kd->kd_mode = KCOV_MODE_INIT;
178 		p->p_kd = NULL;
179 		break;
180 	default:
181 		error = EINVAL;
182 		DPRINTF("%s: %lu: unknown command\n", __func__, cmd);
183 	}
184 
185 	DPRINTF("%s: unit=%d, mode=%d, error=%d\n",
186 	    __func__, kd->kd_unit, kd->kd_mode, error);
187 
188 	return (error);
189 }
190 
191 paddr_t
192 kcovmmap(dev_t dev, off_t offset, int prot)
193 {
194 	struct kcov_dev *kd;
195 	paddr_t pa;
196 	vaddr_t va;
197 
198 	kd = kd_lookup(minor(dev));
199 	if (kd == NULL)
200 		return (paddr_t)(-1);
201 
202 	if (offset < 0 || offset >= kd->kd_nmemb * sizeof(uintptr_t))
203 		return (paddr_t)(-1);
204 
205 	va = (vaddr_t)kd->kd_buf + offset;
206 	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
207 		return (paddr_t)(-1);
208 	return (pa);
209 }
210 
211 void
212 kcov_exit(struct proc *p)
213 {
214 	struct kcov_dev *kd;
215 
216 	kd = p->p_kd;
217 	if (kd == NULL)
218 		return;
219 
220 	DPRINTF("%s: unit=%d\n", __func__, kd->kd_unit);
221 
222 	if (kd->kd_mode == KCOV_MODE_DYING)
223 		kd_free(kd);
224 	else
225 		kd->kd_mode = KCOV_MODE_INIT;
226 	p->p_kd = NULL;
227 }
228 
229 struct kcov_dev *
230 kd_lookup(int unit)
231 {
232 	struct kcov_dev *kd;
233 
234 	TAILQ_FOREACH(kd, &kd_list, kd_entry) {
235 		if (kd->kd_unit == unit)
236 			return (kd);
237 	}
238 	return (NULL);
239 }
240 
241 int
242 kd_alloc(struct kcov_dev *kd, unsigned long nmemb)
243 {
244 	size_t size;
245 
246 	KASSERT(kd->kd_buf == NULL);
247 
248 	if (nmemb == 0 || nmemb > KCOV_BUF_MAX_NMEMB)
249 		return (EINVAL);
250 
251 	size = roundup(nmemb * sizeof(uintptr_t), PAGE_SIZE);
252 	kd->kd_buf = malloc(size, M_SUBPROC, M_WAITOK | M_ZERO);
253 	/* The first element is reserved to hold the number of used elements. */
254 	kd->kd_nmemb = nmemb - 1;
255 	kd->kd_size = size;
256 	return (0);
257 }
258 
/*
 * Unlink a descriptor from the global list and release its coverage
 * buffer and the descriptor itself. Callers must ensure no thread can
 * still trace into the buffer (see kcovclose()/kcov_exit()).
 */
void
kd_free(struct kcov_dev *kd)
{
	DPRINTF("%s: unit=%d mode=%d\n", __func__, kd->kd_unit, kd->kd_mode);

	TAILQ_REMOVE(&kd_list, kd, kd_entry);
	free(kd->kd_buf, M_SUBPROC, kd->kd_size);
	free(kd, M_SUBPROC, sizeof(*kd));
}
268 
/*
 * Return non-zero if the CPU is currently servicing an interrupt.
 * Only implemented for architectures that expose ci_idepth; on other
 * architectures this always reports 0, so interrupt-context coverage
 * is not filtered out there.
 */
static inline int
inintr(void)
{
#if defined(__amd64__) || defined(__i386__)
	return (curcpu()->ci_idepth > 0);
#else
	return (0);
#endif
}
278