/*	$NetBSD: subr_fault.c,v 1.2 2020/06/30 16:28:17 maxv Exp $	*/

/*
 * Copyright (c) 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_fault.c,v 1.2 2020/06/30 16:28:17 maxv Exp $");

#include <sys/module.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>

#include <sys/conf.h>
#include <sys/types.h>
#include <sys/specificdata.h>
#include <sys/kmem.h>
#include <sys/atomic.h>
#include <sys/ioccom.h>
#include <sys/lwp.h>
#include <sys/fault.h>

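/*
 * fault(4): fault injection facility.
 *
 * Instrumented kernel code paths call fault_inject(), which returns
 * true when the caller should simulate a failure. Injection is armed
 * through ioctls, either globally or for the calling LWP only. In
 * FAULT_MODE_NTH_ONESHOT, the only mode currently implemented, a
 * single fault is reported on the nth call to fault_inject() after
 * arming.
 */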
typedef struct {
	volatile bool enabled;		/* injection armed */
	volatile bool oneshot;		/* stop after the first fault */
	volatile unsigned long nth;	/* fire on every nth call */
	volatile unsigned long cnt;	/* calls to fault_inject() so far */
	volatile unsigned long nfaults;	/* faults injected so far */
} fault_t;

static fault_t fault_global __cacheline_aligned = {
	.enabled = false,
	.oneshot = false,
	.nth = FAULT_NTH_MIN,
	.cnt = 0,
	.nfaults = 0
};

static kmutex_t fault_global_lock __cacheline_aligned;
static specificdata_key_t fault_lwp_key;

/* -------------------------------------------------------------------------- */

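/*
 * Decide whether the calling code path should fail now. The global
 * scope takes precedence; otherwise the per-LWP state is consulted,
 * if it exists and is armed. Faults are never injected while the
 * system is cold.
 */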
bool
fault_inject(void)
{
	volatile unsigned long cnt;
	fault_t *f;

	if (__predict_false(cold))
		return false;

	if (__predict_false(atomic_load_acquire(&fault_global.enabled))) {
		f = &fault_global;
	} else {
		f = lwp_getspecific(fault_lwp_key);
		if (__predict_true(f == NULL))
			return false;
		if (__predict_false(!f->enabled))
			return false;
	}

	if (atomic_load_relaxed(&f->oneshot)) {
		if (__predict_true(atomic_load_relaxed(&f->nfaults) > 0))
			return false;
	}

	cnt = atomic_inc_ulong_nv(&f->cnt);
	if (__predict_false(cnt % atomic_load_relaxed(&f->nth) == 0)) {
		atomic_inc_ulong(&f->nfaults);
		return true;
	}

	return false;
}

/* -------------------------------------------------------------------------- */

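/*
 * Character device interface. Userland arms, disarms and queries
 * fault injection through ioctls on the fault(4) device. A minimal
 * sketch of a consumer follows; the /dev/fault path is an assumption,
 * since the device node name is not established in this file:
 *
 *	struct fault_ioc_enable en = {
 *		.scope = FAULT_SCOPE_LWP,
 *		.mode = FAULT_MODE_NTH_ONESHOT,
 *		.nth = FAULT_NTH_MIN,
 *	};
 *	struct fault_ioc_getinfo info = { .scope = FAULT_SCOPE_LWP };
 *	int fd = open("/dev/fault", O_RDWR);
 *
 *	if (ioctl(fd, FAULT_IOC_ENABLE, &en) == -1)
 *		err(EXIT_FAILURE, "FAULT_IOC_ENABLE");
 *	... exercise the code under test ...
 *	if (ioctl(fd, FAULT_IOC_GETINFO, &info) == 0)
 *		printf("%lu fault(s) injected\n", info.nfaults);
 */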
static int
fault_open(dev_t dev, int flag, int mode, struct lwp *l)
{
	return 0;
}

static int
fault_close(dev_t dev, int flag, int mode, struct lwp *l)
{
	return 0;
}

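/*
 * FAULT_IOC_ENABLE: arm fault injection. Only FAULT_MODE_NTH_ONESHOT
 * is accepted, and nth must be at least FAULT_NTH_MIN. Global arming
 * is serialized by fault_global_lock; the per-LWP state is allocated
 * on first use and touched only by its owning LWP. Returns EEXIST if
 * the requested scope is already armed.
 */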
static int
fault_ioc_enable(struct fault_ioc_enable *args)
{
	fault_t *f;

	if (args->mode != FAULT_MODE_NTH_ONESHOT)
		return EINVAL;
	if (args->nth < FAULT_NTH_MIN)
		return EINVAL;

	switch (args->scope) {
	case FAULT_SCOPE_GLOBAL:
		mutex_enter(&fault_global_lock);
		if (fault_global.enabled) {
			mutex_exit(&fault_global_lock);
			return EEXIST;
		}
		fault_global.oneshot = true;
		atomic_store_relaxed(&fault_global.nth, args->nth);
		fault_global.cnt = 0;
		fault_global.nfaults = 0;
		atomic_store_release(&fault_global.enabled, true);
		mutex_exit(&fault_global_lock);
		break;
	case FAULT_SCOPE_LWP:
		f = lwp_getspecific(fault_lwp_key);
		if (f != NULL) {
			if (f->enabled)
				return EEXIST;
		} else {
			f = kmem_zalloc(sizeof(*f), KM_SLEEP);
			lwp_setspecific(fault_lwp_key, f);
		}
		f->oneshot = true;
		atomic_store_relaxed(&f->nth, args->nth);
		f->cnt = 0;
		f->nfaults = 0;
		atomic_store_release(&f->enabled, true);
		break;
	default:
		return EINVAL;
	}

	return 0;
}

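/*
 * FAULT_IOC_DISABLE: disarm the given scope. Returns ENOENT if it is
 * not currently armed. The per-LWP structure is kept for reuse and is
 * freed only when the LWP exits.
 */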
static int
fault_ioc_disable(struct fault_ioc_disable *args)
{
	fault_t *f;

	switch (args->scope) {
	case FAULT_SCOPE_GLOBAL:
		mutex_enter(&fault_global_lock);
		if (!fault_global.enabled) {
			mutex_exit(&fault_global_lock);
			return ENOENT;
		}
		atomic_store_release(&fault_global.enabled, false);
		mutex_exit(&fault_global_lock);
		break;
	case FAULT_SCOPE_LWP:
		f = lwp_getspecific(fault_lwp_key);
		if (f == NULL)
			return ENOENT;
		if (!f->enabled)
			return ENOENT;
		atomic_store_release(&f->enabled, false);
		break;
	default:
		return EINVAL;
	}

	return 0;
}

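/*
 * FAULT_IOC_GETINFO: report how many faults have been injected so far
 * in the given scope.
 */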
static int
fault_ioc_getinfo(struct fault_ioc_getinfo *args)
{
	fault_t *f;

	switch (args->scope) {
	case FAULT_SCOPE_GLOBAL:
		args->nfaults = atomic_load_relaxed(&fault_global.nfaults);
		break;
	case FAULT_SCOPE_LWP:
		f = lwp_getspecific(fault_lwp_key);
		if (f == NULL)
			return ENOENT;
		args->nfaults = atomic_load_relaxed(&f->nfaults);
		break;
	default:
		return EINVAL;
	}

	return 0;
}

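/* Dispatch the fault(4) ioctls to their handlers. */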
static int
fault_ioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
{
	switch (cmd) {
	case FAULT_IOC_ENABLE:
		return fault_ioc_enable(addr);
	case FAULT_IOC_DISABLE:
		return fault_ioc_disable(addr);
	case FAULT_IOC_GETINFO:
		return fault_ioc_getinfo(addr);
	default:
		return EINVAL;
	}
}

const struct cdevsw fault_cdevsw = {
	.d_open = fault_open,
	.d_close = fault_close,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = fault_ioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE
};

/* -------------------------------------------------------------------------- */

MODULE(MODULE_CLASS_MISC, fault, NULL);

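/*
 * Destructor for the per-LWP injection state, called via the
 * specificdata key when an LWP is destroyed.
 */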
static void
fault_lwp_free(void *arg)
{
	fault_t *f = (fault_t *)arg;

	if (f == NULL) {
		return;
	}

	kmem_free(f, sizeof(*f));
}

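/*
 * One-time module setup: the mutex serializing global arming, and the
 * specificdata key carrying the per-LWP state.
 */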
static void
fault_init(void)
{
	mutex_init(&fault_global_lock, MUTEX_DEFAULT, IPL_NONE);
	lwp_specific_key_create(&fault_lwp_key, fault_lwp_free);
}

static int
fault_modcmd(modcmd_t cmd, void *arg)
{
	switch (cmd) {
	case MODULE_CMD_INIT:
		fault_init();
		return 0;
	case MODULE_CMD_FINI:
		return EINVAL;
	default:
		return ENOTTY;
	}
}
281