/*	$OpenBSD: kern_subr.c,v 1.11 2000/03/03 16:58:49 art Exp $	*/
/*	$NetBSD: kern_subr.c,v 1.15 1996/04/09 17:21:56 ragge Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_subr.c	8.3 (Berkeley) 1/21/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>

void uio_yield __P((struct proc *));

#define UIO_NEED_YIELD (roundrobin_attempts >= 2)
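
/*
 * Move n bytes of data between the kernel buffer cp and the addresses
 * described by uio, in the direction given by uio->uio_rw.  The iovec
 * list, residual count and offset in *uio are advanced as data is
 * moved.  During user-space copies the process voluntarily yields the
 * CPU if the scheduler has been trying to preempt it.
 */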

int
uiomove(cp, n, uio)
	register caddr_t cp;
	register int n;
	register struct uio *uio;
{
	register struct iovec *iov;
	u_int cnt;
	int error = 0;
	struct proc *p;

	p = uio->uio_procp;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ && uio->uio_rw != UIO_WRITE)
		panic("uiomove: mode");
	if (uio->uio_segflg == UIO_USERSPACE && p != curproc)
		panic("uiomove: proc");
#endif
	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;
		switch (uio->uio_segflg) {

		case UIO_USERSPACE:
			if (UIO_NEED_YIELD)
				uio_yield(p);
			if (uio->uio_rw == UIO_READ)
				error = copyout(cp, iov->iov_base, cnt);
			else
				error = copyin(iov->iov_base, cp, cnt);
			if (error)
				return (error);
			break;

		case UIO_SYSSPACE:
#if defined(UVM)
			if (uio->uio_rw == UIO_READ)
				error = kcopy(cp, iov->iov_base, cnt);
			else
				error = kcopy(iov->iov_base, cp, cnt);
			if (error)
				return (error);
#else
			if (uio->uio_rw == UIO_READ)
				bcopy((caddr_t)cp, iov->iov_base, cnt);
			else
				bcopy(iov->iov_base, (caddr_t)cp, cnt);
#endif
			break;
		}
		iov->iov_base += cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		cp += cnt;
		n -= cnt;
	}
	return (error);
}
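
/*
 * Example (illustrative only, not part of this interface): a character
 * device read routine typically uses uiomove() to copy data out to its
 * caller.  With a hypothetical softc holding sc_len valid bytes in
 * sc_buf:
 *
 *	int cnt = min(sc->sc_len, uio->uio_resid);
 *
 *	error = uiomove(sc->sc_buf, cnt, uio);
 */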

/*
 * Give next character to user as result of read.
 */
int
ureadc(c, uio)
	register int c;
	register struct uio *uio;
{
	register struct iovec *iov;

	if (uio->uio_resid == 0)
#ifdef DIAGNOSTIC
		panic("ureadc: zero resid");
#else
		return (EINVAL);
#endif
again:
	if (uio->uio_iovcnt <= 0)
#ifdef DIAGNOSTIC
		panic("ureadc: non-positive iovcnt");
#else
		return (EINVAL);
#endif
	iov = uio->uio_iov;
	if (iov->iov_len <= 0) {
		uio->uio_iovcnt--;
		uio->uio_iov++;
		goto again;
	}
	switch (uio->uio_segflg) {

	case UIO_USERSPACE:
		if (subyte(iov->iov_base, c) < 0)
			return (EFAULT);
		break;

	case UIO_SYSSPACE:
		*(char *)iov->iov_base = c;
		break;
	}
	iov->iov_base++;
	iov->iov_len--;
	uio->uio_resid--;
	uio->uio_offset++;
	return (0);
}

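/*
 * Example (illustrative): ureadc() suits code that produces output one
 * byte at a time, e.g. draining a hypothetical ring buffer to the
 * caller:
 *
 *	while (uio->uio_resid > 0 && sc->sc_count > 0) {
 *		c = sc->sc_ring[sc->sc_out++ & FOO_RING_MASK];
 *		sc->sc_count--;
 *		if ((error = ureadc(c, uio)) != 0)
 *			break;
 *	}
 */

/*
 * Voluntarily give up the CPU: put the process back on the run queue
 * and let the scheduler pick the next one to run.
 */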
void
uio_yield(p)
	struct proc *p;
{
	int s;

	p->p_priority = p->p_usrpri;
	s = splstatclock();
	setrunqueue(p);
	p->p_stats->p_ru.ru_nivcsw++;
	mi_switch();
	splx(s);
}

/*
 * General routine to allocate a hash table.
 */
void *
hashinit(elements, type, flags, hashmask)
	int elements, type, flags;
	u_long *hashmask;
{
	long hashsize;
	LIST_HEAD(generic, generic) *hashtbl;
	int i;

	if (elements <= 0)
		panic("hashinit: bad cnt");
	for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
		continue;
	hashsize >>= 1;
	hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, flags);
	if (hashtbl == NULL)		/* possible if the caller passed M_NOWAIT */
		return (NULL);
	for (i = 0; i < hashsize; i++)
		LIST_INIT(&hashtbl[i]);
	*hashmask = hashsize - 1;
	return (hashtbl);
}
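
/*
 * Example (illustrative, hypothetical names): callers keep the returned
 * table and mask and index buckets with "key & mask", e.g.
 *
 *	u_long foo_hashmask;
 *	LIST_HEAD(foo_head, foo_entry) *foo_hashtbl;
 *
 *	foo_hashtbl = hashinit(64, M_TEMP, M_WAITOK, &foo_hashmask);
 *	bucket = &foo_hashtbl[key & foo_hashmask];
 */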

/*
 * "Shutdown hook" types, functions, and variables.
 */

struct shutdownhook_desc {
	LIST_ENTRY(shutdownhook_desc) sfd_list;
	void	(*sfd_fn) __P((void *));
	void	*sfd_arg;
};

LIST_HEAD(, shutdownhook_desc) shutdownhook_list;

int shutdownhooks_done;

void *
shutdownhook_establish(fn, arg)
	void (*fn) __P((void *));
	void *arg;
{
	struct shutdownhook_desc *ndp;

	ndp = (struct shutdownhook_desc *)
	    malloc(sizeof (*ndp), M_DEVBUF, M_NOWAIT);
	if (ndp == NULL)
		return NULL;

	ndp->sfd_fn = fn;
	ndp->sfd_arg = arg;
	LIST_INSERT_HEAD(&shutdownhook_list, ndp, sfd_list);

	return (ndp);
}
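
/*
 * Example (illustrative): a driver that must quiesce its hardware
 * before reboot can register a hook at attach time and keep the
 * returned cookie in its (hypothetical) softc:
 *
 *	sc->sc_sdhook = shutdownhook_establish(foo_shutdown, sc);
 *	if (sc->sc_sdhook == NULL)
 *		printf("%s: can't establish shutdown hook\n",
 *		    sc->sc_dev.dv_xname);
 *
 * and pass the same cookie to shutdownhook_disestablish() if it is
 * ever detached.
 */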

void
shutdownhook_disestablish(vhook)
	void *vhook;
{
#ifdef DIAGNOSTIC
	struct shutdownhook_desc *dp;

	for (dp = shutdownhook_list.lh_first; dp != NULL;
	    dp = dp->sfd_list.le_next)
		if (dp == vhook)
			break;
	if (dp == NULL)
		panic("shutdownhook_disestablish: hook not established");
#endif

	LIST_REMOVE((struct shutdownhook_desc *)vhook, sfd_list);
	free(vhook, M_DEVBUF);
}

/*
 * Run shutdown hooks.  Should be invoked immediately before the
 * system is halted or rebooted, i.e. after the file systems have
 * been unmounted and the crash dump (if any) has been written.
 */
void
doshutdownhooks()
{
	struct shutdownhook_desc *dp;

	if (shutdownhooks_done)
		return;

	for (dp = shutdownhook_list.lh_first; dp != NULL;
	    dp = dp->sfd_list.le_next)
		(*dp->sfd_fn)(dp->sfd_arg);
}

/*
 * "Power hook" types, functions, and variables.
 */

struct powerhook_desc {
	LIST_ENTRY(powerhook_desc) sfd_list;
	void	(*sfd_fn) __P((int, void *));
	void	*sfd_arg;
};

LIST_HEAD(, powerhook_desc) powerhook_list;

void *
powerhook_establish(fn, arg)
	void (*fn) __P((int, void *));
	void *arg;
{
	struct powerhook_desc *ndp;

	ndp = (struct powerhook_desc *)
	    malloc(sizeof(*ndp), M_DEVBUF, M_NOWAIT);
	if (ndp == NULL)
		return NULL;

	ndp->sfd_fn = fn;
	ndp->sfd_arg = arg;
	LIST_INSERT_HEAD(&powerhook_list, ndp, sfd_list);

	return (ndp);
}
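
/*
 * Example (illustrative, hypothetical names): a driver interested in
 * suspend/resume transitions registers a handler and saves the cookie:
 *
 *	void
 *	foo_power(why, arg)
 *		int why;
 *		void *arg;
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		... quiesce or reinitialize sc's hardware based on why ...
 *	}
 *
 *	sc->sc_powerhook = powerhook_establish(foo_power, sc);
 */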

void
powerhook_disestablish(vhook)
	void *vhook;
{
#ifdef DIAGNOSTIC
	struct powerhook_desc *dp;

	for (dp = powerhook_list.lh_first; dp != NULL;
	    dp = dp->sfd_list.le_next)
		if (dp == vhook)
			break;
	if (dp == NULL)
		panic("powerhook_disestablish: hook not established");
#endif

	LIST_REMOVE((struct powerhook_desc *)vhook, sfd_list);
	free(vhook, M_DEVBUF);
}

/*
 * Run power hooks.
 */
void
dopowerhooks(why)
	int why;
{
	struct powerhook_desc *dp;

	for (dp = LIST_FIRST(&powerhook_list);
	     dp != NULL;
	     dp = LIST_NEXT(dp, sfd_list)) {
		(*dp->sfd_fn)(why, dp->sfd_arg);
	}
}
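
/*
 * Example (illustrative): the machine-dependent power management code
 * is expected to bracket a suspend/resume cycle with calls such as
 *
 *	dopowerhooks(PWR_SUSPEND);
 *	... put the machine to sleep, wait for the wakeup event ...
 *	dopowerhooks(PWR_RESUME);
 *
 * The exact set of "why" values is defined in <sys/systm.h> and may
 * vary between releases; PWR_SUSPEND/PWR_RESUME are shown here only as
 * an assumption about the common case.
 */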