1 /*	$OpenBSD: kern_sysctl.c,v 1.412 2023/05/04 09:40:36 mvs Exp $	*/
2 /*	$NetBSD: kern_sysctl.c,v 1.17 1996/05/20 17:49:05 mrg Exp $	*/
3 
4 /*-
5  * Copyright (c) 1982, 1986, 1989, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  *
8  * This code is derived from software contributed to Berkeley by
9  * Mike Karels at Berkeley Software Design, Inc.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. Neither the name of the University nor the names of its contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  *
35  *	@(#)kern_sysctl.c	8.4 (Berkeley) 4/14/94
36  */
37 
38 /*
39  * sysctl system call.
40  */
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/kernel.h>
45 #include <sys/malloc.h>
46 #include <sys/pool.h>
47 #include <sys/proc.h>
48 #include <sys/resourcevar.h>
49 #include <sys/signalvar.h>
50 #include <sys/fcntl.h>
51 #include <sys/file.h>
52 #include <sys/filedesc.h>
53 #include <sys/vnode.h>
54 #include <sys/unistd.h>
55 #include <sys/buf.h>
56 #include <sys/clockintr.h>
57 #include <sys/tty.h>
58 #include <sys/disklabel.h>
59 #include <sys/disk.h>
60 #include <sys/sysctl.h>
61 #include <sys/msgbuf.h>
62 #include <sys/vmmeter.h>
63 #include <sys/namei.h>
64 #include <sys/exec.h>
65 #include <sys/mbuf.h>
66 #include <sys/percpu.h>
67 #include <sys/sensors.h>
68 #include <sys/pipe.h>
69 #include <sys/eventvar.h>
70 #include <sys/socketvar.h>
71 #include <sys/socket.h>
72 #include <sys/domain.h>
73 #include <sys/protosw.h>
74 #include <sys/pledge.h>
75 #include <sys/timetc.h>
76 #include <sys/evcount.h>
77 #include <sys/un.h>
78 #include <sys/unpcb.h>
79 #include <sys/sched.h>
80 #include <sys/mount.h>
81 #include <sys/syscallargs.h>
82 #include <sys/wait.h>
83 #include <sys/witness.h>
84 
85 #include <uvm/uvm_extern.h>
86 
87 #include <dev/cons.h>
88 
89 #include <net/route.h>
90 #include <netinet/in.h>
91 #include <netinet/ip.h>
92 #include <netinet/ip_var.h>
93 #include <netinet/in_pcb.h>
94 #include <netinet/ip6.h>
95 #include <netinet/tcp.h>
96 #include <netinet/tcp_timer.h>
97 #include <netinet/tcp_var.h>
98 #include <netinet/udp.h>
99 #include <netinet/udp_var.h>
100 #include <netinet6/ip6_var.h>
101 
102 #ifdef DDB
103 #include <ddb/db_var.h>
104 #endif
105 
106 #ifdef SYSVMSG
107 #include <sys/msg.h>
108 #endif
109 #ifdef SYSVSEM
110 #include <sys/sem.h>
111 #endif
112 #ifdef SYSVSHM
113 #include <sys/shm.h>
114 #endif
115 
116 #include "audio.h"
117 #include "dt.h"
118 #include "pf.h"
119 #include "video.h"
120 
121 extern struct forkstat forkstat;
122 extern struct nchstats nchstats;
123 extern int fscale;
124 extern fixpt_t ccpu;
125 extern long numvnodes;
126 extern int allowdt;
127 extern int audio_record_enable;
128 extern int video_record_enable;
129 extern int autoconf_serial;
130 
131 int allowkmem;
132 
133 int sysctl_diskinit(int, struct proc *);
134 int sysctl_proc_args(int *, u_int, void *, size_t *, struct proc *);
135 int sysctl_proc_cwd(int *, u_int, void *, size_t *, struct proc *);
136 int sysctl_proc_nobroadcastkill(int *, u_int, void *, size_t, void *, size_t *,
137 	struct proc *);
138 int sysctl_proc_vmmap(int *, u_int, void *, size_t *, struct proc *);
139 int sysctl_intrcnt(int *, u_int, void *, size_t *);
140 int sysctl_sensors(int *, u_int, void *, size_t *, void *, size_t);
141 int sysctl_cptime2(int *, u_int, void *, size_t *, void *, size_t);
142 int sysctl_audio(int *, u_int, void *, size_t *, void *, size_t);
143 int sysctl_video(int *, u_int, void *, size_t *, void *, size_t);
144 int sysctl_cpustats(int *, u_int, void *, size_t *, void *, size_t);
145 int sysctl_utc_offset(void *, size_t *, void *, size_t);
146 
147 void fill_file(struct kinfo_file *, struct file *, struct filedesc *, int,
148     struct vnode *, struct process *, struct proc *, struct socket *, int);
149 void fill_kproc(struct process *, struct kinfo_proc *, struct proc *, int);
150 
151 int (*cpu_cpuspeed)(int *);
152 
153 /*
154  * Lock to avoid too many processes vslocking a large amount of memory
155  * at the same time.
156  */
157 struct rwlock sysctl_lock = RWLOCK_INITIALIZER("sysctllk");
158 struct rwlock sysctl_disklock = RWLOCK_INITIALIZER("sysctldlk");
159 
160 int
161 sys_sysctl(struct proc *p, void *v, register_t *retval)
162 {
163 	struct sys_sysctl_args /* {
164 		syscallarg(const int *) name;
165 		syscallarg(u_int) namelen;
166 		syscallarg(void *) old;
167 		syscallarg(size_t *) oldlenp;
168 		syscallarg(void *) new;
169 		syscallarg(size_t) newlen;
170 	} */ *uap = v;
171 	int error, dokernellock = 1, dolock = 1;
172 	size_t savelen = 0, oldlen = 0;
173 	sysctlfn *fn;
174 	int name[CTL_MAXNAME];
175 
176 	if (SCARG(uap, new) != NULL &&
177 	    (error = suser(p)))
178 		return (error);
179 	/*
180 	 * all top-level sysctl names are non-terminal
181 	 */
182 	if (SCARG(uap, namelen) > CTL_MAXNAME || SCARG(uap, namelen) < 2)
183 		return (EINVAL);
184 	error = copyin(SCARG(uap, name), name,
185 		       SCARG(uap, namelen) * sizeof(int));
186 	if (error)
187 		return (error);
188 
189 	error = pledge_sysctl(p, SCARG(uap, namelen),
190 	    name, SCARG(uap, new));
191 	if (error)
192 		return (error);
193 
194 	switch (name[0]) {
195 	case CTL_KERN:
196 		fn = kern_sysctl;
197 		break;
198 	case CTL_HW:
199 		fn = hw_sysctl;
200 		break;
201 	case CTL_VM:
202 		fn = uvm_sysctl;
203 		break;
204 	case CTL_NET:
205 		fn = net_sysctl;
206 		dokernellock = 0;
207 		break;
208 	case CTL_FS:
209 		fn = fs_sysctl;
210 		break;
211 	case CTL_VFS:
212 		fn = vfs_sysctl;
213 		break;
214 	case CTL_MACHDEP:
215 		fn = cpu_sysctl;
216 		break;
217 #ifdef DEBUG_SYSCTL
218 	case CTL_DEBUG:
219 		fn = debug_sysctl;
220 		break;
221 #endif
222 #ifdef DDB
223 	case CTL_DDB:
224 		fn = ddb_sysctl;
225 		break;
226 #endif
227 	default:
228 		return (EOPNOTSUPP);
229 	}
230 
231 	if (SCARG(uap, oldlenp) &&
232 	    (error = copyin(SCARG(uap, oldlenp), &oldlen, sizeof(oldlen))))
233 		return (error);
234 	if (dokernellock)
235 		KERNEL_LOCK();
236 	if (SCARG(uap, old) != NULL) {
237 		if ((error = rw_enter(&sysctl_lock, RW_WRITE|RW_INTR)) != 0)
238 			goto unlock;
239 		if (dolock) {
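			/* refuse requests that would exceed the wired-page limit */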
240 			if (atop(oldlen) > uvmexp.wiredmax - uvmexp.wired) {
241 				rw_exit_write(&sysctl_lock);
242 				error = ENOMEM;
243 				goto unlock;
244 			}
245 			error = uvm_vslock(p, SCARG(uap, old), oldlen,
246 			    PROT_READ | PROT_WRITE);
247 			if (error) {
248 				rw_exit_write(&sysctl_lock);
249 				goto unlock;
250 			}
251 		}
252 		savelen = oldlen;
253 	}
254 	error = (*fn)(&name[1], SCARG(uap, namelen) - 1, SCARG(uap, old),
255 	    &oldlen, SCARG(uap, new), SCARG(uap, newlen), p);
256 	if (SCARG(uap, old) != NULL) {
257 		if (dolock)
258 			uvm_vsunlock(p, SCARG(uap, old), savelen);
259 		rw_exit_write(&sysctl_lock);
260 	}
261 unlock:
262 	if (dokernellock)
263 		KERNEL_UNLOCK();
264 	if (error)
265 		return (error);
266 	if (SCARG(uap, oldlenp))
267 		error = copyout(&oldlen, SCARG(uap, oldlenp), sizeof(oldlen));
268 	return (error);
269 }
270 
271 /*
272  * Attributes stored in the kernel.
273  */
274 char hostname[MAXHOSTNAMELEN];
275 int hostnamelen;
276 char domainname[MAXHOSTNAMELEN];
277 int domainnamelen;
278 long hostid;
279 char *disknames = NULL;
280 size_t disknameslen;
281 struct diskstats *diskstats = NULL;
282 size_t diskstatslen;
283 int securelevel;
284 
285 /* morally const values reported by sysctl_bounded_arr */
286 static int arg_max = ARG_MAX;
287 static int openbsd = OpenBSD;
288 static int posix_version = _POSIX_VERSION;
289 static int ngroups_max = NGROUPS_MAX;
290 static int int_zero = 0;
291 static int int_one = 1;
292 static int maxpartitions = MAXPARTITIONS;
293 static int raw_part = RAW_PART;
294 
295 extern int somaxconn, sominconn;
296 extern int nosuidcoredump;
297 extern int maxlocksperuid;
298 extern int uvm_wxabort;
299 extern int global_ptrace;
300 
301 const struct sysctl_bounded_args kern_vars[] = {
302 	{KERN_OSREV, &openbsd, SYSCTL_INT_READONLY},
303 	{KERN_MAXVNODES, &maxvnodes, 0, INT_MAX},
304 	{KERN_MAXPROC, &maxprocess, 0, INT_MAX},
305 	{KERN_MAXFILES, &maxfiles, 0, INT_MAX},
306 	{KERN_NFILES, &numfiles, SYSCTL_INT_READONLY},
307 	{KERN_TTYCOUNT, &tty_count, SYSCTL_INT_READONLY},
308 	{KERN_ARGMAX, &arg_max, SYSCTL_INT_READONLY},
309 	{KERN_POSIX1, &posix_version, SYSCTL_INT_READONLY},
310 	{KERN_NGROUPS, &ngroups_max, SYSCTL_INT_READONLY},
311 	{KERN_JOB_CONTROL, &int_one, SYSCTL_INT_READONLY},
312 	{KERN_SAVED_IDS, &int_one, SYSCTL_INT_READONLY},
313 	{KERN_MAXPARTITIONS, &maxpartitions, SYSCTL_INT_READONLY},
314 	{KERN_RAWPARTITION, &raw_part, SYSCTL_INT_READONLY},
315 	{KERN_MAXTHREAD, &maxthread, 0, INT_MAX},
316 	{KERN_NTHREADS, &nthreads, SYSCTL_INT_READONLY},
317 	{KERN_SOMAXCONN, &somaxconn, 0, SHRT_MAX},
318 	{KERN_SOMINCONN, &sominconn, 0, SHRT_MAX},
319 	{KERN_NOSUIDCOREDUMP, &nosuidcoredump, 0, 3},
320 	{KERN_FSYNC, &int_one, SYSCTL_INT_READONLY},
321 	{KERN_SYSVMSG,
322 #ifdef SYSVMSG
323 	 &int_one,
324 #else
325 	 &int_zero,
326 #endif
327 	 SYSCTL_INT_READONLY},
328 	{KERN_SYSVSEM,
329 #ifdef SYSVSEM
330 	 &int_one,
331 #else
332 	 &int_zero,
333 #endif
334 	 SYSCTL_INT_READONLY},
335 	{KERN_SYSVSHM,
336 #ifdef SYSVSHM
337 	 &int_one,
338 #else
339 	 &int_zero,
340 #endif
341 	 SYSCTL_INT_READONLY},
342 	{KERN_FSCALE, &fscale, SYSCTL_INT_READONLY},
343 	{KERN_CCPU, &ccpu, SYSCTL_INT_READONLY},
344 	{KERN_NPROCS, &nprocesses, SYSCTL_INT_READONLY},
345 	{KERN_SPLASSERT, &splassert_ctl, 0, 3},
346 	{KERN_MAXLOCKSPERUID, &maxlocksperuid, 0, INT_MAX},
347 	{KERN_WXABORT, &uvm_wxabort, 0, 1},
348 	{KERN_NETLIVELOCKS, &int_zero, SYSCTL_INT_READONLY},
349 #ifdef PTRACE
350 	{KERN_GLOBAL_PTRACE, &global_ptrace, 0, 1},
351 #endif
352 	{KERN_AUTOCONF_SERIAL, &autoconf_serial, SYSCTL_INT_READONLY},
353 };
354 
355 int
356 kern_sysctl_dirs(int top_name, int *name, u_int namelen,
357     void *oldp, size_t *oldlenp, void *newp, size_t newlen, struct proc *p)
358 {
359 	switch (top_name) {
360 #ifndef SMALL_KERNEL
361 	case KERN_PROC:
362 		return (sysctl_doproc(name, namelen, oldp, oldlenp));
363 	case KERN_PROC_ARGS:
364 		return (sysctl_proc_args(name, namelen, oldp, oldlenp, p));
365 	case KERN_PROC_CWD:
366 		return (sysctl_proc_cwd(name, namelen, oldp, oldlenp, p));
367 	case KERN_PROC_NOBROADCASTKILL:
368 		return (sysctl_proc_nobroadcastkill(name, namelen,
369 		     newp, newlen, oldp, oldlenp, p));
370 	case KERN_PROC_VMMAP:
371 		return (sysctl_proc_vmmap(name, namelen, oldp, oldlenp, p));
372 	case KERN_FILE:
373 		return (sysctl_file(name, namelen, oldp, oldlenp, p));
374 #endif
375 #if defined(GPROF) || defined(DDBPROF)
376 	case KERN_PROF:
377 		return (sysctl_doprof(name, namelen, oldp, oldlenp,
378 		    newp, newlen));
379 #endif
380 	case KERN_MALLOCSTATS:
381 		return (sysctl_malloc(name, namelen, oldp, oldlenp,
382 		    newp, newlen, p));
383 	case KERN_TTY:
384 		return (sysctl_tty(name, namelen, oldp, oldlenp,
385 		    newp, newlen));
386 	case KERN_POOL:
387 		return (sysctl_dopool(name, namelen, oldp, oldlenp));
388 #if defined(SYSVMSG) || defined(SYSVSEM) || defined(SYSVSHM)
389 	case KERN_SYSVIPC_INFO:
390 		return (sysctl_sysvipc(name, namelen, oldp, oldlenp));
391 #endif
392 #ifdef SYSVSEM
393 	case KERN_SEMINFO:
394 		return (sysctl_sysvsem(name, namelen, oldp, oldlenp,
395 		    newp, newlen));
396 #endif
397 #ifdef SYSVSHM
398 	case KERN_SHMINFO:
399 		return (sysctl_sysvshm(name, namelen, oldp, oldlenp,
400 		    newp, newlen));
401 #endif
402 #ifndef SMALL_KERNEL
403 	case KERN_INTRCNT:
404 		return (sysctl_intrcnt(name, namelen, oldp, oldlenp));
405 	case KERN_WATCHDOG:
406 		return (sysctl_wdog(name, namelen, oldp, oldlenp,
407 		    newp, newlen));
408 #endif
409 #ifndef SMALL_KERNEL
410 	case KERN_EVCOUNT:
411 		return (evcount_sysctl(name, namelen, oldp, oldlenp,
412 		    newp, newlen));
413 #endif
414 	case KERN_TIMECOUNTER:
415 		return (sysctl_tc(name, namelen, oldp, oldlenp, newp, newlen));
416 	case KERN_CPTIME2:
417 		return (sysctl_cptime2(name, namelen, oldp, oldlenp,
418 		    newp, newlen));
419 #ifdef WITNESS
420 	case KERN_WITNESSWATCH:
421 		return witness_sysctl_watch(oldp, oldlenp, newp, newlen);
422 	case KERN_WITNESS:
423 		return witness_sysctl(name, namelen, oldp, oldlenp,
424 		    newp, newlen);
425 #endif
426 #if NAUDIO > 0
427 	case KERN_AUDIO:
428 		return (sysctl_audio(name, namelen, oldp, oldlenp,
429 		    newp, newlen));
430 #endif
431 #if NVIDEO > 0
432 	case KERN_VIDEO:
433 		return (sysctl_video(name, namelen, oldp, oldlenp,
434 		    newp, newlen));
435 #endif
436 	case KERN_CPUSTATS:
437 		return (sysctl_cpustats(name, namelen, oldp, oldlenp,
438 		    newp, newlen));
439 #ifdef __HAVE_CLOCKINTR
440 	case KERN_CLOCKINTR:
441 		return sysctl_clockintr(name, namelen, oldp, oldlenp, newp,
442 		    newlen);
443 #endif
444 	default:
445 		return (ENOTDIR);	/* overloaded */
446 	}
447 }
448 
449 /*
450  * kernel related system variables.
451  */
452 int
453 kern_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
454     size_t newlen, struct proc *p)
455 {
456 	int error, level, inthostid, stackgap;
457 	dev_t dev;
458 	extern int pool_debug;
459 
460 	/* dispatch the non-terminal nodes first */
461 	if (namelen != 1) {
462 		return kern_sysctl_dirs(name[0], name + 1, namelen - 1,
463 		    oldp, oldlenp, newp, newlen, p);
464 	}
465 
466 	switch (name[0]) {
467 	case KERN_OSTYPE:
468 		return (sysctl_rdstring(oldp, oldlenp, newp, ostype));
469 	case KERN_OSRELEASE:
470 		return (sysctl_rdstring(oldp, oldlenp, newp, osrelease));
471 	case KERN_OSVERSION:
472 		return (sysctl_rdstring(oldp, oldlenp, newp, osversion));
473 	case KERN_VERSION:
474 		return (sysctl_rdstring(oldp, oldlenp, newp, version));
475 	case KERN_NUMVNODES:  /* XXX numvnodes is a long */
476 		return (sysctl_rdint(oldp, oldlenp, newp, numvnodes));
477 	case KERN_SECURELVL:
478 		level = securelevel;
479 		if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &level)) ||
480 		    newp == NULL)
481 			return (error);
482 		if ((securelevel > 0 || level < -1) &&
483 		    level < securelevel && p->p_p->ps_pid != 1)
484 			return (EPERM);
485 		securelevel = level;
486 		return (0);
487 #if NDT > 0
488 	case KERN_ALLOWDT:
489 		return (sysctl_securelevel_int(oldp, oldlenp, newp, newlen,
490 		    &allowdt));
491 #endif
492 	case KERN_ALLOWKMEM:
493 		return (sysctl_securelevel_int(oldp, oldlenp, newp, newlen,
494 		    &allowkmem));
495 	case KERN_HOSTNAME:
496 		error = sysctl_tstring(oldp, oldlenp, newp, newlen,
497 		    hostname, sizeof(hostname));
498 		if (newp && !error)
499 			hostnamelen = newlen;
500 		return (error);
501 	case KERN_DOMAINNAME:
502 		if (securelevel >= 1 && domainnamelen && newp)
503 			error = EPERM;
504 		else
505 			error = sysctl_tstring(oldp, oldlenp, newp, newlen,
506 			    domainname, sizeof(domainname));
507 		if (newp && !error)
508 			domainnamelen = newlen;
509 		return (error);
510 	case KERN_HOSTID:
511 		inthostid = hostid;  /* XXX assumes sizeof long <= sizeof int */
512 		error =  sysctl_int(oldp, oldlenp, newp, newlen, &inthostid);
513 		hostid = inthostid;
514 		return (error);
515 	case KERN_CLOCKRATE:
516 		return (sysctl_clockrate(oldp, oldlenp, newp));
517 	case KERN_BOOTTIME: {
518 		struct timeval bt;
519 		memset(&bt, 0, sizeof bt);
520 		microboottime(&bt);
521 		return (sysctl_rdstruct(oldp, oldlenp, newp, &bt, sizeof bt));
522 	  }
523 	case KERN_MBSTAT: {
524 		extern struct cpumem *mbstat;
525 		uint64_t counters[MBSTAT_COUNT];
526 		struct mbstat mbs;
527 		unsigned int i;
528 
529 		memset(&mbs, 0, sizeof(mbs));
530 		counters_read(mbstat, counters, MBSTAT_COUNT);
531 		for (i = 0; i < MBSTAT_TYPES; i++)
532 			mbs.m_mtypes[i] = counters[i];
533 
534 		mbs.m_drops = counters[MBSTAT_DROPS];
535 		mbs.m_wait = counters[MBSTAT_WAIT];
536 		mbs.m_drain = counters[MBSTAT_DRAIN];
537 
538 		return (sysctl_rdstruct(oldp, oldlenp, newp,
539 		    &mbs, sizeof(mbs)));
540 	}
541 	case KERN_MSGBUFSIZE:
542 	case KERN_CONSBUFSIZE: {
543 		struct msgbuf *mp;
544 		mp = (name[0] == KERN_MSGBUFSIZE) ? msgbufp : consbufp;
545 		/*
546 		 * deal with cases where the message buffer has
547 		 * become corrupted.
548 		 */
549 		if (!mp || mp->msg_magic != MSG_MAGIC)
550 			return (ENXIO);
551 		return (sysctl_rdint(oldp, oldlenp, newp, mp->msg_bufs));
552 	}
553 	case KERN_CONSBUF:
554 		if ((error = suser(p)))
555 			return (error);
556 		/* FALLTHROUGH */
557 	case KERN_MSGBUF: {
558 		struct msgbuf *mp;
559 		mp = (name[0] == KERN_MSGBUF) ? msgbufp : consbufp;
560 		/* see note above */
561 		if (!mp || mp->msg_magic != MSG_MAGIC)
562 			return (ENXIO);
563 		return (sysctl_rdstruct(oldp, oldlenp, newp, mp,
564 		    mp->msg_bufs + offsetof(struct msgbuf, msg_bufc)));
565 	}
566 	case KERN_CPTIME:
567 	{
568 		CPU_INFO_ITERATOR cii;
569 		struct cpu_info *ci;
570 		long cp_time[CPUSTATES];
571 		int i, n = 0;
572 
573 		memset(cp_time, 0, sizeof(cp_time));
574 
575 		CPU_INFO_FOREACH(cii, ci) {
576 			if (!cpu_is_online(ci))
577 				continue;
578 			n++;
579 			for (i = 0; i < CPUSTATES; i++)
580 				cp_time[i] += ci->ci_schedstate.spc_cp_time[i];
581 		}
582 
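		/* report the average of the summed counters over the n online CPUs */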
583 		for (i = 0; i < CPUSTATES; i++)
584 			cp_time[i] /= n;
585 
586 		return (sysctl_rdstruct(oldp, oldlenp, newp, &cp_time,
587 		    sizeof(cp_time)));
588 	}
589 	case KERN_NCHSTATS:
590 		return (sysctl_rdstruct(oldp, oldlenp, newp, &nchstats,
591 		    sizeof(struct nchstats)));
592 	case KERN_FORKSTAT:
593 		return (sysctl_rdstruct(oldp, oldlenp, newp, &forkstat,
594 		    sizeof(struct forkstat)));
595 	case KERN_STACKGAPRANDOM:
596 		stackgap = stackgap_random;
597 		error = sysctl_int(oldp, oldlenp, newp, newlen, &stackgap);
598 		if (error)
599 			return (error);
600 		/*
601 		 * Safety harness.
602 		 */
603 		if ((stackgap < ALIGNBYTES && stackgap != 0) ||
604 		    !powerof2(stackgap) || stackgap >= MAXSSIZ)
605 			return (EINVAL);
606 		stackgap_random = stackgap;
607 		return (0);
608 	case KERN_MAXCLUSTERS: {
609 		int val = nmbclust;
610 		error = sysctl_int(oldp, oldlenp, newp, newlen, &val);
611 		if (error == 0 && val != nmbclust)
612 			error = nmbclust_update(val);
613 		return (error);
614 	}
615 	case KERN_CACHEPCT: {
616 		u_int64_t dmapages;
617 		int opct, pgs;
618 		opct = bufcachepercent;
619 		error = sysctl_int(oldp, oldlenp, newp, newlen,
620 		    &bufcachepercent);
621 		if (error)
622 			return (error);
623 		if (bufcachepercent > 90 || bufcachepercent < 5) {
624 			bufcachepercent = opct;
625 			return (EINVAL);
626 		}
627 		dmapages = uvm_pagecount(&dma_constraint);
628 		if (bufcachepercent != opct) {
629 			pgs = bufcachepercent * dmapages / 100;
630 			bufadjust(pgs); /* adjust bufpages */
631 			bufhighpages = bufpages; /* set high water mark */
632 		}
633 		return (0);
634 	}
635 	case KERN_CONSDEV:
636 		if (cn_tab != NULL)
637 			dev = cn_tab->cn_dev;
638 		else
639 			dev = NODEV;
640 		return sysctl_rdstruct(oldp, oldlenp, newp, &dev, sizeof(dev));
641 	case KERN_POOL_DEBUG: {
642 		int old_pool_debug = pool_debug;
643 
644 		error = sysctl_int(oldp, oldlenp, newp, newlen,
645 		    &pool_debug);
646 		if (error == 0 && pool_debug != old_pool_debug)
647 			pool_reclaim_all();
648 		return (error);
649 	}
650 #if NPF > 0
651 	case KERN_PFSTATUS:
652 		return (pf_sysctl(oldp, oldlenp, newp, newlen));
653 #endif
654 	case KERN_TIMEOUT_STATS:
655 		return (timeout_sysctl(oldp, oldlenp, newp, newlen));
656 	case KERN_UTC_OFFSET:
657 		return (sysctl_utc_offset(oldp, oldlenp, newp, newlen));
658 	default:
659 		return (sysctl_bounded_arr(kern_vars, nitems(kern_vars), name,
660 		    namelen, oldp, oldlenp, newp, newlen));
661 	}
662 	/* NOTREACHED */
663 }
664 
665 /*
666  * hardware related system variables.
667  */
668 char *hw_vendor, *hw_prod, *hw_uuid, *hw_serial, *hw_ver;
669 int allowpowerdown = 1;
670 int hw_power = 1;
671 
672 /* morally const values reported by sysctl_bounded_arr */
673 static int byte_order = BYTE_ORDER;
674 static int page_size = PAGE_SIZE;
675 
676 const struct sysctl_bounded_args hw_vars[] = {
677 	{HW_NCPU, &ncpus, SYSCTL_INT_READONLY},
678 	{HW_NCPUFOUND, &ncpusfound, SYSCTL_INT_READONLY},
679 	{HW_BYTEORDER, &byte_order, SYSCTL_INT_READONLY},
680 	{HW_PAGESIZE, &page_size, SYSCTL_INT_READONLY},
681 	{HW_DISKCOUNT, &disk_count, SYSCTL_INT_READONLY},
682 	{HW_POWER, &hw_power, SYSCTL_INT_READONLY},
683 };
684 
685 int
686 hw_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
687     size_t newlen, struct proc *p)
688 {
689 	extern char machine[], cpu_model[];
690 	int err, cpuspeed;
691 
692 	/* all sysctl names at this level except sensors are terminal */
693 	if (name[0] != HW_SENSORS && namelen != 1)
694 		return (ENOTDIR);		/* overloaded */
695 
696 	switch (name[0]) {
697 	case HW_MACHINE:
698 		return (sysctl_rdstring(oldp, oldlenp, newp, machine));
699 	case HW_MODEL:
700 		return (sysctl_rdstring(oldp, oldlenp, newp, cpu_model));
701 	case HW_NCPUONLINE:
702 		return (sysctl_rdint(oldp, oldlenp, newp,
703 		    sysctl_hwncpuonline()));
704 	case HW_PHYSMEM:
705 		return (sysctl_rdint(oldp, oldlenp, newp, ptoa(physmem)));
706 	case HW_USERMEM:
707 		return (sysctl_rdint(oldp, oldlenp, newp,
708 		    ptoa(physmem - uvmexp.wired)));
709 	case HW_DISKNAMES:
710 		err = sysctl_diskinit(0, p);
711 		if (err)
712 			return err;
713 		if (disknames)
714 			return (sysctl_rdstring(oldp, oldlenp, newp,
715 			    disknames));
716 		else
717 			return (sysctl_rdstring(oldp, oldlenp, newp, ""));
718 	case HW_DISKSTATS:
719 		err = sysctl_diskinit(1, p);
720 		if (err)
721 			return err;
722 		return (sysctl_rdstruct(oldp, oldlenp, newp, diskstats,
723 		    disk_count * sizeof(struct diskstats)));
724 	case HW_CPUSPEED:
725 		if (!cpu_cpuspeed)
726 			return (EOPNOTSUPP);
727 		err = cpu_cpuspeed(&cpuspeed);
728 		if (err)
729 			return err;
730 		return (sysctl_rdint(oldp, oldlenp, newp, cpuspeed));
731 #ifndef	SMALL_KERNEL
732 	case HW_SENSORS:
733 		return (sysctl_sensors(name + 1, namelen - 1, oldp, oldlenp,
734 		    newp, newlen));
735 	case HW_SETPERF:
736 		return (sysctl_hwsetperf(oldp, oldlenp, newp, newlen));
737 	case HW_PERFPOLICY:
738 		return (sysctl_hwperfpolicy(oldp, oldlenp, newp, newlen));
739 #endif /* !SMALL_KERNEL */
740 	case HW_VENDOR:
741 		if (hw_vendor)
742 			return (sysctl_rdstring(oldp, oldlenp, newp,
743 			    hw_vendor));
744 		else
745 			return (EOPNOTSUPP);
746 	case HW_PRODUCT:
747 		if (hw_prod)
748 			return (sysctl_rdstring(oldp, oldlenp, newp, hw_prod));
749 		else
750 			return (EOPNOTSUPP);
751 	case HW_VERSION:
752 		if (hw_ver)
753 			return (sysctl_rdstring(oldp, oldlenp, newp, hw_ver));
754 		else
755 			return (EOPNOTSUPP);
756 	case HW_SERIALNO:
757 		if (hw_serial)
758 			return (sysctl_rdstring(oldp, oldlenp, newp,
759 			    hw_serial));
760 		else
761 			return (EOPNOTSUPP);
762 	case HW_UUID:
763 		if (hw_uuid)
764 			return (sysctl_rdstring(oldp, oldlenp, newp, hw_uuid));
765 		else
766 			return (EOPNOTSUPP);
767 	case HW_PHYSMEM64:
768 		return (sysctl_rdquad(oldp, oldlenp, newp,
769 		    ptoa((psize_t)physmem)));
770 	case HW_USERMEM64:
771 		return (sysctl_rdquad(oldp, oldlenp, newp,
772 		    ptoa((psize_t)physmem - uvmexp.wired)));
773 	case HW_ALLOWPOWERDOWN:
774 		return (sysctl_securelevel_int(oldp, oldlenp, newp, newlen,
775 		    &allowpowerdown));
776 #ifdef __HAVE_CPU_TOPOLOGY
777 	case HW_SMT:
778 		return (sysctl_hwsmt(oldp, oldlenp, newp, newlen));
779 #endif
780 	default:
781 		return sysctl_bounded_arr(hw_vars, nitems(hw_vars), name,
782 		    namelen, oldp, oldlenp, newp, newlen);
783 	}
784 	/* NOTREACHED */
785 }
786 
787 #ifdef DEBUG_SYSCTL
788 /*
789  * Debugging related system variables.
790  */
791 extern struct ctldebug debug_vfs_busyprt;
792 struct ctldebug debug1, debug2, debug3, debug4;
793 struct ctldebug debug5, debug6, debug7, debug8, debug9;
794 struct ctldebug debug10, debug11, debug12, debug13, debug14;
795 struct ctldebug debug15, debug16, debug17, debug18, debug19;
796 static struct ctldebug *debugvars[CTL_DEBUG_MAXID] = {
797 	&debug_vfs_busyprt,
798 	&debug1, &debug2, &debug3, &debug4,
799 	&debug5, &debug6, &debug7, &debug8, &debug9,
800 	&debug10, &debug11, &debug12, &debug13, &debug14,
801 	&debug15, &debug16, &debug17, &debug18, &debug19,
802 };
803 int
804 debug_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
805     size_t newlen, struct proc *p)
806 {
807 	struct ctldebug *cdp;
808 
809 	/* all sysctl names at this level are name and field */
810 	if (namelen != 2)
811 		return (ENOTDIR);		/* overloaded */
812 	if (name[0] < 0 || name[0] >= nitems(debugvars))
813 		return (EOPNOTSUPP);
814 	cdp = debugvars[name[0]];
815 	if (cdp->debugname == 0)
816 		return (EOPNOTSUPP);
817 	switch (name[1]) {
818 	case CTL_DEBUG_NAME:
819 		return (sysctl_rdstring(oldp, oldlenp, newp, cdp->debugname));
820 	case CTL_DEBUG_VALUE:
821 		return (sysctl_int(oldp, oldlenp, newp, newlen, cdp->debugvar));
822 	default:
823 		return (EOPNOTSUPP);
824 	}
825 	/* NOTREACHED */
826 }
827 #endif /* DEBUG_SYSCTL */
828 
829 /*
830  * Reads, or writes that lower the value
831  */
832 int
833 sysctl_int_lower(void *oldp, size_t *oldlenp, void *newp, size_t newlen,
834     int *valp)
835 {
836 	unsigned int oval = *valp, val = *valp;
837 	int error;
838 
839 	if (newp == NULL)
840 		return (sysctl_rdint(oldp, oldlenp, newp, val));
841 
842 	if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &val)))
843 		return (error);
844 	if (val > oval)
845 		return (EPERM);		/* do not allow raising */
846 	*(unsigned int *)valp = val;
847 	return (0);
848 }
849 
850 /*
851  * Validate parameters and get old / set new parameters
852  * for an integer-valued sysctl function.
853  */
854 int
855 sysctl_int(void *oldp, size_t *oldlenp, void *newp, size_t newlen, int *valp)
856 {
857 	int error = 0;
858 
859 	if (oldp && *oldlenp < sizeof(int))
860 		return (ENOMEM);
861 	if (newp && newlen != sizeof(int))
862 		return (EINVAL);
863 	*oldlenp = sizeof(int);
864 	if (oldp)
865 		error = copyout(valp, oldp, sizeof(int));
866 	if (error == 0 && newp)
867 		error = copyin(newp, valp, sizeof(int));
868 	return (error);
869 }
870 
871 /*
872  * As above, but read-only.
873  */
874 int
875 sysctl_rdint(void *oldp, size_t *oldlenp, void *newp, int val)
876 {
877 	int error = 0;
878 
879 	if (oldp && *oldlenp < sizeof(int))
880 		return (ENOMEM);
881 	if (newp)
882 		return (EPERM);
883 	*oldlenp = sizeof(int);
884 	if (oldp)
885 		error = copyout((caddr_t)&val, oldp, sizeof(int));
886 	return (error);
887 }
888 
889 /*
890  * Selects between sysctl_rdint and sysctl_int according to securelevel.
891  */
892 int
893 sysctl_securelevel_int(void *oldp, size_t *oldlenp, void *newp, size_t newlen,
894     int *valp)
895 {
896 	if (securelevel > 0)
897 		return (sysctl_rdint(oldp, oldlenp, newp, *valp));
898 	return (sysctl_int(oldp, oldlenp, newp, newlen, valp));
899 }
900 
901 /*
902  * Read-only or bounded integer values.
903  */
904 int
905 sysctl_int_bounded(void *oldp, size_t *oldlenp, void *newp, size_t newlen,
906     int *valp, int minimum, int maximum)
907 {
908 	int val = *valp;
909 	int error;
910 
911 	/* read only */
912 	if (newp == NULL || minimum > maximum)
913 		return (sysctl_rdint(oldp, oldlenp, newp, val));
914 
915 	if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &val)))
916 		return (error);
917 	/* outside limits */
918 	if (val < minimum || maximum < val)
919 		return (EINVAL);
920 	*valp = val;
921 	return (0);
922 }
923 
924 /*
925  * Array of read-only or bounded integer values.
926  */
927 int
928 sysctl_bounded_arr(const struct sysctl_bounded_args *valpp, u_int valplen,
929     int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
930     size_t newlen)
931 {
932 	u_int i;
933 	if (namelen != 1)
934 		return (ENOTDIR);
935 	for (i = 0; i < valplen; ++i) {
936 		if (valpp[i].mib == name[0]) {
937 			return (sysctl_int_bounded(oldp, oldlenp, newp, newlen,
938 			    valpp[i].var, valpp[i].minimum, valpp[i].maximum));
939 		}
940 	}
941 	return (EOPNOTSUPP);
942 }
943 
944 /*
945  * Validate parameters and get old / set new parameters
946  * for a 64-bit integer-valued sysctl function.
947  */
948 int
949 sysctl_quad(void *oldp, size_t *oldlenp, void *newp, size_t newlen,
950     int64_t *valp)
951 {
952 	int error = 0;
953 
954 	if (oldp && *oldlenp < sizeof(int64_t))
955 		return (ENOMEM);
956 	if (newp && newlen != sizeof(int64_t))
957 		return (EINVAL);
958 	*oldlenp = sizeof(int64_t);
959 	if (oldp)
960 		error = copyout(valp, oldp, sizeof(int64_t));
961 	if (error == 0 && newp)
962 		error = copyin(newp, valp, sizeof(int64_t));
963 	return (error);
964 }
965 
966 /*
967  * As above, but read-only.
968  */
969 int
970 sysctl_rdquad(void *oldp, size_t *oldlenp, void *newp, int64_t val)
971 {
972 	int error = 0;
973 
974 	if (oldp && *oldlenp < sizeof(int64_t))
975 		return (ENOMEM);
976 	if (newp)
977 		return (EPERM);
978 	*oldlenp = sizeof(int64_t);
979 	if (oldp)
980 		error = copyout((caddr_t)&val, oldp, sizeof(int64_t));
981 	return (error);
982 }
983 
984 /*
985  * Validate parameters and get old / set new parameters
986  * for a string-valued sysctl function.
987  */
988 int
989 sysctl_string(void *oldp, size_t *oldlenp, void *newp, size_t newlen, char *str,
990     size_t maxlen)
991 {
992 	return sysctl__string(oldp, oldlenp, newp, newlen, str, maxlen, 0);
993 }
994 
995 int
996 sysctl_tstring(void *oldp, size_t *oldlenp, void *newp, size_t newlen,
997     char *str, size_t maxlen)
998 {
999 	return sysctl__string(oldp, oldlenp, newp, newlen, str, maxlen, 1);
1000 }
1001 
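/*
 * Common backend for sysctl_string() and sysctl_tstring() above: if
 * "trunc" is set and the caller's buffer is too small, copy out a
 * truncated but NUL-terminated string instead of failing with ENOMEM.
 */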
1002 int
1003 sysctl__string(void *oldp, size_t *oldlenp, void *newp, size_t newlen,
1004     char *str, size_t maxlen, int trunc)
1005 {
1006 	size_t len;
1007 	int error = 0;
1008 
1009 	len = strlen(str) + 1;
1010 	if (oldp && *oldlenp < len) {
1011 		if (trunc == 0 || *oldlenp == 0)
1012 			return (ENOMEM);
1013 	}
1014 	if (newp && newlen >= maxlen)
1015 		return (EINVAL);
1016 	if (oldp) {
1017 		if (trunc && *oldlenp < len) {
1018 			len = *oldlenp;
1019 			error = copyout(str, oldp, len - 1);
1020 			if (error == 0)
1021 				error = copyout("", (char *)oldp + len - 1, 1);
1022 		} else {
1023 			error = copyout(str, oldp, len);
1024 		}
1025 	}
1026 	*oldlenp = len;
1027 	if (error == 0 && newp) {
1028 		error = copyin(newp, str, newlen);
1029 		str[newlen] = 0;
1030 	}
1031 	return (error);
1032 }
1033 
1034 /*
1035  * As above, but read-only.
1036  */
1037 int
1038 sysctl_rdstring(void *oldp, size_t *oldlenp, void *newp, const char *str)
1039 {
1040 	size_t len;
1041 	int error = 0;
1042 
1043 	len = strlen(str) + 1;
1044 	if (oldp && *oldlenp < len)
1045 		return (ENOMEM);
1046 	if (newp)
1047 		return (EPERM);
1048 	*oldlenp = len;
1049 	if (oldp)
1050 		error = copyout(str, oldp, len);
1051 	return (error);
1052 }
1053 
1054 /*
1055  * Validate parameters and get old / set new parameters
1056  * for a structure oriented sysctl function.
1057  */
1058 int
1059 sysctl_struct(void *oldp, size_t *oldlenp, void *newp, size_t newlen, void *sp,
1060     size_t len)
1061 {
1062 	int error = 0;
1063 
1064 	if (oldp && *oldlenp < len)
1065 		return (ENOMEM);
1066 	if (newp && newlen > len)
1067 		return (EINVAL);
1068 	if (oldp) {
1069 		*oldlenp = len;
1070 		error = copyout(sp, oldp, len);
1071 	}
1072 	if (error == 0 && newp)
1073 		error = copyin(newp, sp, len);
1074 	return (error);
1075 }
1076 
1077 /*
1078  * Validate parameters and get old parameters
1079  * for a structure oriented sysctl function.
1080  */
1081 int
1082 sysctl_rdstruct(void *oldp, size_t *oldlenp, void *newp, const void *sp,
1083     size_t len)
1084 {
1085 	int error = 0;
1086 
1087 	if (oldp && *oldlenp < len)
1088 		return (ENOMEM);
1089 	if (newp)
1090 		return (EPERM);
1091 	*oldlenp = len;
1092 	if (oldp)
1093 		error = copyout(sp, oldp, len);
1094 	return (error);
1095 }
1096 
1097 #ifndef SMALL_KERNEL
1098 void
1099 fill_file(struct kinfo_file *kf, struct file *fp, struct filedesc *fdp,
1100 	  int fd, struct vnode *vp, struct process *pr, struct proc *p,
1101 	  struct socket *so, int show_pointers)
1102 {
1103 	struct vattr va;
1104 
1105 	memset(kf, 0, sizeof(*kf));
1106 
1107 	kf->fd_fd = fd;		/* might not really be an fd */
1108 
1109 	if (fp != NULL) {
1110 		if (show_pointers)
1111 			kf->f_fileaddr = PTRTOINT64(fp);
1112 		kf->f_flag = fp->f_flag;
1113 		kf->f_iflags = fp->f_iflags;
1114 		kf->f_type = fp->f_type;
1115 		kf->f_count = fp->f_count;
1116 		if (show_pointers)
1117 			kf->f_ucred = PTRTOINT64(fp->f_cred);
1118 		kf->f_uid = fp->f_cred->cr_uid;
1119 		kf->f_gid = fp->f_cred->cr_gid;
1120 		if (show_pointers)
1121 			kf->f_ops = PTRTOINT64(fp->f_ops);
1122 		if (show_pointers)
1123 			kf->f_data = PTRTOINT64(fp->f_data);
1124 		kf->f_usecount = 0;
1125 
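		/* only root or the file's owner may see offsets and I/O counters */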
1126 		if (suser(p) == 0 || p->p_ucred->cr_uid == fp->f_cred->cr_uid) {
1127 			mtx_enter(&fp->f_mtx);
1128 			kf->f_offset = fp->f_offset;
1129 			kf->f_rxfer = fp->f_rxfer;
1130 			kf->f_rwfer = fp->f_wxfer;
1131 			kf->f_seek = fp->f_seek;
1132 			kf->f_rbytes = fp->f_rbytes;
1133 			kf->f_wbytes = fp->f_wbytes;
1134 			mtx_leave(&fp->f_mtx);
1135 		} else
1136 			kf->f_offset = -1;
1137 	} else if (vp != NULL) {
1138 		/* fake it */
1139 		kf->f_type = DTYPE_VNODE;
1140 		kf->f_flag = FREAD;
1141 		if (fd == KERN_FILE_TRACE)
1142 			kf->f_flag |= FWRITE;
1143 	} else if (so != NULL) {
1144 		/* fake it */
1145 		kf->f_type = DTYPE_SOCKET;
1146 	}
1147 
1148 	/* information about the object associated with this file */
1149 	switch (kf->f_type) {
1150 	case DTYPE_VNODE:
1151 		if (fp != NULL)
1152 			vp = (struct vnode *)fp->f_data;
1153 
1154 		if (show_pointers)
1155 			kf->v_un = PTRTOINT64(vp->v_un.vu_socket);
1156 		kf->v_type = vp->v_type;
1157 		kf->v_tag = vp->v_tag;
1158 		kf->v_flag = vp->v_flag;
1159 		if (show_pointers)
1160 			kf->v_data = PTRTOINT64(vp->v_data);
1161 		if (show_pointers)
1162 			kf->v_mount = PTRTOINT64(vp->v_mount);
1163 		if (vp->v_mount)
1164 			strlcpy(kf->f_mntonname,
1165 			    vp->v_mount->mnt_stat.f_mntonname,
1166 			    sizeof(kf->f_mntonname));
1167 
1168 		if (VOP_GETATTR(vp, &va, p->p_ucred, p) == 0) {
1169 			kf->va_fileid = va.va_fileid;
1170 			kf->va_mode = MAKEIMODE(va.va_type, va.va_mode);
1171 			kf->va_size = va.va_size;
1172 			kf->va_rdev = va.va_rdev;
1173 			kf->va_fsid = va.va_fsid & 0xffffffff;
1174 			kf->va_nlink = va.va_nlink;
1175 		}
1176 		break;
1177 
1178 	case DTYPE_SOCKET: {
1179 		int locked = 0;
1180 
1181 		if (so == NULL) {
1182 			so = (struct socket *)fp->f_data;
1183 			/* if so is passed as a parameter it is already locked */
1184 			switch (so->so_proto->pr_domain->dom_family) {
1185 			case AF_INET:
1186 			case AF_INET6:
1187 				NET_LOCK();
1188 				locked = 1;
1189 				break;
1190 			}
1191 		}
1192 
1193 		kf->so_type = so->so_type;
1194 		kf->so_state = so->so_state | so->so_snd.sb_state |
1195 		    so->so_rcv.sb_state;
1196 		if (show_pointers)
1197 			kf->so_pcb = PTRTOINT64(so->so_pcb);
1198 		else
1199 			kf->so_pcb = -1;
1200 		kf->so_protocol = so->so_proto->pr_protocol;
1201 		kf->so_family = so->so_proto->pr_domain->dom_family;
1202 		kf->so_rcv_cc = so->so_rcv.sb_cc;
1203 		kf->so_snd_cc = so->so_snd.sb_cc;
1204 		if (isspliced(so)) {
1205 			if (show_pointers)
1206 				kf->so_splice =
1207 				    PTRTOINT64(so->so_sp->ssp_socket);
1208 			kf->so_splicelen = so->so_sp->ssp_len;
1209 		} else if (issplicedback(so))
1210 			kf->so_splicelen = -1;
1211 		if (so->so_pcb == NULL) {
1212 			if (locked)
1213 				NET_UNLOCK();
1214 			break;
1215 		}
1216 		switch (kf->so_family) {
1217 		case AF_INET: {
1218 			struct inpcb *inpcb = so->so_pcb;
1219 
1220 			NET_ASSERT_LOCKED();
1221 			if (show_pointers)
1222 				kf->inp_ppcb = PTRTOINT64(inpcb->inp_ppcb);
1223 			kf->inp_lport = inpcb->inp_lport;
1224 			kf->inp_laddru[0] = inpcb->inp_laddr.s_addr;
1225 			kf->inp_fport = inpcb->inp_fport;
1226 			kf->inp_faddru[0] = inpcb->inp_faddr.s_addr;
1227 			kf->inp_rtableid = inpcb->inp_rtableid;
1228 			if (so->so_type == SOCK_RAW)
1229 				kf->inp_proto = inpcb->inp_ip.ip_p;
1230 			if (so->so_proto->pr_protocol == IPPROTO_TCP) {
1231 				struct tcpcb *tcpcb = (void *)inpcb->inp_ppcb;
1232 				kf->t_rcv_wnd = tcpcb->rcv_wnd;
1233 				kf->t_snd_wnd = tcpcb->snd_wnd;
1234 				kf->t_snd_cwnd = tcpcb->snd_cwnd;
1235 				kf->t_state = tcpcb->t_state;
1236 			}
1237 			break;
1238 		    }
1239 		case AF_INET6: {
1240 			struct inpcb *inpcb = so->so_pcb;
1241 
1242 			NET_ASSERT_LOCKED();
1243 			if (show_pointers)
1244 				kf->inp_ppcb = PTRTOINT64(inpcb->inp_ppcb);
1245 			kf->inp_lport = inpcb->inp_lport;
1246 			kf->inp_laddru[0] = inpcb->inp_laddr6.s6_addr32[0];
1247 			kf->inp_laddru[1] = inpcb->inp_laddr6.s6_addr32[1];
1248 			kf->inp_laddru[2] = inpcb->inp_laddr6.s6_addr32[2];
1249 			kf->inp_laddru[3] = inpcb->inp_laddr6.s6_addr32[3];
1250 			kf->inp_fport = inpcb->inp_fport;
1251 			kf->inp_faddru[0] = inpcb->inp_faddr6.s6_addr32[0];
1252 			kf->inp_faddru[1] = inpcb->inp_faddr6.s6_addr32[1];
1253 			kf->inp_faddru[2] = inpcb->inp_faddr6.s6_addr32[2];
1254 			kf->inp_faddru[3] = inpcb->inp_faddr6.s6_addr32[3];
1255 			kf->inp_rtableid = inpcb->inp_rtableid;
1256 			if (so->so_type == SOCK_RAW)
1257 				kf->inp_proto = inpcb->inp_ipv6.ip6_nxt;
1258 			if (so->so_proto->pr_protocol == IPPROTO_TCP) {
1259 				struct tcpcb *tcpcb = (void *)inpcb->inp_ppcb;
1260 				kf->t_rcv_wnd = tcpcb->rcv_wnd;
1261 				kf->t_snd_wnd = tcpcb->snd_wnd;
1262 				kf->t_state = tcpcb->t_state;
1263 			}
1264 			break;
1265 		    }
1266 		case AF_UNIX: {
1267 			struct unpcb *unpcb = so->so_pcb;
1268 
1269 			kf->f_msgcount = unpcb->unp_msgcount;
1270 			if (show_pointers) {
1271 				kf->unp_conn	= PTRTOINT64(unpcb->unp_conn);
1272 				kf->unp_refs	= PTRTOINT64(
1273 				    SLIST_FIRST(&unpcb->unp_refs));
1274 				kf->unp_nextref	= PTRTOINT64(
1275 				    SLIST_NEXT(unpcb, unp_nextref));
1276 				kf->v_un	= PTRTOINT64(unpcb->unp_vnode);
1277 				kf->unp_addr	= PTRTOINT64(unpcb->unp_addr);
1278 			}
1279 			if (unpcb->unp_addr != NULL) {
1280 				struct sockaddr_un *un = mtod(unpcb->unp_addr,
1281 				    struct sockaddr_un *);
1282 				memcpy(kf->unp_path, un->sun_path, un->sun_len
1283 				    - offsetof(struct sockaddr_un,sun_path));
1284 			}
1285 			break;
1286 		    }
1287 		}
1288 		if (locked)
1289 			NET_UNLOCK();
1290 		break;
1291 	    }
1292 
1293 	case DTYPE_PIPE: {
1294 		struct pipe *pipe = (struct pipe *)fp->f_data;
1295 
1296 		if (show_pointers)
1297 			kf->pipe_peer = PTRTOINT64(pipe->pipe_peer);
1298 		kf->pipe_state = pipe->pipe_state;
1299 		break;
1300 	    }
1301 
1302 	case DTYPE_KQUEUE: {
1303 		struct kqueue *kqi = (struct kqueue *)fp->f_data;
1304 
1305 		kf->kq_count = kqi->kq_count;
1306 		kf->kq_state = kqi->kq_state;
1307 		break;
1308 	    }
1309 	}
1310 
1311 	/* per-process information for KERN_FILE_BY[PU]ID */
1312 	if (pr != NULL) {
1313 		kf->p_pid = pr->ps_pid;
1314 		kf->p_uid = pr->ps_ucred->cr_uid;
1315 		kf->p_gid = pr->ps_ucred->cr_gid;
1316 		kf->p_tid = -1;
1317 		strlcpy(kf->p_comm, pr->ps_comm, sizeof(kf->p_comm));
1318 	}
1319 	if (fdp != NULL) {
1320 		fdplock(fdp);
1321 		kf->fd_ofileflags = fdp->fd_ofileflags[fd];
1322 		fdpunlock(fdp);
1323 	}
1324 }
1325 
1326 /*
1327  * Get file structures.
1328  */
1329 int
1330 sysctl_file(int *name, u_int namelen, char *where, size_t *sizep,
1331     struct proc *p)
1332 {
1333 	struct kinfo_file *kf;
1334 	struct filedesc *fdp;
1335 	struct file *fp;
1336 	struct process *pr;
1337 	size_t buflen, elem_size, elem_count, outsize;
1338 	char *dp = where;
1339 	int arg, i, error = 0, needed = 0, matched;
1340 	u_int op;
1341 	int show_pointers;
1342 
1343 	if (namelen > 4)
1344 		return (ENOTDIR);
1345 	if (namelen < 4 || name[2] > sizeof(*kf))
1346 		return (EINVAL);
1347 
1348 	buflen = where != NULL ? *sizep : 0;
1349 	op = name[0];
1350 	arg = name[1];
1351 	elem_size = name[2];
1352 	elem_count = name[3];
1353 	outsize = MIN(sizeof(*kf), elem_size);
1354 
1355 	if (elem_size < 1)
1356 		return (EINVAL);
1357 
1358 	show_pointers = suser(curproc) == 0;
1359 
1360 	kf = malloc(sizeof(*kf), M_TEMP, M_WAITOK);
1361 
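/*
 * Helper macros for the loops below: fill in one kinfo_file record and
 * copy it out while the caller's buffer and element count allow, and
 * always add its size to "needed" so the required length is reported.
 */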
1362 #define FILLIT2(fp, fdp, i, vp, pr, so) do {				\
1363 	if (buflen >= elem_size && elem_count > 0) {			\
1364 		fill_file(kf, fp, fdp, i, vp, pr, p, so, show_pointers);\
1365 		error = copyout(kf, dp, outsize);			\
1366 		if (error)						\
1367 			break;						\
1368 		dp += elem_size;					\
1369 		buflen -= elem_size;					\
1370 		elem_count--;						\
1371 	}								\
1372 	needed += elem_size;						\
1373 } while (0)
1374 #define FILLIT(fp, fdp, i, vp, pr) \
1375 	FILLIT2(fp, fdp, i, vp, pr, NULL)
1376 #define FILLSO(so) \
1377 	FILLIT2(NULL, NULL, 0, NULL, NULL, so)
1378 
1379 	switch (op) {
1380 	case KERN_FILE_BYFILE:
1381 		/* use the inp-tables to pick up closed connections, too */
1382 		if (arg == DTYPE_SOCKET) {
1383 			struct inpcb *inp;
1384 
1385 			NET_LOCK();
1386 			mtx_enter(&tcbtable.inpt_mtx);
1387 			TAILQ_FOREACH(inp, &tcbtable.inpt_queue, inp_queue)
1388 				FILLSO(inp->inp_socket);
1389 			mtx_leave(&tcbtable.inpt_mtx);
1390 			mtx_enter(&udbtable.inpt_mtx);
1391 			TAILQ_FOREACH(inp, &udbtable.inpt_queue, inp_queue)
1392 				FILLSO(inp->inp_socket);
1393 			mtx_leave(&udbtable.inpt_mtx);
1394 			mtx_enter(&rawcbtable.inpt_mtx);
1395 			TAILQ_FOREACH(inp, &rawcbtable.inpt_queue, inp_queue)
1396 				FILLSO(inp->inp_socket);
1397 			mtx_leave(&rawcbtable.inpt_mtx);
1398 #ifdef INET6
1399 			mtx_enter(&rawin6pcbtable.inpt_mtx);
1400 			TAILQ_FOREACH(inp, &rawin6pcbtable.inpt_queue,
1401 			    inp_queue)
1402 				FILLSO(inp->inp_socket);
1403 			mtx_leave(&rawin6pcbtable.inpt_mtx);
1404 #endif
1405 			NET_UNLOCK();
1406 		}
1407 		fp = NULL;
1408 		while ((fp = fd_iterfile(fp, p)) != NULL) {
1409 			if ((arg == 0 || fp->f_type == arg)) {
1410 				int af, skip = 0;
1411 				if (arg == DTYPE_SOCKET && fp->f_type == arg) {
1412 					af = ((struct socket *)fp->f_data)->
1413 					    so_proto->pr_domain->dom_family;
1414 					if (af == AF_INET || af == AF_INET6)
1415 						skip = 1;
1416 				}
1417 				if (!skip)
1418 					FILLIT(fp, NULL, 0, NULL, NULL);
1419 			}
1420 		}
1421 		break;
1422 	case KERN_FILE_BYPID:
1423 		/* An arg of -1 indicates all processes */
1424 		if (arg < -1) {
1425 			error = EINVAL;
1426 			break;
1427 		}
1428 		matched = 0;
1429 		LIST_FOREACH(pr, &allprocess, ps_list) {
1430 			/*
1431 			 * skip system, exiting, embryonic and undead
1432 			 * processes
1433 			 */
1434 			if (pr->ps_flags & (PS_SYSTEM | PS_EMBRYO | PS_EXITING))
1435 				continue;
1436 			if (arg > 0 && pr->ps_pid != (pid_t)arg) {
1437 				/* not the pid we are looking for */
1438 				continue;
1439 			}
1440 			matched = 1;
1441 			fdp = pr->ps_fd;
1442 			if (pr->ps_textvp)
1443 				FILLIT(NULL, NULL, KERN_FILE_TEXT, pr->ps_textvp, pr);
1444 			if (fdp->fd_cdir)
1445 				FILLIT(NULL, NULL, KERN_FILE_CDIR, fdp->fd_cdir, pr);
1446 			if (fdp->fd_rdir)
1447 				FILLIT(NULL, NULL, KERN_FILE_RDIR, fdp->fd_rdir, pr);
1448 			if (pr->ps_tracevp)
1449 				FILLIT(NULL, NULL, KERN_FILE_TRACE, pr->ps_tracevp, pr);
1450 			for (i = 0; i < fdp->fd_nfiles; i++) {
1451 				if ((fp = fd_getfile(fdp, i)) == NULL)
1452 					continue;
1453 				FILLIT(fp, fdp, i, NULL, pr);
1454 				FRELE(fp, p);
1455 			}
1456 		}
1457 		if (!matched)
1458 			error = ESRCH;
1459 		break;
1460 	case KERN_FILE_BYUID:
1461 		LIST_FOREACH(pr, &allprocess, ps_list) {
1462 			/*
1463 			 * skip system, exiting, embryonic and undead
1464 			 * processes
1465 			 */
1466 			if (pr->ps_flags & (PS_SYSTEM | PS_EMBRYO | PS_EXITING))
1467 				continue;
1468 			if (arg >= 0 && pr->ps_ucred->cr_uid != (uid_t)arg) {
1469 				/* not the uid we are looking for */
1470 				continue;
1471 			}
1472 			fdp = pr->ps_fd;
1473 			if (fdp->fd_cdir)
1474 				FILLIT(NULL, NULL, KERN_FILE_CDIR, fdp->fd_cdir, pr);
1475 			if (fdp->fd_rdir)
1476 				FILLIT(NULL, NULL, KERN_FILE_RDIR, fdp->fd_rdir, pr);
1477 			if (pr->ps_tracevp)
1478 				FILLIT(NULL, NULL, KERN_FILE_TRACE, pr->ps_tracevp, pr);
1479 			for (i = 0; i < fdp->fd_nfiles; i++) {
1480 				if ((fp = fd_getfile(fdp, i)) == NULL)
1481 					continue;
1482 				FILLIT(fp, fdp, i, NULL, pr);
1483 				FRELE(fp, p);
1484 			}
1485 		}
1486 		break;
1487 	default:
1488 		error = EINVAL;
1489 		break;
1490 	}
1491 	free(kf, M_TEMP, sizeof(*kf));
1492 
1493 	if (!error) {
1494 		if (where == NULL)
1495 			needed += KERN_FILESLOP * elem_size;
1496 		else if (*sizep < needed)
1497 			error = ENOMEM;
1498 		*sizep = needed;
1499 	}
1500 
1501 	return (error);
1502 }
1503 
1504 /*
1505  * try overestimating by 5 procs
1506  */
1507 #define KERN_PROCSLOP	5
1508 
1509 int
1510 sysctl_doproc(int *name, u_int namelen, char *where, size_t *sizep)
1511 {
1512 	struct kinfo_proc *kproc = NULL;
1513 	struct proc *p;
1514 	struct process *pr;
1515 	char *dp;
1516 	int arg, buflen, doingzomb, elem_size, elem_count;
1517 	int error, needed, op;
1518 	int dothreads = 0;
1519 	int show_pointers;
1520 
1521 	dp = where;
1522 	buflen = where != NULL ? *sizep : 0;
1523 	needed = error = 0;
1524 
1525 	if (namelen != 4 || name[2] <= 0 || name[3] < 0 ||
1526 	    name[2] > sizeof(*kproc))
1527 		return (EINVAL);
1528 	op = name[0];
1529 	arg = name[1];
1530 	elem_size = name[2];
1531 	elem_count = name[3];
1532 
1533 	dothreads = op & KERN_PROC_SHOW_THREADS;
1534 	op &= ~KERN_PROC_SHOW_THREADS;
1535 
1536 	show_pointers = suser(curproc) == 0;
1537 
1538 	if (where != NULL)
1539 		kproc = malloc(sizeof(*kproc), M_TEMP, M_WAITOK);
1540 
1541 	pr = LIST_FIRST(&allprocess);
1542 	doingzomb = 0;
1543 again:
1544 	for (; pr != NULL; pr = LIST_NEXT(pr, ps_list)) {
1545 		/* XXX skip processes in the middle of being zapped */
1546 		if (pr->ps_pgrp == NULL)
1547 			continue;
1548 
1549 		/*
1550 		 * Skip embryonic processes.
1551 		 */
1552 		if (pr->ps_flags & PS_EMBRYO)
1553 			continue;
1554 
1555 		/*
1556 		 * TODO - make more efficient (see notes below).
1557 		 */
1558 		switch (op) {
1559 
1560 		case KERN_PROC_PID:
1561 			/* could do this with just a lookup */
1562 			if (pr->ps_pid != (pid_t)arg)
1563 				continue;
1564 			break;
1565 
1566 		case KERN_PROC_PGRP:
1567 			/* could do this by traversing pgrp */
1568 			if (pr->ps_pgrp->pg_id != (pid_t)arg)
1569 				continue;
1570 			break;
1571 
1572 		case KERN_PROC_SESSION:
1573 			if (pr->ps_session->s_leader == NULL ||
1574 			    pr->ps_session->s_leader->ps_pid != (pid_t)arg)
1575 				continue;
1576 			break;
1577 
1578 		case KERN_PROC_TTY:
1579 			if ((pr->ps_flags & PS_CONTROLT) == 0 ||
1580 			    pr->ps_session->s_ttyp == NULL ||
1581 			    pr->ps_session->s_ttyp->t_dev != (dev_t)arg)
1582 				continue;
1583 			break;
1584 
1585 		case KERN_PROC_UID:
1586 			if (pr->ps_ucred->cr_uid != (uid_t)arg)
1587 				continue;
1588 			break;
1589 
1590 		case KERN_PROC_RUID:
1591 			if (pr->ps_ucred->cr_ruid != (uid_t)arg)
1592 				continue;
1593 			break;
1594 
1595 		case KERN_PROC_ALL:
1596 			if (pr->ps_flags & PS_SYSTEM)
1597 				continue;
1598 			break;
1599 
1600 		case KERN_PROC_KTHREAD:
1601 			/* no filtering */
1602 			break;
1603 
1604 		default:
1605 			error = EINVAL;
1606 			goto err;
1607 		}
1608 
1609 		if (buflen >= elem_size && elem_count > 0) {
1610 			fill_kproc(pr, kproc, NULL, show_pointers);
1611 			error = copyout(kproc, dp, elem_size);
1612 			if (error)
1613 				goto err;
1614 			dp += elem_size;
1615 			buflen -= elem_size;
1616 			elem_count--;
1617 		}
1618 		needed += elem_size;
1619 
1620 		/* Skip per-thread entries if not required by op */
1621 		if (!dothreads)
1622 			continue;
1623 
1624 		TAILQ_FOREACH(p, &pr->ps_threads, p_thr_link) {
1625 			if (buflen >= elem_size && elem_count > 0) {
1626 				fill_kproc(pr, kproc, p, show_pointers);
1627 				error = copyout(kproc, dp, elem_size);
1628 				if (error)
1629 					goto err;
1630 				dp += elem_size;
1631 				buflen -= elem_size;
1632 				elem_count--;
1633 			}
1634 			needed += elem_size;
1635 		}
1636 	}
1637 	if (doingzomb == 0) {
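	/* after the live process list, make a second pass over zombies */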
1638 		pr = LIST_FIRST(&zombprocess);
1639 		doingzomb++;
1640 		goto again;
1641 	}
1642 	if (where != NULL) {
1643 		*sizep = dp - where;
1644 		if (needed > *sizep) {
1645 			error = ENOMEM;
1646 			goto err;
1647 		}
1648 	} else {
1649 		needed += KERN_PROCSLOP * elem_size;
1650 		*sizep = needed;
1651 	}
1652 err:
1653 	if (kproc)
1654 		free(kproc, M_TEMP, sizeof(*kproc));
1655 	return (error);
1656 }
1657 
1658 /*
1659  * Fill in a kproc structure for the specified process.
1660  */
1661 void
1662 fill_kproc(struct process *pr, struct kinfo_proc *ki, struct proc *p,
1663     int show_pointers)
1664 {
1665 	struct session *s = pr->ps_session;
1666 	struct tty *tp;
1667 	struct vmspace *vm = pr->ps_vmspace;
1668 	struct timespec booted, st, ut, utc;
1669 	int isthread;
1670 
1671 	isthread = p != NULL;
1672 	if (!isthread)
1673 		p = pr->ps_mainproc;		/* XXX */
1674 
1675 	FILL_KPROC(ki, strlcpy, p, pr, pr->ps_ucred, pr->ps_pgrp,
1676 	    p, pr, s, vm, pr->ps_limit, pr->ps_sigacts, isthread,
1677 	    show_pointers);
1678 
1679 	/* stuff that's too painful to generalize into the macros */
1680 	if (pr->ps_pptr)
1681 		ki->p_ppid = pr->ps_ppid;
1682 	if (s->s_leader)
1683 		ki->p_sid = s->s_leader->ps_pid;
1684 
1685 	if ((pr->ps_flags & PS_CONTROLT) && (tp = s->s_ttyp)) {
1686 		ki->p_tdev = tp->t_dev;
1687 		ki->p_tpgid = tp->t_pgrp ? tp->t_pgrp->pg_id : -1;
1688 		if (show_pointers)
1689 			ki->p_tsess = PTRTOINT64(tp->t_session);
1690 	} else {
1691 		ki->p_tdev = NODEV;
1692 		ki->p_tpgid = -1;
1693 	}
1694 
1695 	/* fixups that can only be done in the kernel */
1696 	if ((pr->ps_flags & PS_ZOMBIE) == 0) {
1697 		if ((pr->ps_flags & PS_EMBRYO) == 0 && vm != NULL)
1698 			ki->p_vm_rssize = vm_resident_count(vm);
1699 		calctsru(isthread ? &p->p_tu : &pr->ps_tu, &ut, &st, NULL);
1700 		ki->p_uutime_sec = ut.tv_sec;
1701 		ki->p_uutime_usec = ut.tv_nsec/1000;
1702 		ki->p_ustime_sec = st.tv_sec;
1703 		ki->p_ustime_usec = st.tv_nsec/1000;
1704 
1705 		/* Convert starting uptime to a starting UTC time. */
1706 		nanoboottime(&booted);
1707 		timespecadd(&booted, &pr->ps_start, &utc);
1708 		ki->p_ustart_sec = utc.tv_sec;
1709 		ki->p_ustart_usec = utc.tv_nsec / 1000;
1710 
1711 #ifdef MULTIPROCESSOR
1712 		if (p->p_cpu != NULL)
1713 			ki->p_cpuid = CPU_INFO_UNIT(p->p_cpu);
1714 #endif
1715 	}
1716 
1717 	/* get %cpu and schedule state: just one thread or sum of all? */
1718 	if (isthread) {
1719 		ki->p_pctcpu = p->p_pctcpu;
1720 		ki->p_stat   = p->p_stat;
1721 	} else {
1722 		ki->p_pctcpu = 0;
1723 		ki->p_stat = (pr->ps_flags & PS_ZOMBIE) ? SDEAD : SIDL;
1724 		TAILQ_FOREACH(p, &pr->ps_threads, p_thr_link) {
1725 			ki->p_pctcpu += p->p_pctcpu;
1726 			/* find best state: ONPROC > RUN > STOP > SLEEP > .. */
1727 			if (p->p_stat == SONPROC || ki->p_stat == SONPROC)
1728 				ki->p_stat = SONPROC;
1729 			else if (p->p_stat == SRUN || ki->p_stat == SRUN)
1730 				ki->p_stat = SRUN;
1731 			else if (p->p_stat == SSTOP || ki->p_stat == SSTOP)
1732 				ki->p_stat = SSTOP;
1733 			else if (p->p_stat == SSLEEP)
1734 				ki->p_stat = SSLEEP;
1735 		}
1736 	}
1737 }
1738 
1739 int
1740 sysctl_proc_args(int *name, u_int namelen, void *oldp, size_t *oldlenp,
1741     struct proc *cp)
1742 {
1743 	struct process *vpr;
1744 	pid_t pid;
1745 	struct ps_strings pss;
1746 	struct iovec iov;
1747 	struct uio uio;
1748 	int error, cnt, op;
1749 	size_t limit;
1750 	char **rargv, **vargv;		/* reader vs. victim */
1751 	char *rarg, *varg, *buf;
1752 	struct vmspace *vm;
1753 	vaddr_t ps_strings;
1754 
1755 	if (namelen > 2)
1756 		return (ENOTDIR);
1757 	if (namelen < 2)
1758 		return (EINVAL);
1759 
1760 	pid = name[0];
1761 	op = name[1];
1762 
1763 	switch (op) {
1764 	case KERN_PROC_ARGV:
1765 	case KERN_PROC_NARGV:
1766 	case KERN_PROC_ENV:
1767 	case KERN_PROC_NENV:
1768 		break;
1769 	default:
1770 		return (EOPNOTSUPP);
1771 	}
1772 
1773 	if ((vpr = prfind(pid)) == NULL)
1774 		return (ESRCH);
1775 
1776 	if (oldp == NULL) {
1777 		if (op == KERN_PROC_NARGV || op == KERN_PROC_NENV)
1778 			*oldlenp = sizeof(int);
1779 		else
1780 			*oldlenp = ARG_MAX;	/* XXX XXX XXX */
1781 		return (0);
1782 	}
1783 
1784 	/* Either system process or exiting/zombie */
1785 	if (vpr->ps_flags & (PS_SYSTEM | PS_EXITING))
1786 		return (EINVAL);
1787 
1788 	/* Execing - danger. */
1789 	if ((vpr->ps_flags & PS_INEXEC))
1790 		return (EBUSY);
1791 
1792 	/* Only owner or root can get env */
1793 	if ((op == KERN_PROC_NENV || op == KERN_PROC_ENV) &&
1794 	    (vpr->ps_ucred->cr_uid != cp->p_ucred->cr_uid &&
1795 	    (error = suser(cp)) != 0))
1796 		return (error);
1797 
1798 	ps_strings = vpr->ps_strings;
1799 	vm = vpr->ps_vmspace;
1800 	uvmspace_addref(vm);
1801 	vpr = NULL;
1802 
1803 	buf = malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
1804 
1805 	iov.iov_base = &pss;
1806 	iov.iov_len = sizeof(pss);
1807 	uio.uio_iov = &iov;
1808 	uio.uio_iovcnt = 1;
1809 	uio.uio_offset = (off_t)ps_strings;
1810 	uio.uio_resid = sizeof(pss);
1811 	uio.uio_segflg = UIO_SYSSPACE;
1812 	uio.uio_rw = UIO_READ;
1813 	uio.uio_procp = cp;
1814 
1815 	if ((error = uvm_io(&vm->vm_map, &uio, 0)) != 0)
1816 		goto out;
1817 
1818 	if (op == KERN_PROC_NARGV) {
1819 		error = sysctl_rdint(oldp, oldlenp, NULL, pss.ps_nargvstr);
1820 		goto out;
1821 	}
1822 	if (op == KERN_PROC_NENV) {
1823 		error = sysctl_rdint(oldp, oldlenp, NULL, pss.ps_nenvstr);
1824 		goto out;
1825 	}
1826 
1827 	if (op == KERN_PROC_ARGV) {
1828 		cnt = pss.ps_nargvstr;
1829 		vargv = pss.ps_argvstr;
1830 	} else {
1831 		cnt = pss.ps_nenvstr;
1832 		vargv = pss.ps_envstr;
1833 	}
1834 
1835 	/* -1 to have space for a terminating NUL */
1836 	limit = *oldlenp - 1;
1837 	*oldlenp = 0;
1838 
1839 	rargv = oldp;
1840 
1841 	/*
1842 	 * *oldlenp - number of bytes copied out into the reader's buffer.
1843 	 * limit - maximal number of bytes allowed into the reader's buffer.
1844 	 * rarg - pointer into the reader's buffer where the next arg will be stored.
1845 	 * rargv - pointer into the reader's buffer where the next rarg pointer
1846 	 *  will be stored.
1847 	 * vargv - pointer into victim address space where the next argument
1848 	 *  will be read.
1849 	 */
1850 
1851 	/* space for cnt pointers and a NULL */
1852 	rarg = (char *)(rargv + cnt + 1);
1853 	*oldlenp += (cnt + 1) * sizeof(char **);
1854 
1855 	while (cnt > 0 && *oldlenp < limit) {
1856 		size_t len, vstrlen;
1857 
1858 		/* Write to the reader's argv */
1859 		if ((error = copyout(&rarg, rargv, sizeof(rarg))) != 0)
1860 			goto out;
1861 
1862 		/* read the victim argv */
1863 		iov.iov_base = &varg;
1864 		iov.iov_len = sizeof(varg);
1865 		uio.uio_iov = &iov;
1866 		uio.uio_iovcnt = 1;
1867 		uio.uio_offset = (off_t)(vaddr_t)vargv;
1868 		uio.uio_resid = sizeof(varg);
1869 		uio.uio_segflg = UIO_SYSSPACE;
1870 		uio.uio_rw = UIO_READ;
1871 		uio.uio_procp = cp;
1872 		if ((error = uvm_io(&vm->vm_map, &uio, 0)) != 0)
1873 			goto out;
1874 
1875 		if (varg == NULL)
1876 			break;
1877 
1878 		/*
1879 		 * read the victim arg. We must jump through hoops to avoid
1880 		 * crossing a page boundary too much and returning an error.
1881 		 */
1882 more:
1883 		len = PAGE_SIZE - (((vaddr_t)varg) & PAGE_MASK);
1884 		/* leave space for the terminating NUL */
1885 		iov.iov_base = buf;
1886 		iov.iov_len = len;
1887 		uio.uio_iov = &iov;
1888 		uio.uio_iovcnt = 1;
1889 		uio.uio_offset = (off_t)(vaddr_t)varg;
1890 		uio.uio_resid = len;
1891 		uio.uio_segflg = UIO_SYSSPACE;
1892 		uio.uio_rw = UIO_READ;
1893 		uio.uio_procp = cp;
1894 		if ((error = uvm_io(&vm->vm_map, &uio, 0)) != 0)
1895 			goto out;
1896 
1897 		for (vstrlen = 0; vstrlen < len; vstrlen++) {
1898 			if (buf[vstrlen] == '\0')
1899 				break;
1900 		}
1901 
1902 		/* Don't overflow the reader's buffer. */
1903 		if (*oldlenp + vstrlen + 1 >= limit) {
1904 			error = ENOMEM;
1905 			goto out;
1906 		}
1907 
1908 		if ((error = copyout(buf, rarg, vstrlen)) != 0)
1909 			goto out;
1910 
1911 		*oldlenp += vstrlen;
1912 		rarg += vstrlen;
1913 
1914 		/* String continues into the next page? Keep reading. */
1915 		if (vstrlen == len) {
1916 			varg += vstrlen;
1917 			goto more;
1918 		}
1919 
1920 		/* End of string. Terminate it with a NUL */
1921 		buf[0] = '\0';
1922 		if ((error = copyout(buf, rarg, 1)) != 0)
1923 			goto out;
1924 		*oldlenp += 1;
1925 		rarg += 1;
1926 
1927 		vargv++;
1928 		rargv++;
1929 		cnt--;
1930 	}
1931 
1932 	if (*oldlenp >= limit) {
1933 		error = ENOMEM;
1934 		goto out;
1935 	}
1936 
1937 	/* Write the terminating NULL pointer into the reader's argv */
1938 	rarg = NULL;
1939 	error = copyout(&rarg, rargv, sizeof(rarg));
1940 
1941 out:
1942 	uvmspace_free(vm);
1943 	free(buf, M_TEMP, PAGE_SIZE);
1944 	return (error);
1945 }
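
/*
 * Userland sketch for the handler above (illustration only, not kernel
 * code), assuming the usual { CTL_KERN, KERN_PROC_ARGS, pid, op } MIB
 * spelling; "pid" is a placeholder and error checks are minimal:
 *
 *	int mib[4] = { CTL_KERN, KERN_PROC_ARGS, pid, KERN_PROC_ARGV };
 *	size_t len = 0;
 *	char **argv;
 *
 *	if (sysctl(mib, 4, NULL, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 *	if ((argv = malloc(len)) == NULL)
 *		err(1, "malloc");
 *	if (sysctl(mib, 4, argv, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 *	for (int i = 0; argv[i] != NULL; i++)
 *		puts(argv[i]);
 */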
1946 
1947 int
1948 sysctl_proc_cwd(int *name, u_int namelen, void *oldp, size_t *oldlenp,
1949     struct proc *cp)
1950 {
1951 	struct process *findpr;
1952 	struct vnode *vp;
1953 	pid_t pid;
1954 	int error;
1955 	size_t lenused, len;
1956 	char *path, *bp, *bend;
1957 
1958 	if (namelen > 1)
1959 		return (ENOTDIR);
1960 	if (namelen < 1)
1961 		return (EINVAL);
1962 
1963 	pid = name[0];
1964 	if ((findpr = prfind(pid)) == NULL)
1965 		return (ESRCH);
1966 
1967 	if (oldp == NULL) {
1968 		*oldlenp = MAXPATHLEN * 4;
1969 		return (0);
1970 	}
1971 
1972 	/* Either system process or exiting/zombie */
1973 	if (findpr->ps_flags & (PS_SYSTEM | PS_EXITING))
1974 		return (EINVAL);
1975 
1976 	/* Only owner or root can get cwd */
1977 	if (findpr->ps_ucred->cr_uid != cp->p_ucred->cr_uid &&
1978 	    (error = suser(cp)) != 0)
1979 		return (error);
1980 
1981 	len = *oldlenp;
1982 	if (len > MAXPATHLEN * 4)
1983 		len = MAXPATHLEN * 4;
1984 	else if (len < 2)
1985 		return (ERANGE);
1986 	*oldlenp = 0;
1987 
1988 	/* reference the vnode before we do anything that can sleep */
1989 	vp = findpr->ps_fd->fd_cdir;
1990 	vref(vp);
1991 
1992 	path = malloc(len, M_TEMP, M_WAITOK);
1993 
1994 	bp = &path[len];
1995 	bend = bp;
1996 	*(--bp) = '\0';
1997 
1998 	/* Same as sys__getcwd */
1999 	error = vfs_getcwd_common(vp, NULL,
2000 	    &bp, path, len / 2, GETCWD_CHECK_ACCESS, cp);
2001 	if (error == 0) {
2002 		*oldlenp = lenused = bend - bp;
2003 		error = copyout(bp, oldp, lenused);
2004 	}
2005 
2006 	vrele(vp);
2007 	free(path, M_TEMP, len);
2008 
2009 	return (error);
2010 }
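
/*
 * Userland sketch for the handler above (illustration only), assuming
 * the usual { CTL_KERN, KERN_PROC_CWD, pid } MIB spelling; "pid" is a
 * placeholder:
 *
 *	int mib[3] = { CTL_KERN, KERN_PROC_CWD, pid };
 *	char path[PATH_MAX];
 *	size_t len = sizeof(path);
 *
 *	if (sysctl(mib, 3, path, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 *	printf("%s\n", path);
 */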
2011 
2012 int
2013 sysctl_proc_nobroadcastkill(int *name, u_int namelen, void *newp, size_t newlen,
2014     void *oldp, size_t *oldlenp, struct proc *cp)
2015 {
2016 	struct process *findpr;
2017 	pid_t pid;
2018 	int error, flag;
2019 
2020 	if (namelen > 1)
2021 		return (ENOTDIR);
2022 	if (namelen < 1)
2023 		return (EINVAL);
2024 
2025 	pid = name[0];
2026 	if ((findpr = prfind(pid)) == NULL)
2027 		return (ESRCH);
2028 
2029 	/* Either system process or exiting/zombie */
2030 	if (findpr->ps_flags & (PS_SYSTEM | PS_EXITING))
2031 		return (EINVAL);
2032 
2033 	/* Only root can change PS_NOBROADCASTKILL */
2034 	if (newp != NULL && (error = suser(cp)) != 0)
2035 		return (error);
2036 
2037 	/* get the PS_NOBROADCASTKILL flag */
2038 	flag = findpr->ps_flags & PS_NOBROADCASTKILL ? 1 : 0;
2039 
2040 	error = sysctl_int(oldp, oldlenp, newp, newlen, &flag);
2041 	if (error == 0 && newp) {
2042 		if (flag)
2043 			atomic_setbits_int(&findpr->ps_flags,
2044 			    PS_NOBROADCASTKILL);
2045 		else
2046 			atomic_clearbits_int(&findpr->ps_flags,
2047 			    PS_NOBROADCASTKILL);
2048 	}
2049 
2050 	return (error);
2051 }
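
/*
 * Userland sketch for the handler above (illustration only), assuming
 * the corresponding { CTL_KERN, KERN_PROC_NOBROADCASTKILL, pid } MIB
 * spelling: reads the current flag and, as root, sets it in one call.
 *
 *	int mib[3] = { CTL_KERN, KERN_PROC_NOBROADCASTKILL, pid };
 *	int flag, on = 1;
 *	size_t len = sizeof(flag);
 *
 *	if (sysctl(mib, 3, &flag, &len, &on, sizeof(on)) == -1)
 *		err(1, "sysctl");
 */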
2052 
2053 /* Arbitrary but reasonable limit for one iteration. */
2054 #define	VMMAP_MAXLEN	MAXPHYS
2055 
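/*
 * The caller supplies a buffer of whole struct kinfo_vmentry records
 * (at most VMMAP_MAXLEN bytes) and seeds the first record's kve_start
 * with the address to start from; up to that many entries are filled
 * in and *oldlenp is set to the number of bytes actually used.  A
 * caller that wants the whole map can repeat the call, restarting from
 * where the previous batch left off.
 */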
2056 int
2057 sysctl_proc_vmmap(int *name, u_int namelen, void *oldp, size_t *oldlenp,
2058     struct proc *cp)
2059 {
2060 	struct process *findpr;
2061 	pid_t pid;
2062 	int error;
2063 	size_t oldlen, len;
2064 	struct kinfo_vmentry *kve, *ukve;
2065 	u_long *ustart, start;
2066 
2067 	if (namelen > 1)
2068 		return (ENOTDIR);
2069 	if (namelen < 1)
2070 		return (EINVAL);
2071 
2072 	/* Provide max buffer length as hint. */
2073 	if (oldp == NULL) {
2074 		if (oldlenp == NULL)
2075 			return (EINVAL);
2076 		else {
2077 			*oldlenp = VMMAP_MAXLEN;
2078 			return (0);
2079 		}
2080 	}
2081 
2082 	pid = name[0];
2083 	if (pid == cp->p_p->ps_pid) {
2084 		/* Self process mapping. */
2085 		findpr = cp->p_p;
2086 	} else if (pid > 0) {
2087 		if ((findpr = prfind(pid)) == NULL)
2088 			return (ESRCH);
2089 
2090 		/* Either system process or exiting/zombie */
2091 		if (findpr->ps_flags & (PS_SYSTEM | PS_EXITING))
2092 			return (EINVAL);
2093 
2094 #if 1
2095 		/* XXX Allow only root for now */
2096 		if ((error = suser(cp)) != 0)
2097 			return (error);
2098 #else
2099 		/* Only owner or root can get vmmap */
2100 		if (findpr->ps_ucred->cr_uid != cp->p_ucred->cr_uid &&
2101 		    (error = suser(cp)) != 0)
2102 			return (error);
2103 #endif
2104 	} else {
2105 		/* Only root can get kernel_map */
2106 		if ((error = suser(cp)) != 0)
2107 			return (error);
2108 		findpr = NULL;
2109 	}
2110 
2111 	/* Check the given size. */
2112 	oldlen = *oldlenp;
2113 	if (oldlen == 0 || oldlen % sizeof(*kve) != 0)
2114 		return (EINVAL);
2115 
2116 	/* Deny huge allocation. */
2117 	if (oldlen > VMMAP_MAXLEN)
2118 		return (EINVAL);
2119 
2120 	/*
2121 	 * Start iterating from the address the caller passed in via oldp
2122 	 * as the first element's kve_start.
2123 	 */
2124 	ukve = (struct kinfo_vmentry *)oldp;
2125 	ustart = &ukve->kve_start;
2126 	error = copyin(ustart, &start, sizeof(start));
2127 	if (error != 0)
2128 		return (error);
2129 
2130 	/* Allocate wired memory so that using it later cannot block. */
2131 	kve = malloc(oldlen, M_TEMP, M_WAITOK);
2132 
2133 	/* Set the base address and read entries. */
2134 	kve[0].kve_start = start;
2135 	len = oldlen;
2136 	error = fill_vmmap(findpr, kve, &len);
2137 	if (error != 0 && error != ENOMEM)
2138 		goto done;
2139 	if (len == 0)
2140 		goto done;
2141 
2142 	KASSERT(len <= oldlen);
2143 	KASSERT((len % sizeof(struct kinfo_vmentry)) == 0);
2144 
2145 	error = copyout(kve, oldp, len);
2146 
2147 done:
2148 	*oldlenp = len;
2149 
2150 	free(kve, M_TEMP, oldlen);
2151 
2152 	return (error);
2153 }
2154 #endif
2155 
2156 /*
2157  * Initialize disknames/diskstats for export by sysctl. If update is set,
2158  * then we simply update the disk statistics information.
2159  */
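/*
 * disknames is built as a comma-separated list of "name:duid" pairs,
 * with an empty duid when the label has none, e.g. something like
 * "sd0:062682a1685ff348,cd0:".  diskstats is the matching array of
 * struct diskstats, one entry per disk in the same order.
 */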
2160 int
2161 sysctl_diskinit(int update, struct proc *p)
2162 {
2163 	struct diskstats *sdk;
2164 	struct disk *dk;
2165 	const char *duid;
2166 	int error, changed = 0;
2167 
2168 	KERNEL_ASSERT_LOCKED();
2169 
2170 	if ((error = rw_enter(&sysctl_disklock, RW_WRITE|RW_INTR)) != 0)
2171 		return error;
2172 
2173 	/* Run in a loop; the disk list may change while malloc sleeps. */
2174 	while (disk_change) {
2175 		int tlen;
2176 
2177 		disk_change = 0;
2178 
2179 		tlen = 0;
2180 		TAILQ_FOREACH(dk, &disklist, dk_link) {
2181 			if (dk->dk_name)
2182 				tlen += strlen(dk->dk_name);
2183 			tlen += 18;	/* label uid + separators */
2184 		}
2185 		tlen++;
2186 
2187 		/*
2188 		 * The sysctl_disklock ensures that no other process can
2189 		 * allocate disknames and diskstats while our malloc sleeps.
2190 		 */
2191 		free(disknames, M_SYSCTL, disknameslen);
2192 		free(diskstats, M_SYSCTL, diskstatslen);
2193 		diskstats = NULL;
2194 		disknames = NULL;
2195 		diskstats = mallocarray(disk_count, sizeof(struct diskstats),
2196 		    M_SYSCTL, M_WAITOK|M_ZERO);
2197 		diskstatslen = disk_count * sizeof(struct diskstats);
2198 		disknames = malloc(tlen, M_SYSCTL, M_WAITOK|M_ZERO);
2199 		disknameslen = tlen;
2200 		disknames[0] = '\0';
2201 		changed = 1;
2202 	}
2203 
2204 	if (changed) {
2205 		int l;
2206 
2207 		l = 0;
2208 		sdk = diskstats;
2209 		TAILQ_FOREACH(dk, &disklist, dk_link) {
2210 			duid = NULL;
2211 			if (dk->dk_label && !duid_iszero(dk->dk_label->d_uid))
2212 				duid = duid_format(dk->dk_label->d_uid);
2213 			snprintf(disknames + l, disknameslen - l, "%s:%s,",
2214 			    dk->dk_name ? dk->dk_name : "",
2215 			    duid ? duid : "");
2216 			l += strlen(disknames + l);
2217 			strlcpy(sdk->ds_name, dk->dk_name,
2218 			    sizeof(sdk->ds_name));
2219 			mtx_enter(&dk->dk_mtx);
2220 			sdk->ds_busy = dk->dk_busy;
2221 			sdk->ds_rxfer = dk->dk_rxfer;
2222 			sdk->ds_wxfer = dk->dk_wxfer;
2223 			sdk->ds_seek = dk->dk_seek;
2224 			sdk->ds_rbytes = dk->dk_rbytes;
2225 			sdk->ds_wbytes = dk->dk_wbytes;
2226 			sdk->ds_attachtime = dk->dk_attachtime;
2227 			sdk->ds_timestamp = dk->dk_timestamp;
2228 			sdk->ds_time = dk->dk_time;
2229 			mtx_leave(&dk->dk_mtx);
2230 			sdk++;
2231 		}
2232 
2233 		/* Eliminate trailing comma */
2234 		if (l != 0)
2235 			disknames[l - 1] = '\0';
2236 	} else if (update) {
2237 		/* Just update; the number of drives hasn't changed */
2238 		sdk = diskstats;
2239 		TAILQ_FOREACH(dk, &disklist, dk_link) {
2240 			strlcpy(sdk->ds_name, dk->dk_name,
2241 			    sizeof(sdk->ds_name));
2242 			mtx_enter(&dk->dk_mtx);
2243 			sdk->ds_busy = dk->dk_busy;
2244 			sdk->ds_rxfer = dk->dk_rxfer;
2245 			sdk->ds_wxfer = dk->dk_wxfer;
2246 			sdk->ds_seek = dk->dk_seek;
2247 			sdk->ds_rbytes = dk->dk_rbytes;
2248 			sdk->ds_wbytes = dk->dk_wbytes;
2249 			sdk->ds_attachtime = dk->dk_attachtime;
2250 			sdk->ds_timestamp = dk->dk_timestamp;
2251 			sdk->ds_time = dk->dk_time;
2252 			mtx_leave(&dk->dk_mtx);
2253 			sdk++;
2254 		}
2255 	}
2256 	rw_exit_write(&sysctl_disklock);
2257 	return 0;
2258 }
2259 
2260 #if defined(SYSVMSG) || defined(SYSVSEM) || defined(SYSVSHM)
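/*
 * The exported format is one info structure (seminfo or shminfo)
 * immediately followed by an array of id structures, one per possible
 * identifier (semmni or shmmni of them), with unused slots zeroed.
 * A NULL oldp just reports the total size; a short buffer gets as many
 * whole records as fit plus ENOMEM.
 */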
2261 int
2262 sysctl_sysvipc(int *name, u_int namelen, void *where, size_t *sizep)
2263 {
2264 #ifdef SYSVSEM
2265 	struct sem_sysctl_info *semsi;
2266 #endif
2267 #ifdef SYSVSHM
2268 	struct shm_sysctl_info *shmsi;
2269 #endif
2270 	size_t infosize, dssize, tsize, buflen, bufsiz;
2271 	int i, nds, error, ret;
2272 	void *buf;
2273 
2274 	if (namelen != 1)
2275 		return (EINVAL);
2276 
2277 	buflen = *sizep;
2278 
2279 	switch (*name) {
2280 	case KERN_SYSVIPC_MSG_INFO:
2281 #ifdef SYSVMSG
2282 		return (sysctl_sysvmsg(name, namelen, where, sizep));
2283 #else
2284 		return (EOPNOTSUPP);
2285 #endif
2286 	case KERN_SYSVIPC_SEM_INFO:
2287 #ifdef SYSVSEM
2288 		infosize = sizeof(semsi->seminfo);
2289 		nds = seminfo.semmni;
2290 		dssize = sizeof(semsi->semids[0]);
2291 		break;
2292 #else
2293 		return (EOPNOTSUPP);
2294 #endif
2295 	case KERN_SYSVIPC_SHM_INFO:
2296 #ifdef SYSVSHM
2297 		infosize = sizeof(shmsi->shminfo);
2298 		nds = shminfo.shmmni;
2299 		dssize = sizeof(shmsi->shmids[0]);
2300 		break;
2301 #else
2302 		return (EOPNOTSUPP);
2303 #endif
2304 	default:
2305 		return (EINVAL);
2306 	}
2307 	tsize = infosize + (nds * dssize);
2308 
2309 	/* Return just the total size required. */
2310 	if (where == NULL) {
2311 		*sizep = tsize;
2312 		return (0);
2313 	}
2314 
2315 	/* Not enough room for even the info struct. */
2316 	if (buflen < infosize) {
2317 		*sizep = 0;
2318 		return (ENOMEM);
2319 	}
2320 	bufsiz = min(tsize, buflen);
2321 	buf = malloc(bufsiz, M_TEMP, M_WAITOK|M_ZERO);
2322 
2323 	switch (*name) {
2324 #ifdef SYSVSEM
2325 	case KERN_SYSVIPC_SEM_INFO:
2326 		semsi = (struct sem_sysctl_info *)buf;
2327 		semsi->seminfo = seminfo;
2328 		break;
2329 #endif
2330 #ifdef SYSVSHM
2331 	case KERN_SYSVIPC_SHM_INFO:
2332 		shmsi = (struct shm_sysctl_info *)buf;
2333 		shmsi->shminfo = shminfo;
2334 		break;
2335 #endif
2336 	}
2337 	buflen -= infosize;
2338 
2339 	ret = 0;
2340 	if (buflen > 0) {
2341 		/* Fill in the IPC data structures.  */
2342 		for (i = 0; i < nds; i++) {
2343 			if (buflen < dssize) {
2344 				ret = ENOMEM;
2345 				break;
2346 			}
2347 			switch (*name) {
2348 #ifdef SYSVSEM
2349 			case KERN_SYSVIPC_SEM_INFO:
2350 				if (sema[i] != NULL)
2351 					memcpy(&semsi->semids[i], sema[i],
2352 					    dssize);
2353 				else
2354 					memset(&semsi->semids[i], 0, dssize);
2355 				break;
2356 #endif
2357 #ifdef SYSVSHM
2358 			case KERN_SYSVIPC_SHM_INFO:
2359 				if (shmsegs[i] != NULL)
2360 					memcpy(&shmsi->shmids[i], shmsegs[i],
2361 					    dssize);
2362 				else
2363 					memset(&shmsi->shmids[i], 0, dssize);
2364 				break;
2365 #endif
2366 			}
2367 			buflen -= dssize;
2368 		}
2369 	}
2370 	*sizep -= buflen;
2371 	error = copyout(buf, where, *sizep);
2372 	free(buf, M_TEMP, bufsiz);
2373 	/* If copyout succeeded, use return code set earlier. */
2374 	return (error ? error : ret);
2375 }
2376 #endif /* SYSVMSG || SYSVSEM || SYSVSHM */
2377 
2378 #ifndef	SMALL_KERNEL
2379 
2380 int
2381 sysctl_intrcnt(int *name, u_int namelen, void *oldp, size_t *oldlenp)
2382 {
2383 	return (evcount_sysctl(name, namelen, oldp, oldlenp, NULL, 0));
2384 }
2385 
2386 
2387 int
2388 sysctl_sensors(int *name, u_int namelen, void *oldp, size_t *oldlenp,
2389     void *newp, size_t newlen)
2390 {
2391 	struct ksensor *ks;
2392 	struct sensor *us;
2393 	struct ksensordev *ksd;
2394 	struct sensordev *usd;
2395 	int dev, numt, ret;
2396 	enum sensor_type type;
2397 
2398 	if (namelen != 1 && namelen != 3)
2399 		return (ENOTDIR);
2400 
2401 	dev = name[0];
2402 	if (namelen == 1) {
2403 		ret = sensordev_get(dev, &ksd);
2404 		if (ret)
2405 			return (ret);
2406 
2407 		/* Copy into a zeroed struct so kernel pointers are not exposed */
2408 		usd = malloc(sizeof(*usd), M_TEMP, M_WAITOK|M_ZERO);
2409 		usd->num = ksd->num;
2410 		strlcpy(usd->xname, ksd->xname, sizeof(usd->xname));
2411 		memcpy(usd->maxnumt, ksd->maxnumt, sizeof(usd->maxnumt));
2412 		usd->sensors_count = ksd->sensors_count;
2413 
2414 		ret = sysctl_rdstruct(oldp, oldlenp, newp, usd,
2415 		    sizeof(struct sensordev));
2416 
2417 		free(usd, M_TEMP, sizeof(*usd));
2418 		return (ret);
2419 	}
2420 
2421 	type = name[1];
2422 	numt = name[2];
2423 
2424 	ret = sensor_find(dev, type, numt, &ks);
2425 	if (ret)
2426 		return (ret);
2427 
2428 	/* Copy into a zeroed struct so kernel pointers are not exposed */
2429 	us = malloc(sizeof(*us), M_TEMP, M_WAITOK|M_ZERO);
2430 	memcpy(us->desc, ks->desc, sizeof(us->desc));
2431 	us->tv = ks->tv;
2432 	us->value = ks->value;
2433 	us->type = ks->type;
2434 	us->status = ks->status;
2435 	us->numt = ks->numt;
2436 	us->flags = ks->flags;
2437 
2438 	ret = sysctl_rdstruct(oldp, oldlenp, newp, us,
2439 	    sizeof(struct sensor));
2440 	free(us, M_TEMP, sizeof(*us));
2441 	return (ret);
2442 }
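
/*
 * Userland sketch for the handler above (illustration only), assuming
 * the usual hw.sensors prefix { CTL_HW, HW_SENSORS, ... }: three name
 * components select a device (struct sensordev), five select a single
 * sensor (struct sensor).  "dev" is a placeholder device number.
 *
 *	int mib[5] = { CTL_HW, HW_SENSORS, dev, SENSOR_TEMP, 0 };
 *	struct sensor s;
 *	size_t len = sizeof(s);
 *
 *	if (sysctl(mib, 5, &s, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 *	printf("%lld\n", s.value);
 */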
2443 #endif	/* SMALL_KERNEL */
2444 
2445 int
2446 sysctl_cptime2(int *name, u_int namelen, void *oldp, size_t *oldlenp,
2447     void *newp, size_t newlen)
2448 {
2449 	CPU_INFO_ITERATOR cii;
2450 	struct cpu_info *ci;
2451 	int found = 0;
2452 
2453 	if (namelen != 1)
2454 		return (ENOTDIR);
2455 
2456 	CPU_INFO_FOREACH(cii, ci) {
2457 		if (name[0] == CPU_INFO_UNIT(ci)) {
2458 			found = 1;
2459 			break;
2460 		}
2461 	}
2462 	if (!found)
2463 		return (ENOENT);
2464 
2465 	return (sysctl_rdstruct(oldp, oldlenp, newp,
2466 	    &ci->ci_schedstate.spc_cp_time,
2467 	    sizeof(ci->ci_schedstate.spc_cp_time)));
2468 }
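
/*
 * From userland this is reached as { CTL_KERN, KERN_CPTIME2, cpu }
 * (assumed spelling), where "cpu" is the CPU_INFO_UNIT() number of the
 * CPU of interest; the reply is that CPU's spc_cp_time array of
 * CPUSTATES scheduler tick counters.
 */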
2469 
2470 #if NAUDIO > 0
2471 int
2472 sysctl_audio(int *name, u_int namelen, void *oldp, size_t *oldlenp,
2473     void *newp, size_t newlen)
2474 {
2475 	if (namelen != 1)
2476 		return (ENOTDIR);
2477 
2478 	if (name[0] != KERN_AUDIO_RECORD)
2479 		return (ENOENT);
2480 
2481 	return (sysctl_int(oldp, oldlenp, newp, newlen, &audio_record_enable));
2482 }
2483 #endif
2484 
2485 #if NVIDEO > 0
2486 int
2487 sysctl_video(int *name, u_int namelen, void *oldp, size_t *oldlenp,
2488     void *newp, size_t newlen)
2489 {
2490 	if (namelen != 1)
2491 		return (ENOTDIR);
2492 
2493 	if (name[0] != KERN_VIDEO_RECORD)
2494 		return (ENOENT);
2495 
2496 	return (sysctl_int(oldp, oldlenp, newp, newlen, &video_record_enable));
2497 }
2498 #endif
2499 
2500 int
2501 sysctl_cpustats(int *name, u_int namelen, void *oldp, size_t *oldlenp,
2502     void *newp, size_t newlen)
2503 {
2504 	CPU_INFO_ITERATOR cii;
2505 	struct cpustats cs;
2506 	struct cpu_info *ci;
2507 	int found = 0;
2508 
2509 	if (namelen != 1)
2510 		return (ENOTDIR);
2511 
2512 	CPU_INFO_FOREACH(cii, ci) {
2513 		if (name[0] == CPU_INFO_UNIT(ci)) {
2514 			found = 1;
2515 			break;
2516 		}
2517 	}
2518 	if (!found)
2519 		return (ENOENT);
2520 
2521 	memset(&cs, 0, sizeof cs);
2522 	memcpy(&cs.cs_time, &ci->ci_schedstate.spc_cp_time, sizeof(cs.cs_time));
2523 	cs.cs_flags = 0;
2524 	if (cpu_is_online(ci))
2525 		cs.cs_flags |= CPUSTATS_ONLINE;
2526 
2527 	return (sysctl_rdstruct(oldp, oldlenp, newp, &cs, sizeof(cs)));
2528 }
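
/*
 * Userland sketch for the handler above (illustration only), assuming
 * the usual { CTL_KERN, KERN_CPUSTATS, cpu } MIB spelling; "cpu" is a
 * placeholder CPU unit number:
 *
 *	int mib[3] = { CTL_KERN, KERN_CPUSTATS, cpu };
 *	struct cpustats cs;
 *	size_t len = sizeof(cs);
 *
 *	if (sysctl(mib, 3, &cs, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 *	if (cs.cs_flags & CPUSTATS_ONLINE)
 *		... use cs.cs_time[] ...
 */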
2529 
2530 int
2531 sysctl_utc_offset(void *oldp, size_t *oldlenp, void *newp, size_t newlen)
2532 {
2533 	struct timespec adjusted, now;
2534 	int adjustment_seconds, error, new_offset_minutes, old_offset_minutes;
2535 
2536 	old_offset_minutes = utc_offset / 60;	/* seconds -> minutes */
2537 	new_offset_minutes = old_offset_minutes;
2538 	error = sysctl_securelevel_int(oldp, oldlenp, newp, newlen,
2539 	     &new_offset_minutes);
2540 	if (error)
2541 		return error;
2542 	if (new_offset_minutes < -24 * 60 || new_offset_minutes > 24 * 60)
2543 		return EINVAL;
2544 	if (new_offset_minutes == old_offset_minutes)
2545 		return 0;
2546 
2547 	utc_offset = new_offset_minutes * 60;	/* minutes -> seconds */
2548 	adjustment_seconds = (new_offset_minutes - old_offset_minutes) * 60;
2549 
2550 	nanotime(&now);
2551 	adjusted = now;
2552 	adjusted.tv_sec -= adjustment_seconds;
2553 	tc_setrealtimeclock(&adjusted);
2554 	resettodr();
2555 
2556 	return 0;
2557 }
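
/*
 * Worked example of the arithmetic above (illustration only): raising
 * kern.utc_offset from 0 to 60 minutes sets utc_offset to 3600 seconds
 * and steps the realtime clock back by 3600 seconds, so the sum of the
 * system time and utc_offset (the local time a battery-backed clock
 * would be keeping) is unchanged when resettodr() rewrites the RTC.
 */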
2558