/*	$OpenBSD: kern_sysctl.c,v 1.445 2024/08/26 08:24:25 mvs Exp $	*/
/*	$NetBSD: kern_sysctl.c,v 1.17 1996/05/20 17:49:05 mrg Exp $	*/

/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Mike Karels at Berkeley Software Design, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_sysctl.c	8.4 (Berkeley) 4/14/94
 */

/*
 * sysctl system call.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/vnode.h>
#include <sys/unistd.h>
#include <sys/buf.h>
#include <sys/clockintr.h>
#include <sys/tty.h>
#include <sys/disklabel.h>
#include <sys/disk.h>
#include <sys/sysctl.h>
#include <sys/msgbuf.h>
#include <sys/vmmeter.h>
#include <sys/namei.h>
#include <sys/exec.h>
#include <sys/mbuf.h>
#include <sys/percpu.h>
#include <sys/sensors.h>
#include <sys/pipe.h>
#include <sys/eventvar.h>
#include <sys/socketvar.h>
#include <sys/socket.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/pledge.h>
#include <sys/timetc.h>
#include <sys/evcount.h>
#include <sys/un.h>
#include <sys/unpcb.h>
#include <sys/sched.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/wait.h>
#include <sys/witness.h>

#include <uvm/uvm_extern.h>

#include <dev/cons.h>

#include <dev/usb/ucomvar.h>

#include <net/route.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/in_pcb.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <netinet6/ip6_var.h>

#ifdef DDB
#include <ddb/db_var.h>
#endif

#ifdef SYSVMSG
#include <sys/msg.h>
#endif
#ifdef SYSVSEM
#include <sys/sem.h>
#endif
#ifdef SYSVSHM
#include <sys/shm.h>
#endif

#include "audio.h"
#include "dt.h"
#include "pf.h"
#include "ucom.h"
#include "video.h"

extern struct forkstat forkstat;
extern struct nchstats nchstats;
extern int fscale;
extern fixpt_t ccpu;
extern long numvnodes;
extern int allowdt;
extern int audio_record_enable;
extern int video_record_enable;
extern int autoconf_serial;

int allowkmem;

int sysctl_securelevel(void *, size_t *, void *, size_t, struct proc *);
int sysctl_diskinit(int, struct proc *);
int sysctl_proc_args(int *, u_int, void *, size_t *, struct proc *);
int sysctl_proc_cwd(int *, u_int, void *, size_t *, struct proc *);
int sysctl_proc_nobroadcastkill(int *, u_int, void *, size_t, void *, size_t *,
    struct proc *);
int sysctl_proc_vmmap(int *, u_int, void *, size_t *, struct proc *);
int sysctl_intrcnt(int *, u_int, void *, size_t *);
int sysctl_sensors(int *, u_int, void *, size_t *, void *, size_t);
int sysctl_cptime2(int *, u_int, void *, size_t *, void *, size_t);
int sysctl_audio(int *, u_int, void *, size_t *, void *, size_t);
int sysctl_video(int *, u_int, void *, size_t *, void *, size_t);
int sysctl_cpustats(int *, u_int, void *, size_t *, void *, size_t);
int sysctl_utc_offset(void *, size_t *, void *, size_t);
int sysctl_hwbattery(int *, u_int, void *, size_t *, void *, size_t);

void fill_file(struct kinfo_file *, struct file *, struct filedesc *, int,
    struct vnode *, struct process *, struct proc *, struct socket *, int);
void fill_kproc(struct process *, struct kinfo_proc *, struct proc *, int);

int kern_sysctl_locked(int *, u_int, void *, size_t *, void *, size_t,
    struct proc *);
int hw_sysctl_locked(int *, u_int, void *, size_t *,void *, size_t,
    struct proc *);

int (*cpu_cpuspeed)(int *);

/*
 * Lock to avoid too many processes vslocking a large amount of memory
 * at the same time.
 */
struct rwlock sysctl_lock = RWLOCK_INITIALIZER("sysctllk");
struct rwlock sysctl_disklock = RWLOCK_INITIALIZER("sysctldlk");

int
sysctl_vslock(void *addr, size_t len)
{
	int error;

	error = rw_enter(&sysctl_lock, RW_WRITE|RW_INTR);
	if (error)
		return (error);
	KERNEL_LOCK();

	if (addr) {
		if (atop(len) > uvmexp.wiredmax - uvmexp.wired) {
			error = ENOMEM;
			goto out;
		}
		error = uvm_vslock(curproc, addr, len, PROT_READ | PROT_WRITE);
		if (error)
			goto out;
	}

	return (0);
out:
	KERNEL_UNLOCK();
	rw_exit_write(&sysctl_lock);
	return (error);
}

void
sysctl_vsunlock(void *addr, size_t len)
{
	KERNEL_ASSERT_LOCKED();

	if (addr)
		uvm_vsunlock(curproc, addr, len);
	KERNEL_UNLOCK();
	rw_exit_write(&sysctl_lock);
}

int
sys_sysctl(struct proc *p, void *v, register_t *retval)
{
	struct sys_sysctl_args /* {
		syscallarg(const int *) name;
		syscallarg(u_int) namelen;
		syscallarg(void *) old;
		syscallarg(size_t *) oldlenp;
		syscallarg(void *) new;
		syscallarg(size_t) newlen;
	} */ *uap = v;
	int error, dolock = 1;
	size_t savelen = 0, oldlen = 0;
	sysctlfn *fn;
	int name[CTL_MAXNAME];

	if (SCARG(uap, new) != NULL &&
	    (error = suser(p)))
		return (error);
	/*
	 * all top-level sysctl names are non-terminal
	 */
	if (SCARG(uap, namelen) > CTL_MAXNAME || SCARG(uap, namelen) < 2)
		return (EINVAL);
	error = copyin(SCARG(uap, name), name,
	    SCARG(uap, namelen) * sizeof(int));
	if (error)
		return (error);

	error = pledge_sysctl(p, SCARG(uap, namelen),
	    name, SCARG(uap, new));
	if (error)
		return (error);

	switch (name[0]) {
	case CTL_KERN:
		dolock = 0;
		fn = kern_sysctl;
		break;
	case CTL_HW:
		dolock = 0;
		fn = hw_sysctl;
		break;
	case CTL_VM:
		fn = uvm_sysctl;
		break;
	case CTL_NET:
		dolock = 0;
		fn = net_sysctl;
		break;
	case CTL_FS:
		fn = fs_sysctl;
		break;
	case CTL_VFS:
		fn = vfs_sysctl;
		break;
	case CTL_MACHDEP:
		fn = cpu_sysctl;
		break;
#ifdef DEBUG_SYSCTL
	case CTL_DEBUG:
		fn = debug_sysctl;
		break;
#endif
#ifdef DDB
	case CTL_DDB:
		fn = ddb_sysctl;
		break;
#endif
	default:
		return (EOPNOTSUPP);
	}

	if (SCARG(uap, oldlenp) &&
	    (error = copyin(SCARG(uap, oldlenp), &oldlen, sizeof(oldlen))))
		return (error);

	if (dolock) {
		error = sysctl_vslock(SCARG(uap, old), oldlen);
		if (error)
			return (error);
		savelen = oldlen;
	}
	error = (*fn)(&name[1], SCARG(uap, namelen) - 1, SCARG(uap, old),
	    &oldlen, SCARG(uap, new), SCARG(uap, newlen), p);
	if (dolock)
		sysctl_vsunlock(SCARG(uap, old), savelen);

	if (error)
		return (error);
	if (SCARG(uap, oldlenp))
		error = copyout(&oldlen, SCARG(uap, oldlenp), sizeof(oldlen));
	return (error);
}
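
/*
 * Illustrative userland sketch (an assumption added for documentation, not
 * part of the kernel build): sys_sysctl() above is reached through the
 * sysctl(2) interface with a MIB vector, e.g. to read the KERN_OSTYPE
 * string handled further below:
 *
 *	int mib[2] = { CTL_KERN, KERN_OSTYPE };
 *	char buf[64];
 *	size_t len = sizeof(buf);
 *
 *	if (sysctl(mib, 2, buf, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 *	printf("%s\n", buf);		(prints the ostype string)
 */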

/*
 * Attributes stored in the kernel.
 */
char hostname[MAXHOSTNAMELEN];
int hostnamelen;
char domainname[MAXHOSTNAMELEN];
int domainnamelen;
int hostid;
char *disknames = NULL;
size_t disknameslen;
struct diskstats *diskstats = NULL;
size_t diskstatslen;
int securelevel;

/* morally const values reported by sysctl_bounded_arr */
static int arg_max = ARG_MAX;
static int openbsd = OpenBSD;
static int posix_version = _POSIX_VERSION;
static int ngroups_max = NGROUPS_MAX;
static int int_zero = 0;
static int int_one = 1;
static int maxpartitions = MAXPARTITIONS;
static int raw_part = RAW_PART;

extern int somaxconn, sominconn;
extern int nosuidcoredump;
extern int maxlocksperuid;
extern int uvm_wxabort;
extern int global_ptrace;

const struct sysctl_bounded_args kern_vars[] = {
	{KERN_OSREV, &openbsd, SYSCTL_INT_READONLY},
	{KERN_MAXVNODES, &maxvnodes, 0, INT_MAX},
	{KERN_MAXPROC, &maxprocess, 0, INT_MAX},
	{KERN_MAXFILES, &maxfiles, 0, INT_MAX},
	{KERN_NFILES, &numfiles, SYSCTL_INT_READONLY},
	{KERN_TTYCOUNT, &tty_count, SYSCTL_INT_READONLY},
	{KERN_ARGMAX, &arg_max, SYSCTL_INT_READONLY},
	{KERN_POSIX1, &posix_version, SYSCTL_INT_READONLY},
	{KERN_NGROUPS, &ngroups_max, SYSCTL_INT_READONLY},
	{KERN_JOB_CONTROL, &int_one, SYSCTL_INT_READONLY},
	{KERN_SAVED_IDS, &int_one, SYSCTL_INT_READONLY},
	{KERN_MAXPARTITIONS, &maxpartitions, SYSCTL_INT_READONLY},
	{KERN_RAWPARTITION, &raw_part, SYSCTL_INT_READONLY},
	{KERN_MAXTHREAD, &maxthread, 0, INT_MAX},
	{KERN_NTHREADS, &nthreads, SYSCTL_INT_READONLY},
	{KERN_SOMAXCONN, &somaxconn, 0, SHRT_MAX},
	{KERN_SOMINCONN, &sominconn, 0, SHRT_MAX},
	{KERN_NOSUIDCOREDUMP, &nosuidcoredump, 0, 3},
	{KERN_FSYNC, &int_one, SYSCTL_INT_READONLY},
	{KERN_SYSVMSG,
#ifdef SYSVMSG
	    &int_one,
#else
	    &int_zero,
#endif
	    SYSCTL_INT_READONLY},
	{KERN_SYSVSEM,
#ifdef SYSVSEM
	    &int_one,
#else
	    &int_zero,
#endif
	    SYSCTL_INT_READONLY},
	{KERN_SYSVSHM,
#ifdef SYSVSHM
	    &int_one,
#else
	    &int_zero,
#endif
	    SYSCTL_INT_READONLY},
	{KERN_FSCALE, &fscale, SYSCTL_INT_READONLY},
	{KERN_CCPU, &ccpu, SYSCTL_INT_READONLY},
	{KERN_NPROCS, &nprocesses, SYSCTL_INT_READONLY},
	{KERN_SPLASSERT, &splassert_ctl, 0, 3},
	{KERN_MAXLOCKSPERUID, &maxlocksperuid, 0, INT_MAX},
	{KERN_WXABORT, &uvm_wxabort, 0, 1},
	{KERN_NETLIVELOCKS, &int_zero, SYSCTL_INT_READONLY},
#ifdef PTRACE
	{KERN_GLOBAL_PTRACE, &global_ptrace, 0, 1},
#endif
	{KERN_AUTOCONF_SERIAL, &autoconf_serial, SYSCTL_INT_READONLY},
};

int
kern_sysctl_dirs(int top_name, int *name, u_int namelen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen, struct proc *p)
{
	switch (top_name) {
#ifndef SMALL_KERNEL
	case KERN_PROC:
		return (sysctl_doproc(name, namelen, oldp, oldlenp));
	case KERN_PROC_ARGS:
		return (sysctl_proc_args(name, namelen, oldp, oldlenp, p));
	case KERN_PROC_CWD:
		return (sysctl_proc_cwd(name, namelen, oldp, oldlenp, p));
	case KERN_PROC_NOBROADCASTKILL:
		return (sysctl_proc_nobroadcastkill(name, namelen,
		    newp, newlen, oldp, oldlenp, p));
	case KERN_PROC_VMMAP:
		return (sysctl_proc_vmmap(name, namelen, oldp, oldlenp, p));
	case KERN_FILE:
		return (sysctl_file(name, namelen, oldp, oldlenp, p));
#endif
#if defined(GPROF) || defined(DDBPROF)
	case KERN_PROF:
		return (sysctl_doprof(name, namelen, oldp, oldlenp,
		    newp, newlen));
#endif
	case KERN_MALLOCSTATS:
		return (sysctl_malloc(name, namelen, oldp, oldlenp,
		    newp, newlen, p));
	case KERN_TTY:
		return (sysctl_tty(name, namelen, oldp, oldlenp,
		    newp, newlen));
	case KERN_POOL:
		return (sysctl_dopool(name, namelen, oldp, oldlenp));
#if defined(SYSVMSG) || defined(SYSVSEM) || defined(SYSVSHM)
	case KERN_SYSVIPC_INFO:
		return (sysctl_sysvipc(name, namelen, oldp, oldlenp));
#endif
#ifdef SYSVSEM
	case KERN_SEMINFO:
		return (sysctl_sysvsem(name, namelen, oldp, oldlenp,
		    newp, newlen));
#endif
#ifdef SYSVSHM
	case KERN_SHMINFO:
		return (sysctl_sysvshm(name, namelen, oldp, oldlenp,
		    newp, newlen));
#endif
#ifndef SMALL_KERNEL
	case KERN_INTRCNT:
		return (sysctl_intrcnt(name, namelen, oldp, oldlenp));
	case KERN_WATCHDOG:
		return (sysctl_wdog(name, namelen, oldp, oldlenp,
		    newp, newlen));
#endif
#ifndef SMALL_KERNEL
	case KERN_EVCOUNT:
		return (evcount_sysctl(name, namelen, oldp, oldlenp,
		    newp, newlen));
#endif
	case KERN_TIMECOUNTER:
		return (sysctl_tc(name, namelen, oldp, oldlenp, newp, newlen));
	case KERN_CPTIME2:
		return (sysctl_cptime2(name, namelen, oldp, oldlenp,
		    newp, newlen));
#ifdef WITNESS
	case KERN_WITNESSWATCH:
		return witness_sysctl_watch(oldp, oldlenp, newp, newlen);
	case KERN_WITNESS:
		return witness_sysctl(name, namelen, oldp, oldlenp,
		    newp, newlen);
#endif
#if NVIDEO > 0
	case KERN_VIDEO:
		return (sysctl_video(name, namelen, oldp, oldlenp,
		    newp, newlen));
#endif
	case KERN_CPUSTATS:
		return (sysctl_cpustats(name, namelen, oldp, oldlenp,
		    newp, newlen));
	case KERN_CLOCKINTR:
		return sysctl_clockintr(name, namelen, oldp, oldlenp, newp,
		    newlen);
	default:
		return (ENOTDIR);	/* overloaded */
	}
}
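
/*
 * Example of the second-level dispatch above (illustrative, not from the
 * original source): a four-level request such as
 *
 *	int mib[4] = { CTL_KERN, KERN_PROC_ARGS, pid, KERN_PROC_ARGV };
 *
 * arrives in kern_sysctl() below with more than one name left under
 * CTL_KERN, so it is handed to kern_sysctl_dirs(), which dispatches the
 * remaining { pid, KERN_PROC_ARGV } levels to sysctl_proc_args().
 */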

/*
 * kernel related system variables.
 */
int
kern_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen, struct proc *p)
{
	int error;
	size_t savelen;

	/* dispatch the non-terminal nodes first */
	if (namelen != 1) {
		switch (name[0]) {
#if NAUDIO > 0
		case KERN_AUDIO:
			return (sysctl_audio(name + 1, namelen - 1,
			    oldp, oldlenp, newp, newlen));
#endif
		default:
			break;
		}

		savelen = *oldlenp;
		if ((error = sysctl_vslock(oldp, savelen)))
			return (error);
		error = kern_sysctl_dirs(name[0], name + 1, namelen - 1,
		    oldp, oldlenp, newp, newlen, p);
		sysctl_vsunlock(oldp, savelen);
		return (error);
	}

	switch (name[0]) {
	case KERN_OSTYPE:
		return (sysctl_rdstring(oldp, oldlenp, newp, ostype));
	case KERN_OSRELEASE:
		return (sysctl_rdstring(oldp, oldlenp, newp, osrelease));
	case KERN_OSVERSION:
		return (sysctl_rdstring(oldp, oldlenp, newp, osversion));
	case KERN_VERSION:
		return (sysctl_rdstring(oldp, oldlenp, newp, version));
	case KERN_NUMVNODES:	/* XXX numvnodes is a long */
		return (sysctl_rdint(oldp, oldlenp, newp, numvnodes));
#if NDT > 0
	case KERN_ALLOWDT:
		return (sysctl_securelevel_int(oldp, oldlenp, newp, newlen,
		    &allowdt));
#endif
	case KERN_HOSTID:
		return (sysctl_int(oldp, oldlenp, newp, newlen, &hostid));
	case KERN_CLOCKRATE:
		return (sysctl_clockrate(oldp, oldlenp, newp));
	case KERN_BOOTTIME: {
		struct timeval bt;
		memset(&bt, 0, sizeof bt);
		microboottime(&bt);
		return (sysctl_rdstruct(oldp, oldlenp, newp, &bt, sizeof bt));
	}
	case KERN_MBSTAT: {
		extern struct cpumem *mbstat;
		uint64_t counters[MBSTAT_COUNT];
		struct mbstat mbs;
		unsigned int i;

		memset(&mbs, 0, sizeof(mbs));
		counters_read(mbstat, counters, MBSTAT_COUNT, NULL);
		for (i = 0; i < MBSTAT_TYPES; i++)
			mbs.m_mtypes[i] = counters[i];

		mbs.m_drops = counters[MBSTAT_DROPS];
		mbs.m_wait = counters[MBSTAT_WAIT];
		mbs.m_drain = counters[MBSTAT_DRAIN];

		return (sysctl_rdstruct(oldp, oldlenp, newp,
		    &mbs, sizeof(mbs)));
	}
	case KERN_MSGBUFSIZE:
	case KERN_CONSBUFSIZE: {
		struct msgbuf *mp;
		mp = (name[0] == KERN_MSGBUFSIZE) ? msgbufp : consbufp;
		/*
		 * deal with cases where the message buffer has
		 * become corrupted.
		 */
		if (!mp || mp->msg_magic != MSG_MAGIC)
			return (ENXIO);
		return (sysctl_rdint(oldp, oldlenp, newp, mp->msg_bufs));
	}
	case KERN_OSREV:
	case KERN_MAXPROC:
	case KERN_MAXFILES:
	case KERN_NFILES:
	case KERN_TTYCOUNT:
	case KERN_ARGMAX:
	case KERN_POSIX1:
	case KERN_NGROUPS:
	case KERN_JOB_CONTROL:
	case KERN_SAVED_IDS:
	case KERN_MAXPARTITIONS:
	case KERN_RAWPARTITION:
	case KERN_MAXTHREAD:
	case KERN_NTHREADS:
	case KERN_SOMAXCONN:
	case KERN_SOMINCONN:
	case KERN_FSYNC:
	case KERN_SYSVMSG:
	case KERN_SYSVSEM:
	case KERN_SYSVSHM:
	case KERN_FSCALE:
	case KERN_CCPU:
	case KERN_NPROCS:
	case KERN_NETLIVELOCKS:
	case KERN_AUTOCONF_SERIAL:
		return (sysctl_bounded_arr(kern_vars, nitems(kern_vars), name,
		    namelen, oldp, oldlenp, newp, newlen));
	}

	savelen = *oldlenp;
	if ((error = sysctl_vslock(oldp, savelen)))
		return (error);
	error = kern_sysctl_locked(name, namelen, oldp, oldlenp,
	    newp, newlen, p);
	sysctl_vsunlock(oldp, savelen);

	return (error);
}

int
kern_sysctl_locked(int *name, u_int namelen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen, struct proc *p)
{
	int error, stackgap;
	dev_t dev;
	extern int pool_debug;

	switch (name[0]) {
	case KERN_SECURELVL:
		return (sysctl_securelevel(oldp, oldlenp, newp, newlen, p));
	case KERN_ALLOWKMEM:
		return (sysctl_securelevel_int(oldp, oldlenp, newp, newlen,
		    &allowkmem));
	case KERN_HOSTNAME:
		error = sysctl_tstring(oldp, oldlenp, newp, newlen,
		    hostname, sizeof(hostname));
		if (newp && !error)
			hostnamelen = newlen;
		return (error);
	case KERN_DOMAINNAME:
		if (securelevel >= 1 && domainnamelen && newp)
			error = EPERM;
		else
			error = sysctl_tstring(oldp, oldlenp, newp, newlen,
			    domainname, sizeof(domainname));
		if (newp && !error)
			domainnamelen = newlen;
		return (error);
	case KERN_CONSBUF:
		if ((error = suser(p)))
			return (error);
		/* FALLTHROUGH */
	case KERN_MSGBUF: {
		struct msgbuf *mp;
		mp = (name[0] == KERN_MSGBUF) ? msgbufp : consbufp;
		/*
		 * deal with cases where the message buffer has
		 * become corrupted.
		 */
		if (!mp || mp->msg_magic != MSG_MAGIC)
			return (ENXIO);
		return (sysctl_rdstruct(oldp, oldlenp, newp, mp,
		    mp->msg_bufs + offsetof(struct msgbuf, msg_bufc)));
	}
	case KERN_CPTIME:
	{
		CPU_INFO_ITERATOR cii;
		struct cpu_info *ci;
		long cp_time[CPUSTATES];
		int i, n = 0;

		memset(cp_time, 0, sizeof(cp_time));

		CPU_INFO_FOREACH(cii, ci) {
			if (!cpu_is_online(ci))
				continue;
			n++;
			for (i = 0; i < CPUSTATES; i++)
				cp_time[i] += ci->ci_schedstate.spc_cp_time[i];
		}

		for (i = 0; i < CPUSTATES; i++)
			cp_time[i] /= n;

		return (sysctl_rdstruct(oldp, oldlenp, newp, &cp_time,
		    sizeof(cp_time)));
	}
	case KERN_NCHSTATS:
		return (sysctl_rdstruct(oldp, oldlenp, newp, &nchstats,
		    sizeof(struct nchstats)));
	case KERN_FORKSTAT:
		return (sysctl_rdstruct(oldp, oldlenp, newp, &forkstat,
		    sizeof(struct forkstat)));
	case KERN_STACKGAPRANDOM:
		stackgap = stackgap_random;
		error = sysctl_int(oldp, oldlenp, newp, newlen, &stackgap);
		if (error)
			return (error);
		/*
		 * Safety harness.
		 */
		if ((stackgap < ALIGNBYTES && stackgap != 0) ||
		    !powerof2(stackgap) || stackgap >= MAXSSIZ)
			return (EINVAL);
		stackgap_random = stackgap;
		return (0);
	case KERN_MAXCLUSTERS: {
		int val = nmbclust;
		error = sysctl_int(oldp, oldlenp, newp, newlen, &val);
		if (error == 0 && val != nmbclust)
			error = nmbclust_update(val);
		return (error);
	}
	case KERN_CACHEPCT: {
		u_int64_t dmapages;
		int opct, pgs;
		opct = bufcachepercent;
		error = sysctl_int(oldp, oldlenp, newp, newlen,
		    &bufcachepercent);
		if (error)
			return(error);
		if (bufcachepercent > 90 || bufcachepercent < 5) {
			bufcachepercent = opct;
			return (EINVAL);
		}
		dmapages = uvm_pagecount(&dma_constraint);
		if (bufcachepercent != opct) {
			pgs = bufcachepercent * dmapages / 100;
			bufadjust(pgs); /* adjust bufpages */
			bufhighpages = bufpages; /* set high water mark */
		}
		return(0);
	}
	case KERN_CONSDEV:
		if (cn_tab != NULL)
			dev = cn_tab->cn_dev;
		else
			dev = NODEV;
		return sysctl_rdstruct(oldp, oldlenp, newp, &dev, sizeof(dev));
	case KERN_POOL_DEBUG: {
		int old_pool_debug = pool_debug;

		error = sysctl_int(oldp, oldlenp, newp, newlen,
		    &pool_debug);
		if (error == 0 && pool_debug != old_pool_debug)
			pool_reclaim_all();
		return (error);
	}
#if NPF > 0
	case KERN_PFSTATUS:
		return (pf_sysctl(oldp, oldlenp, newp, newlen));
#endif
	case KERN_TIMEOUT_STATS:
		return (timeout_sysctl(oldp, oldlenp, newp, newlen));
	case KERN_UTC_OFFSET:
		return (sysctl_utc_offset(oldp, oldlenp, newp, newlen));
	default:
		return (sysctl_bounded_arr(kern_vars, nitems(kern_vars), name,
		    namelen, oldp, oldlenp, newp, newlen));
	}
	/* NOTREACHED */
}

/*
 * hardware related system variables.
 */
char *hw_vendor, *hw_prod, *hw_uuid, *hw_serial, *hw_ver;
int allowpowerdown = 1;
int hw_power = 1;

/* morally const values reported by sysctl_bounded_arr */
static int byte_order = BYTE_ORDER;

const struct sysctl_bounded_args hw_vars[] = {
	{HW_NCPU, &ncpus, SYSCTL_INT_READONLY},
	{HW_NCPUFOUND, &ncpusfound, SYSCTL_INT_READONLY},
	{HW_BYTEORDER, &byte_order, SYSCTL_INT_READONLY},
	{HW_PAGESIZE, &uvmexp.pagesize, SYSCTL_INT_READONLY},
	{HW_DISKCOUNT, &disk_count, SYSCTL_INT_READONLY},
	{HW_POWER, &hw_power, SYSCTL_INT_READONLY},
};

int
hw_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen, struct proc *p)
{
	extern char machine[], cpu_model[];
	int err;

	/*
	 * all sysctl names at this level except sensors and battery
	 * are terminal
	 */
	if (name[0] != HW_SENSORS && name[0] != HW_BATTERY && namelen != 1)
		return (ENOTDIR);	/* overloaded */

	switch (name[0]) {
	case HW_MACHINE:
		return (sysctl_rdstring(oldp, oldlenp, newp, machine));
	case HW_MODEL:
		return (sysctl_rdstring(oldp, oldlenp, newp, cpu_model));
	case HW_NCPUONLINE:
		return (sysctl_rdint(oldp, oldlenp, newp,
		    sysctl_hwncpuonline()));
	case HW_PHYSMEM:
		return (sysctl_rdint(oldp, oldlenp, newp, ptoa(physmem)));
	case HW_USERMEM:
		return (sysctl_rdint(oldp, oldlenp, newp,
		    ptoa(physmem - uvmexp.wired)));
	case HW_DISKNAMES:
	case HW_DISKSTATS:
	case HW_CPUSPEED:
#ifndef SMALL_KERNEL
	case HW_SENSORS:
	case HW_SETPERF:
	case HW_PERFPOLICY:
	case HW_BATTERY:
#endif /* !SMALL_KERNEL */
	case HW_ALLOWPOWERDOWN:
	case HW_UCOMNAMES:
#ifdef __HAVE_CPU_TOPOLOGY
	case HW_SMT:
#endif
	{
		size_t savelen = *oldlenp;
		if ((err = sysctl_vslock(oldp, savelen)))
			return (err);
		err = hw_sysctl_locked(name, namelen, oldp, oldlenp,
		    newp, newlen, p);
		sysctl_vsunlock(oldp, savelen);
		return (err);
	}
	case HW_VENDOR:
		if (hw_vendor)
			return (sysctl_rdstring(oldp, oldlenp, newp,
			    hw_vendor));
		else
			return (EOPNOTSUPP);
	case HW_PRODUCT:
		if (hw_prod)
			return (sysctl_rdstring(oldp, oldlenp, newp, hw_prod));
		else
			return (EOPNOTSUPP);
	case HW_VERSION:
		if (hw_ver)
			return (sysctl_rdstring(oldp, oldlenp, newp, hw_ver));
		else
			return (EOPNOTSUPP);
	case HW_SERIALNO:
		if (hw_serial)
			return (sysctl_rdstring(oldp, oldlenp, newp,
			    hw_serial));
		else
			return (EOPNOTSUPP);
	case HW_UUID:
		if (hw_uuid)
			return (sysctl_rdstring(oldp, oldlenp, newp, hw_uuid));
		else
			return (EOPNOTSUPP);
	case HW_PHYSMEM64:
		return (sysctl_rdquad(oldp, oldlenp, newp,
		    ptoa((psize_t)physmem)));
	case HW_USERMEM64:
		return (sysctl_rdquad(oldp, oldlenp, newp,
		    ptoa((psize_t)physmem - uvmexp.wired)));
	default:
		return sysctl_bounded_arr(hw_vars, nitems(hw_vars), name,
		    namelen, oldp, oldlenp, newp, newlen);
	}
	/* NOTREACHED */
}

int
hw_sysctl_locked(int *name, u_int namelen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen, struct proc *p)
{
	int err, cpuspeed;

	switch (name[0]) {
	case HW_DISKNAMES:
		err = sysctl_diskinit(0, p);
		if (err)
			return err;
		if (disknames)
			return (sysctl_rdstring(oldp, oldlenp, newp,
			    disknames));
		else
			return (sysctl_rdstring(oldp, oldlenp, newp, ""));
	case HW_DISKSTATS:
		err = sysctl_diskinit(1, p);
		if (err)
			return err;
		return (sysctl_rdstruct(oldp, oldlenp, newp, diskstats,
		    disk_count * sizeof(struct diskstats)));
	case HW_CPUSPEED:
		if (!cpu_cpuspeed)
			return (EOPNOTSUPP);
		err = cpu_cpuspeed(&cpuspeed);
		if (err)
			return err;
		return (sysctl_rdint(oldp, oldlenp, newp, cpuspeed));
#ifndef SMALL_KERNEL
	case HW_SENSORS:
		return (sysctl_sensors(name + 1, namelen - 1, oldp, oldlenp,
		    newp, newlen));
	case HW_SETPERF:
		return (sysctl_hwsetperf(oldp, oldlenp, newp, newlen));
	case HW_PERFPOLICY:
		return (sysctl_hwperfpolicy(oldp, oldlenp, newp, newlen));
#endif /* !SMALL_KERNEL */
	case HW_ALLOWPOWERDOWN:
		return (sysctl_securelevel_int(oldp, oldlenp, newp, newlen,
		    &allowpowerdown));
	case HW_UCOMNAMES: {
		const char *str = "";
#if NUCOM > 0
		str = sysctl_ucominit();
#endif /* NUCOM > 0 */
		return (sysctl_rdstring(oldp, oldlenp, newp, str));
	}
#ifdef __HAVE_CPU_TOPOLOGY
	case HW_SMT:
		return (sysctl_hwsmt(oldp, oldlenp, newp, newlen));
#endif
#ifndef SMALL_KERNEL
	case HW_BATTERY:
		return (sysctl_hwbattery(name + 1, namelen - 1, oldp, oldlenp,
		    newp, newlen));
#endif
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}

#ifndef SMALL_KERNEL

int hw_battery_chargemode;
int hw_battery_chargestart;
int hw_battery_chargestop;
int (*hw_battery_setchargemode)(int);
int (*hw_battery_setchargestart)(int);
int (*hw_battery_setchargestop)(int);

int
sysctl_hwchargemode(void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
	int mode = hw_battery_chargemode;
	int error;

	if (!hw_battery_setchargemode)
		return EOPNOTSUPP;

	error = sysctl_int_bounded(oldp, oldlenp, newp, newlen,
	    &mode, -1, 1);
	if (error)
		return error;

	if (newp != NULL)
		error = hw_battery_setchargemode(mode);

	return error;
}

int
sysctl_hwchargestart(void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
	int start = hw_battery_chargestart;
	int error;

	if (!hw_battery_setchargestart)
		return EOPNOTSUPP;

	error = sysctl_int_bounded(oldp, oldlenp, newp, newlen,
	    &start, 0, 100);
	if (error)
		return error;

	if (newp != NULL)
		error = hw_battery_setchargestart(start);

	return error;
}

int
sysctl_hwchargestop(void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
	int stop = hw_battery_chargestop;
	int error;

	if (!hw_battery_setchargestop)
		return EOPNOTSUPP;

	error = sysctl_int_bounded(oldp, oldlenp, newp, newlen,
	    &stop, 0, 100);
	if (error)
		return error;

	if (newp != NULL)
		error = hw_battery_setchargestop(stop);

	return error;
}

int
sysctl_hwbattery(int *name, u_int namelen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	if (namelen != 1)
		return (ENOTDIR);

	switch (name[0]) {
	case HW_BATTERY_CHARGEMODE:
		return (sysctl_hwchargemode(oldp, oldlenp, newp, newlen));
	case HW_BATTERY_CHARGESTART:
		return (sysctl_hwchargestart(oldp, oldlenp, newp, newlen));
	case HW_BATTERY_CHARGESTOP:
		return (sysctl_hwchargestop(oldp, oldlenp, newp, newlen));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}

#endif
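
/*
 * Illustrative sketch (an assumption, not from this file): machine-dependent
 * or driver code that can actually control charging publishes its hooks
 * through the function pointers declared above; until it does, the handlers
 * return EOPNOTSUPP.  A hypothetical driver hook might look like:
 *
 *	int
 *	mydrv_setchargestop(int stop)
 *	{
 *		hw_battery_chargestop = stop;	(plus hardware programming)
 *		return (0);
 *	}
 *
 *	hw_battery_setchargestop = mydrv_setchargestop;
 */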

#ifdef DEBUG_SYSCTL
/*
 * Debugging related system variables.
 */
extern struct ctldebug debug_vfs_busyprt;
struct ctldebug debug1, debug2, debug3, debug4;
struct ctldebug debug5, debug6, debug7, debug8, debug9;
struct ctldebug debug10, debug11, debug12, debug13, debug14;
struct ctldebug debug15, debug16, debug17, debug18, debug19;
static struct ctldebug *debugvars[CTL_DEBUG_MAXID] = {
	&debug_vfs_busyprt,
	&debug1, &debug2, &debug3, &debug4,
	&debug5, &debug6, &debug7, &debug8, &debug9,
	&debug10, &debug11, &debug12, &debug13, &debug14,
	&debug15, &debug16, &debug17, &debug18, &debug19,
};
int
debug_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen, struct proc *p)
{
	struct ctldebug *cdp;

	/* all sysctl names at this level are name and field */
	if (namelen != 2)
		return (ENOTDIR);	/* overloaded */
	if (name[0] < 0 || name[0] >= nitems(debugvars))
		return (EOPNOTSUPP);
	cdp = debugvars[name[0]];
	if (cdp->debugname == 0)
		return (EOPNOTSUPP);
	switch (name[1]) {
	case CTL_DEBUG_NAME:
		return (sysctl_rdstring(oldp, oldlenp, newp, cdp->debugname));
	case CTL_DEBUG_VALUE:
		return (sysctl_int(oldp, oldlenp, newp, newlen, cdp->debugvar));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}
#endif /* DEBUG_SYSCTL */

/*
 * Reads, or writes that lower the value
 */
int
sysctl_int_lower(void *oldp, size_t *oldlenp, void *newp, size_t newlen,
    int *valp)
{
	unsigned int oldval, newval;
	int error;

	if (oldp && *oldlenp < sizeof(int))
		return (ENOMEM);
	if (newp && newlen != sizeof(int))
		return (EINVAL);
	*oldlenp = sizeof(int);

	if (newp) {
		if ((error = copyin(newp, &newval, sizeof(int))))
			return (error);
		do {
			oldval = atomic_load_int(valp);
			if (oldval < (unsigned int)newval)
				return (EPERM);	/* do not allow raising */
		} while (atomic_cas_uint(valp, oldval, newval) != oldval);

		if (oldp) {
			/* new value has been set although user gets error */
			if ((error = copyout(&oldval, oldp, sizeof(int))))
				return (error);
		}
	} else if (oldp) {
		oldval = atomic_load_int(valp);

		if ((error = copyout(&oldval, oldp, sizeof(int))))
			return (error);
	}

	return (0);
}

/*
 * Validate parameters and get old / set new parameters
 * for an integer-valued sysctl function.
 */
int
sysctl_int(void *oldp, size_t *oldlenp, void *newp, size_t newlen, int *valp)
{
	return (sysctl_int_bounded(oldp, oldlenp, newp, newlen, valp,
	    INT_MIN, INT_MAX));
}
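
/*
 * Typical use from a node handler (compare the KERN_HOSTID case above):
 * the handler forwards the caller's old/new buffers together with the
 * address of the kernel variable being exported, e.g.
 *
 *	return (sysctl_int(oldp, oldlenp, newp, newlen, &hostid));
 */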

/*
 * As above, but read-only.
 */
int
sysctl_rdint(void *oldp, size_t *oldlenp, void *newp, int val)
{
	int error = 0;

	if (oldp && *oldlenp < sizeof(int))
		return (ENOMEM);
	if (newp)
		return (EPERM);
	*oldlenp = sizeof(int);
	if (oldp)
		error = copyout((caddr_t)&val, oldp, sizeof(int));
	return (error);
}

int
sysctl_securelevel(void *oldp, size_t *oldlenp, void *newp, size_t newlen,
    struct proc *p)
{
	int oldval, newval;
	int error;

	if (oldp && *oldlenp < sizeof(int))
		return (ENOMEM);
	if (newp && newlen != sizeof(int))
		return (EINVAL);
	*oldlenp = sizeof(int);

	if (newp) {
		if ((error = copyin(newp, &newval, sizeof(int))))
			return (error);
		do {
			oldval = atomic_load_int(&securelevel);
			if ((oldval > 0 || newval < -1) && newval < oldval &&
			    p->p_p->ps_pid != 1)
				return (EPERM);
		} while (atomic_cas_uint(&securelevel, oldval, newval) !=
		    oldval);

		if (oldp) {
			/* new value has been set although user gets error */
			if ((error = copyout(&oldval, oldp, sizeof(int))))
				return (error);
		}
	} else if (oldp) {
		oldval = atomic_load_int(&securelevel);

		if ((error = copyout(&oldval, oldp, sizeof(int))))
			return (error);
	}

	return (0);
}

/*
 * Selects between sysctl_rdint and sysctl_int according to securelevel.
 */
int
sysctl_securelevel_int(void *oldp, size_t *oldlenp, void *newp, size_t newlen,
    int *valp)
{
	if (atomic_load_int(&securelevel) > 0)
		return (sysctl_rdint(oldp, oldlenp, newp, *valp));
	return (sysctl_int(oldp, oldlenp, newp, newlen, valp));
}

/*
 * Read-only or bounded integer values.
 */
int
sysctl_int_bounded(void *oldp, size_t *oldlenp, void *newp, size_t newlen,
    int *valp, int minimum, int maximum)
{
	int oldval, newval;
	int error;

	/* read only */
	if (newp != NULL && minimum > maximum)
		return (EPERM);

	if (oldp != NULL && *oldlenp < sizeof(int))
		return (ENOMEM);
	if (newp != NULL && newlen != sizeof(int))
		return (EINVAL);
	*oldlenp = sizeof(int);

	/* copyin() may sleep, call it first */
	if (newp != NULL) {
		if ((error = copyin(newp, &newval, sizeof(int))))
			return (error);
		/* outside limits */
		if (newval < minimum || maximum < newval)
			return (EINVAL);
	}
	if (oldp != NULL) {
		if (newp != NULL)
			oldval = atomic_swap_uint(valp, newval);
		else
			oldval = atomic_load_int(valp);
		if ((error = copyout(&oldval, oldp, sizeof(int)))) {
			/* new value has been set although user gets error */
			return (error);
		}
	} else if (newp != NULL)
		atomic_store_int(valp, newval);

	return (0);
}
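
/*
 * Example of how the bounds are used by the tables above: a kern_vars
 * entry such as
 *
 *	{KERN_SOMAXCONN, &somaxconn, 0, SHRT_MAX}
 *
 * makes sysctl_bounded_arr() below accept new values only within
 * [0, SHRT_MAX], while an entry whose minimum exceeds its maximum (the
 * SYSCTL_INT_READONLY case) rejects every write with EPERM.
 */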

/*
 * Array of read-only or bounded integer values.
 */
int
sysctl_bounded_arr(const struct sysctl_bounded_args *valpp, u_int valplen,
    int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{
	u_int i;
	if (namelen != 1)
		return (ENOTDIR);
	for (i = 0; i < valplen; ++i) {
		if (valpp[i].mib == name[0]) {
			return (sysctl_int_bounded(oldp, oldlenp, newp, newlen,
			    valpp[i].var, valpp[i].minimum, valpp[i].maximum));
		}
	}
	return (EOPNOTSUPP);
}

/*
 * Validate parameters and get old / set new parameters
 * for an integer-valued sysctl function.
 */
int
sysctl_quad(void *oldp, size_t *oldlenp, void *newp, size_t newlen,
    int64_t *valp)
{
	int error = 0;

	if (oldp && *oldlenp < sizeof(int64_t))
		return (ENOMEM);
	if (newp && newlen != sizeof(int64_t))
		return (EINVAL);
	*oldlenp = sizeof(int64_t);
	if (oldp)
		error = copyout(valp, oldp, sizeof(int64_t));
	if (error == 0 && newp)
		error = copyin(newp, valp, sizeof(int64_t));
	return (error);
}

/*
 * As above, but read-only.
 */
int
sysctl_rdquad(void *oldp, size_t *oldlenp, void *newp, int64_t val)
{
	int error = 0;

	if (oldp && *oldlenp < sizeof(int64_t))
		return (ENOMEM);
	if (newp)
		return (EPERM);
	*oldlenp = sizeof(int64_t);
	if (oldp)
		error = copyout((caddr_t)&val, oldp, sizeof(int64_t));
	return (error);
}

/*
 * Validate parameters and get old / set new parameters
 * for a string-valued sysctl function.
 */
int
sysctl_string(void *oldp, size_t *oldlenp, void *newp, size_t newlen, char *str,
    size_t maxlen)
{
	return sysctl__string(oldp, oldlenp, newp, newlen, str, maxlen, 0);
}

int
sysctl_tstring(void *oldp, size_t *oldlenp, void *newp, size_t newlen,
    char *str, size_t maxlen)
{
	return sysctl__string(oldp, oldlenp, newp, newlen, str, maxlen, 1);
}

int
sysctl__string(void *oldp, size_t *oldlenp, void *newp, size_t newlen,
    char *str, size_t maxlen, int trunc)
{
	size_t len;
	int error = 0;

	len = strlen(str) + 1;
	if (oldp && *oldlenp < len) {
		if (trunc == 0 || *oldlenp == 0)
			return (ENOMEM);
	}
	if (newp && newlen >= maxlen)
		return (EINVAL);
	if (oldp) {
		if (trunc && *oldlenp < len) {
			len = *oldlenp;
			error = copyout(str, oldp, len - 1);
			if (error == 0)
				error = copyout("", (char *)oldp + len - 1, 1);
		} else {
			error = copyout(str, oldp, len);
		}
	}
	*oldlenp = len;
	if (error == 0 && newp) {
		error = copyin(newp, str, newlen);
		str[newlen] = 0;
	}
	return (error);
}

/*
 * As above, but read-only.
 */
int
sysctl_rdstring(void *oldp, size_t *oldlenp, void *newp, const char *str)
{
	size_t len;
	int error = 0;

	len = strlen(str) + 1;
	if (oldp && *oldlenp < len)
		return (ENOMEM);
	if (newp)
		return (EPERM);
	*oldlenp = len;
	if (oldp)
		error = copyout(str, oldp, len);
	return (error);
}

/*
 * Validate parameters and get old / set new parameters
 * for a structure oriented sysctl function.
1346 */ 1347 int 1348 sysctl_struct(void *oldp, size_t *oldlenp, void *newp, size_t newlen, void *sp, 1349 size_t len) 1350 { 1351 int error = 0; 1352 1353 if (oldp && *oldlenp < len) 1354 return (ENOMEM); 1355 if (newp && newlen > len) 1356 return (EINVAL); 1357 if (oldp) { 1358 *oldlenp = len; 1359 error = copyout(sp, oldp, len); 1360 } 1361 if (error == 0 && newp) 1362 error = copyin(newp, sp, len); 1363 return (error); 1364 } 1365 1366 /* 1367 * Validate parameters and get old parameters 1368 * for a structure oriented sysctl function. 1369 */ 1370 int 1371 sysctl_rdstruct(void *oldp, size_t *oldlenp, void *newp, const void *sp, 1372 size_t len) 1373 { 1374 int error = 0; 1375 1376 if (oldp && *oldlenp < len) 1377 return (ENOMEM); 1378 if (newp) 1379 return (EPERM); 1380 *oldlenp = len; 1381 if (oldp) 1382 error = copyout(sp, oldp, len); 1383 return (error); 1384 } 1385 1386 #ifndef SMALL_KERNEL 1387 void 1388 fill_file(struct kinfo_file *kf, struct file *fp, struct filedesc *fdp, 1389 int fd, struct vnode *vp, struct process *pr, struct proc *p, 1390 struct socket *so, int show_pointers) 1391 { 1392 struct vattr va; 1393 1394 memset(kf, 0, sizeof(*kf)); 1395 1396 kf->fd_fd = fd; /* might not really be an fd */ 1397 1398 if (fp != NULL) { 1399 if (show_pointers) 1400 kf->f_fileaddr = PTRTOINT64(fp); 1401 kf->f_flag = fp->f_flag; 1402 kf->f_iflags = fp->f_iflags; 1403 kf->f_type = fp->f_type; 1404 kf->f_count = fp->f_count; 1405 if (show_pointers) 1406 kf->f_ucred = PTRTOINT64(fp->f_cred); 1407 kf->f_uid = fp->f_cred->cr_uid; 1408 kf->f_gid = fp->f_cred->cr_gid; 1409 if (show_pointers) 1410 kf->f_ops = PTRTOINT64(fp->f_ops); 1411 if (show_pointers) 1412 kf->f_data = PTRTOINT64(fp->f_data); 1413 kf->f_usecount = 0; 1414 1415 if (suser(p) == 0 || p->p_ucred->cr_uid == fp->f_cred->cr_uid) { 1416 mtx_enter(&fp->f_mtx); 1417 kf->f_offset = fp->f_offset; 1418 kf->f_rxfer = fp->f_rxfer; 1419 kf->f_rwfer = fp->f_wxfer; 1420 kf->f_seek = fp->f_seek; 1421 kf->f_rbytes = fp->f_rbytes; 1422 kf->f_wbytes = fp->f_wbytes; 1423 mtx_leave(&fp->f_mtx); 1424 } else 1425 kf->f_offset = -1; 1426 } else if (vp != NULL) { 1427 /* fake it */ 1428 kf->f_type = DTYPE_VNODE; 1429 kf->f_flag = FREAD; 1430 if (fd == KERN_FILE_TRACE) 1431 kf->f_flag |= FWRITE; 1432 } else if (so != NULL) { 1433 /* fake it */ 1434 kf->f_type = DTYPE_SOCKET; 1435 } 1436 1437 /* information about the object associated with this file */ 1438 switch (kf->f_type) { 1439 case DTYPE_VNODE: 1440 if (fp != NULL) 1441 vp = (struct vnode *)fp->f_data; 1442 1443 if (show_pointers) 1444 kf->v_un = PTRTOINT64(vp->v_un.vu_socket); 1445 kf->v_type = vp->v_type; 1446 kf->v_tag = vp->v_tag; 1447 kf->v_flag = vp->v_flag; 1448 if (show_pointers) 1449 kf->v_data = PTRTOINT64(vp->v_data); 1450 if (show_pointers) 1451 kf->v_mount = PTRTOINT64(vp->v_mount); 1452 if (vp->v_mount) 1453 strlcpy(kf->f_mntonname, 1454 vp->v_mount->mnt_stat.f_mntonname, 1455 sizeof(kf->f_mntonname)); 1456 1457 if (VOP_GETATTR(vp, &va, p->p_ucred, p) == 0) { 1458 kf->va_fileid = va.va_fileid; 1459 kf->va_mode = MAKEIMODE(va.va_type, va.va_mode); 1460 kf->va_size = va.va_size; 1461 kf->va_rdev = va.va_rdev; 1462 kf->va_fsid = va.va_fsid & 0xffffffff; 1463 kf->va_nlink = va.va_nlink; 1464 } 1465 break; 1466 1467 case DTYPE_SOCKET: { 1468 int locked = 0; 1469 1470 if (so == NULL) { 1471 so = (struct socket *)fp->f_data; 1472 /* if so is passed as parameter it is already locked */ 1473 solock(so); 1474 locked = 1; 1475 } 1476 1477 kf->so_type = so->so_type; 1478 kf->so_state = 
so->so_state | so->so_snd.sb_state | 1479 so->so_rcv.sb_state; 1480 if (show_pointers) 1481 kf->so_pcb = PTRTOINT64(so->so_pcb); 1482 else 1483 kf->so_pcb = -1; 1484 kf->so_protocol = so->so_proto->pr_protocol; 1485 kf->so_family = so->so_proto->pr_domain->dom_family; 1486 kf->so_rcv_cc = so->so_rcv.sb_cc; 1487 kf->so_snd_cc = so->so_snd.sb_cc; 1488 if (isspliced(so)) { 1489 if (show_pointers) 1490 kf->so_splice = 1491 PTRTOINT64(so->so_sp->ssp_socket); 1492 kf->so_splicelen = so->so_sp->ssp_len; 1493 } else if (issplicedback(so)) 1494 kf->so_splicelen = -1; 1495 if (so->so_pcb == NULL) { 1496 if (locked) 1497 sounlock(so); 1498 break; 1499 } 1500 switch (kf->so_family) { 1501 case AF_INET: { 1502 struct inpcb *inpcb = so->so_pcb; 1503 1504 soassertlocked(so); 1505 if (show_pointers) 1506 kf->inp_ppcb = PTRTOINT64(inpcb->inp_ppcb); 1507 kf->inp_lport = inpcb->inp_lport; 1508 kf->inp_laddru[0] = inpcb->inp_laddr.s_addr; 1509 kf->inp_fport = inpcb->inp_fport; 1510 kf->inp_faddru[0] = inpcb->inp_faddr.s_addr; 1511 kf->inp_rtableid = inpcb->inp_rtableid; 1512 if (so->so_type == SOCK_RAW) 1513 kf->inp_proto = inpcb->inp_ip.ip_p; 1514 if (so->so_proto->pr_protocol == IPPROTO_TCP) { 1515 struct tcpcb *tcpcb = (void *)inpcb->inp_ppcb; 1516 kf->t_rcv_wnd = tcpcb->rcv_wnd; 1517 kf->t_snd_wnd = tcpcb->snd_wnd; 1518 kf->t_snd_cwnd = tcpcb->snd_cwnd; 1519 kf->t_state = tcpcb->t_state; 1520 } 1521 break; 1522 } 1523 case AF_INET6: { 1524 struct inpcb *inpcb = so->so_pcb; 1525 1526 soassertlocked(so); 1527 if (show_pointers) 1528 kf->inp_ppcb = PTRTOINT64(inpcb->inp_ppcb); 1529 kf->inp_lport = inpcb->inp_lport; 1530 kf->inp_laddru[0] = inpcb->inp_laddr6.s6_addr32[0]; 1531 kf->inp_laddru[1] = inpcb->inp_laddr6.s6_addr32[1]; 1532 kf->inp_laddru[2] = inpcb->inp_laddr6.s6_addr32[2]; 1533 kf->inp_laddru[3] = inpcb->inp_laddr6.s6_addr32[3]; 1534 kf->inp_fport = inpcb->inp_fport; 1535 kf->inp_faddru[0] = inpcb->inp_faddr6.s6_addr32[0]; 1536 kf->inp_faddru[1] = inpcb->inp_faddr6.s6_addr32[1]; 1537 kf->inp_faddru[2] = inpcb->inp_faddr6.s6_addr32[2]; 1538 kf->inp_faddru[3] = inpcb->inp_faddr6.s6_addr32[3]; 1539 kf->inp_rtableid = inpcb->inp_rtableid; 1540 if (so->so_type == SOCK_RAW) 1541 kf->inp_proto = inpcb->inp_ipv6.ip6_nxt; 1542 if (so->so_proto->pr_protocol == IPPROTO_TCP) { 1543 struct tcpcb *tcpcb = (void *)inpcb->inp_ppcb; 1544 kf->t_rcv_wnd = tcpcb->rcv_wnd; 1545 kf->t_snd_wnd = tcpcb->snd_wnd; 1546 kf->t_state = tcpcb->t_state; 1547 } 1548 break; 1549 } 1550 case AF_UNIX: { 1551 struct unpcb *unpcb = so->so_pcb; 1552 1553 kf->f_msgcount = unpcb->unp_msgcount; 1554 if (show_pointers) { 1555 kf->unp_conn = PTRTOINT64(unpcb->unp_conn); 1556 kf->unp_refs = PTRTOINT64( 1557 SLIST_FIRST(&unpcb->unp_refs)); 1558 kf->unp_nextref = PTRTOINT64( 1559 SLIST_NEXT(unpcb, unp_nextref)); 1560 kf->v_un = PTRTOINT64(unpcb->unp_vnode); 1561 kf->unp_addr = PTRTOINT64(unpcb->unp_addr); 1562 } 1563 if (unpcb->unp_addr != NULL) { 1564 struct sockaddr_un *un = mtod(unpcb->unp_addr, 1565 struct sockaddr_un *); 1566 memcpy(kf->unp_path, un->sun_path, un->sun_len 1567 - offsetof(struct sockaddr_un,sun_path)); 1568 } 1569 break; 1570 } 1571 } 1572 if (locked) 1573 sounlock(so); 1574 break; 1575 } 1576 1577 case DTYPE_PIPE: { 1578 struct pipe *pipe = (struct pipe *)fp->f_data; 1579 1580 if (show_pointers) 1581 kf->pipe_peer = PTRTOINT64(pipe->pipe_peer); 1582 kf->pipe_state = pipe->pipe_state; 1583 break; 1584 } 1585 1586 case DTYPE_KQUEUE: { 1587 struct kqueue *kqi = (struct kqueue *)fp->f_data; 1588 1589 kf->kq_count = 
kqi->kq_count; 1590 kf->kq_state = kqi->kq_state; 1591 break; 1592 } 1593 } 1594 1595 /* per-process information for KERN_FILE_BY[PU]ID */ 1596 if (pr != NULL) { 1597 kf->p_pid = pr->ps_pid; 1598 kf->p_uid = pr->ps_ucred->cr_uid; 1599 kf->p_gid = pr->ps_ucred->cr_gid; 1600 kf->p_tid = -1; 1601 strlcpy(kf->p_comm, pr->ps_comm, sizeof(kf->p_comm)); 1602 } 1603 if (fdp != NULL) { 1604 fdplock(fdp); 1605 kf->fd_ofileflags = fdp->fd_ofileflags[fd]; 1606 fdpunlock(fdp); 1607 } 1608 } 1609 1610 /* 1611 * Get file structures. 1612 */ 1613 int 1614 sysctl_file(int *name, u_int namelen, char *where, size_t *sizep, 1615 struct proc *p) 1616 { 1617 struct kinfo_file *kf; 1618 struct filedesc *fdp; 1619 struct file *fp; 1620 struct process *pr; 1621 size_t buflen, elem_size, elem_count, outsize; 1622 char *dp = where; 1623 int arg, i, error = 0, needed = 0, matched; 1624 u_int op; 1625 int show_pointers; 1626 1627 if (namelen > 4) 1628 return (ENOTDIR); 1629 if (namelen < 4 || name[2] > sizeof(*kf)) 1630 return (EINVAL); 1631 1632 buflen = where != NULL ? *sizep : 0; 1633 op = name[0]; 1634 arg = name[1]; 1635 elem_size = name[2]; 1636 elem_count = name[3]; 1637 outsize = MIN(sizeof(*kf), elem_size); 1638 1639 if (elem_size < 1) 1640 return (EINVAL); 1641 1642 show_pointers = suser(curproc) == 0; 1643 1644 kf = malloc(sizeof(*kf), M_TEMP, M_WAITOK); 1645 1646 #define FILLIT2(fp, fdp, i, vp, pr, so) do { \ 1647 if (buflen >= elem_size && elem_count > 0) { \ 1648 fill_file(kf, fp, fdp, i, vp, pr, p, so, show_pointers);\ 1649 error = copyout(kf, dp, outsize); \ 1650 if (error) \ 1651 break; \ 1652 dp += elem_size; \ 1653 buflen -= elem_size; \ 1654 elem_count--; \ 1655 } \ 1656 needed += elem_size; \ 1657 } while (0) 1658 #define FILLIT(fp, fdp, i, vp, pr) \ 1659 FILLIT2(fp, fdp, i, vp, pr, NULL) 1660 #define FILLSO(so) \ 1661 FILLIT2(NULL, NULL, 0, NULL, NULL, so) 1662 1663 switch (op) { 1664 case KERN_FILE_BYFILE: 1665 /* use the inp-tables to pick up closed connections, too */ 1666 if (arg == DTYPE_SOCKET) { 1667 struct inpcb *inp; 1668 1669 NET_LOCK(); 1670 mtx_enter(&tcbtable.inpt_mtx); 1671 TAILQ_FOREACH(inp, &tcbtable.inpt_queue, inp_queue) 1672 FILLSO(inp->inp_socket); 1673 mtx_leave(&tcbtable.inpt_mtx); 1674 #ifdef INET6 1675 mtx_enter(&tcb6table.inpt_mtx); 1676 TAILQ_FOREACH(inp, &tcb6table.inpt_queue, inp_queue) 1677 FILLSO(inp->inp_socket); 1678 mtx_leave(&tcb6table.inpt_mtx); 1679 #endif 1680 mtx_enter(&udbtable.inpt_mtx); 1681 TAILQ_FOREACH(inp, &udbtable.inpt_queue, inp_queue) 1682 FILLSO(inp->inp_socket); 1683 mtx_leave(&udbtable.inpt_mtx); 1684 #ifdef INET6 1685 mtx_enter(&udb6table.inpt_mtx); 1686 TAILQ_FOREACH(inp, &udb6table.inpt_queue, inp_queue) 1687 FILLSO(inp->inp_socket); 1688 mtx_leave(&udb6table.inpt_mtx); 1689 #endif 1690 mtx_enter(&rawcbtable.inpt_mtx); 1691 TAILQ_FOREACH(inp, &rawcbtable.inpt_queue, inp_queue) 1692 FILLSO(inp->inp_socket); 1693 mtx_leave(&rawcbtable.inpt_mtx); 1694 #ifdef INET6 1695 mtx_enter(&rawin6pcbtable.inpt_mtx); 1696 TAILQ_FOREACH(inp, &rawin6pcbtable.inpt_queue, 1697 inp_queue) 1698 FILLSO(inp->inp_socket); 1699 mtx_leave(&rawin6pcbtable.inpt_mtx); 1700 #endif 1701 NET_UNLOCK(); 1702 } 1703 fp = NULL; 1704 while ((fp = fd_iterfile(fp, p)) != NULL) { 1705 if ((arg == 0 || fp->f_type == arg)) { 1706 int af, skip = 0; 1707 if (arg == DTYPE_SOCKET && fp->f_type == arg) { 1708 af = ((struct socket *)fp->f_data)-> 1709 so_proto->pr_domain->dom_family; 1710 if (af == AF_INET || af == AF_INET6) 1711 skip = 1; 1712 } 1713 if (!skip) 1714 FILLIT(fp, NULL, 0, 
NULL, NULL); 1715 } 1716 } 1717 break; 1718 case KERN_FILE_BYPID: 1719 /* A arg of -1 indicates all processes */ 1720 if (arg < -1) { 1721 error = EINVAL; 1722 break; 1723 } 1724 matched = 0; 1725 LIST_FOREACH(pr, &allprocess, ps_list) { 1726 /* 1727 * skip system, exiting, embryonic and undead 1728 * processes 1729 */ 1730 if (pr->ps_flags & (PS_SYSTEM | PS_EMBRYO | PS_EXITING)) 1731 continue; 1732 if (arg >= 0 && pr->ps_pid != (pid_t)arg) { 1733 /* not the pid we are looking for */ 1734 continue; 1735 } 1736 1737 refcnt_take(&pr->ps_refcnt); 1738 1739 matched = 1; 1740 fdp = pr->ps_fd; 1741 if (pr->ps_textvp) 1742 FILLIT(NULL, NULL, KERN_FILE_TEXT, pr->ps_textvp, pr); 1743 if (fdp->fd_cdir) 1744 FILLIT(NULL, NULL, KERN_FILE_CDIR, fdp->fd_cdir, pr); 1745 if (fdp->fd_rdir) 1746 FILLIT(NULL, NULL, KERN_FILE_RDIR, fdp->fd_rdir, pr); 1747 if (pr->ps_tracevp) 1748 FILLIT(NULL, NULL, KERN_FILE_TRACE, pr->ps_tracevp, pr); 1749 for (i = 0; i < fdp->fd_nfiles; i++) { 1750 if ((fp = fd_getfile(fdp, i)) == NULL) 1751 continue; 1752 FILLIT(fp, fdp, i, NULL, pr); 1753 FRELE(fp, p); 1754 } 1755 1756 refcnt_rele_wake(&pr->ps_refcnt); 1757 1758 /* pid is unique, stop searching */ 1759 if (arg >= 0) 1760 break; 1761 } 1762 if (!matched) 1763 error = ESRCH; 1764 break; 1765 case KERN_FILE_BYUID: 1766 LIST_FOREACH(pr, &allprocess, ps_list) { 1767 /* 1768 * skip system, exiting, embryonic and undead 1769 * processes 1770 */ 1771 if (pr->ps_flags & (PS_SYSTEM | PS_EMBRYO | PS_EXITING)) 1772 continue; 1773 if (arg >= 0 && pr->ps_ucred->cr_uid != (uid_t)arg) { 1774 /* not the uid we are looking for */ 1775 continue; 1776 } 1777 1778 refcnt_take(&pr->ps_refcnt); 1779 1780 fdp = pr->ps_fd; 1781 if (fdp->fd_cdir) 1782 FILLIT(NULL, NULL, KERN_FILE_CDIR, fdp->fd_cdir, pr); 1783 if (fdp->fd_rdir) 1784 FILLIT(NULL, NULL, KERN_FILE_RDIR, fdp->fd_rdir, pr); 1785 if (pr->ps_tracevp) 1786 FILLIT(NULL, NULL, KERN_FILE_TRACE, pr->ps_tracevp, pr); 1787 for (i = 0; i < fdp->fd_nfiles; i++) { 1788 if ((fp = fd_getfile(fdp, i)) == NULL) 1789 continue; 1790 FILLIT(fp, fdp, i, NULL, pr); 1791 FRELE(fp, p); 1792 } 1793 1794 refcnt_rele_wake(&pr->ps_refcnt); 1795 } 1796 break; 1797 default: 1798 error = EINVAL; 1799 break; 1800 } 1801 free(kf, M_TEMP, sizeof(*kf)); 1802 1803 if (!error) { 1804 if (where == NULL) 1805 needed += KERN_FILESLOP * elem_size; 1806 else if (*sizep < needed) 1807 error = ENOMEM; 1808 *sizep = needed; 1809 } 1810 1811 return (error); 1812 } 1813 1814 /* 1815 * try over estimating by 5 procs 1816 */ 1817 #define KERN_PROCSLOP 5 1818 1819 int 1820 sysctl_doproc(int *name, u_int namelen, char *where, size_t *sizep) 1821 { 1822 struct kinfo_proc *kproc = NULL; 1823 struct proc *p; 1824 struct process *pr; 1825 char *dp; 1826 int arg, buflen, doingzomb, elem_size, elem_count; 1827 int error, needed, op; 1828 int dothreads = 0; 1829 int show_pointers; 1830 1831 dp = where; 1832 buflen = where != NULL ? 
*sizep : 0; 1833 needed = error = 0; 1834 1835 if (namelen != 4 || name[2] <= 0 || name[3] < 0 || 1836 name[2] > sizeof(*kproc)) 1837 return (EINVAL); 1838 op = name[0]; 1839 arg = name[1]; 1840 elem_size = name[2]; 1841 elem_count = name[3]; 1842 1843 dothreads = op & KERN_PROC_SHOW_THREADS; 1844 op &= ~KERN_PROC_SHOW_THREADS; 1845 1846 show_pointers = suser(curproc) == 0; 1847 1848 if (where != NULL) 1849 kproc = malloc(sizeof(*kproc), M_TEMP, M_WAITOK); 1850 1851 pr = LIST_FIRST(&allprocess); 1852 doingzomb = 0; 1853 again: 1854 for (; pr != NULL; pr = LIST_NEXT(pr, ps_list)) { 1855 /* XXX skip processes in the middle of being zapped */ 1856 if (pr->ps_pgrp == NULL) 1857 continue; 1858 1859 /* 1860 * Skip embryonic processes. 1861 */ 1862 if (pr->ps_flags & PS_EMBRYO) 1863 continue; 1864 1865 /* 1866 * TODO - make more efficient (see notes below). 1867 */ 1868 switch (op) { 1869 1870 case KERN_PROC_PID: 1871 /* could do this with just a lookup */ 1872 if (pr->ps_pid != (pid_t)arg) 1873 continue; 1874 break; 1875 1876 case KERN_PROC_PGRP: 1877 /* could do this by traversing pgrp */ 1878 if (pr->ps_pgrp->pg_id != (pid_t)arg) 1879 continue; 1880 break; 1881 1882 case KERN_PROC_SESSION: 1883 if (pr->ps_session->s_leader == NULL || 1884 pr->ps_session->s_leader->ps_pid != (pid_t)arg) 1885 continue; 1886 break; 1887 1888 case KERN_PROC_TTY: 1889 if ((pr->ps_flags & PS_CONTROLT) == 0 || 1890 pr->ps_session->s_ttyp == NULL || 1891 pr->ps_session->s_ttyp->t_dev != (dev_t)arg) 1892 continue; 1893 break; 1894 1895 case KERN_PROC_UID: 1896 if (pr->ps_ucred->cr_uid != (uid_t)arg) 1897 continue; 1898 break; 1899 1900 case KERN_PROC_RUID: 1901 if (pr->ps_ucred->cr_ruid != (uid_t)arg) 1902 continue; 1903 break; 1904 1905 case KERN_PROC_ALL: 1906 if (pr->ps_flags & PS_SYSTEM) 1907 continue; 1908 break; 1909 1910 case KERN_PROC_KTHREAD: 1911 /* no filtering */ 1912 break; 1913 1914 default: 1915 error = EINVAL; 1916 goto err; 1917 } 1918 1919 if (buflen >= elem_size && elem_count > 0) { 1920 fill_kproc(pr, kproc, NULL, show_pointers); 1921 error = copyout(kproc, dp, elem_size); 1922 if (error) 1923 goto err; 1924 dp += elem_size; 1925 buflen -= elem_size; 1926 elem_count--; 1927 } 1928 needed += elem_size; 1929 1930 /* Skip per-thread entries if not required by op */ 1931 if (!dothreads) 1932 continue; 1933 1934 TAILQ_FOREACH(p, &pr->ps_threads, p_thr_link) { 1935 if (buflen >= elem_size && elem_count > 0) { 1936 fill_kproc(pr, kproc, p, show_pointers); 1937 error = copyout(kproc, dp, elem_size); 1938 if (error) 1939 goto err; 1940 dp += elem_size; 1941 buflen -= elem_size; 1942 elem_count--; 1943 } 1944 needed += elem_size; 1945 } 1946 } 1947 if (doingzomb == 0) { 1948 pr = LIST_FIRST(&zombprocess); 1949 doingzomb++; 1950 goto again; 1951 } 1952 if (where != NULL) { 1953 *sizep = dp - where; 1954 if (needed > *sizep) { 1955 error = ENOMEM; 1956 goto err; 1957 } 1958 } else { 1959 needed += KERN_PROCSLOP * elem_size; 1960 *sizep = needed; 1961 } 1962 err: 1963 if (kproc) 1964 free(kproc, M_TEMP, sizeof(*kproc)); 1965 return (error); 1966 } 1967 1968 /* 1969 * Fill in a kproc structure for the specified process. 
1970 */ 1971 void 1972 fill_kproc(struct process *pr, struct kinfo_proc *ki, struct proc *p, 1973 int show_pointers) 1974 { 1975 struct session *s = pr->ps_session; 1976 struct tty *tp; 1977 struct vmspace *vm = pr->ps_vmspace; 1978 struct timespec booted, st, ut, utc; 1979 struct tusage tu; 1980 int isthread; 1981 1982 isthread = p != NULL; 1983 if (!isthread) { 1984 p = pr->ps_mainproc; /* XXX */ 1985 tuagg_get_process(&tu, pr); 1986 } else 1987 tuagg_get_proc(&tu, p); 1988 1989 FILL_KPROC(ki, strlcpy, p, pr, pr->ps_ucred, pr->ps_pgrp, 1990 p, pr, s, vm, pr->ps_limit, pr->ps_sigacts, &tu, isthread, 1991 show_pointers); 1992 1993 /* stuff that's too painful to generalize into the macros */ 1994 if (pr->ps_pptr) 1995 ki->p_ppid = pr->ps_ppid; 1996 if (s->s_leader) 1997 ki->p_sid = s->s_leader->ps_pid; 1998 1999 if ((pr->ps_flags & PS_CONTROLT) && (tp = s->s_ttyp)) { 2000 ki->p_tdev = tp->t_dev; 2001 ki->p_tpgid = tp->t_pgrp ? tp->t_pgrp->pg_id : -1; 2002 if (show_pointers) 2003 ki->p_tsess = PTRTOINT64(tp->t_session); 2004 } else { 2005 ki->p_tdev = NODEV; 2006 ki->p_tpgid = -1; 2007 } 2008 2009 /* fixups that can only be done in the kernel */ 2010 if ((pr->ps_flags & PS_ZOMBIE) == 0) { 2011 if ((pr->ps_flags & PS_EMBRYO) == 0 && vm != NULL) 2012 ki->p_vm_rssize = vm_resident_count(vm); 2013 calctsru(&tu, &ut, &st, NULL); 2014 ki->p_uutime_sec = ut.tv_sec; 2015 ki->p_uutime_usec = ut.tv_nsec/1000; 2016 ki->p_ustime_sec = st.tv_sec; 2017 ki->p_ustime_usec = st.tv_nsec/1000; 2018 2019 /* Convert starting uptime to a starting UTC time. */ 2020 nanoboottime(&booted); 2021 timespecadd(&booted, &pr->ps_start, &utc); 2022 ki->p_ustart_sec = utc.tv_sec; 2023 ki->p_ustart_usec = utc.tv_nsec / 1000; 2024 2025 #ifdef MULTIPROCESSOR 2026 if (p->p_cpu != NULL) 2027 ki->p_cpuid = CPU_INFO_UNIT(p->p_cpu); 2028 #endif 2029 } 2030 2031 /* get %cpu and schedule state: just one thread or sum of all? */ 2032 if (isthread) { 2033 ki->p_pctcpu = p->p_pctcpu; 2034 ki->p_stat = p->p_stat; 2035 } else { 2036 ki->p_pctcpu = 0; 2037 ki->p_stat = (pr->ps_flags & PS_ZOMBIE) ? SDEAD : SIDL; 2038 TAILQ_FOREACH(p, &pr->ps_threads, p_thr_link) { 2039 ki->p_pctcpu += p->p_pctcpu; 2040 /* find best state: ONPROC > RUN > STOP > SLEEP > .. */ 2041 if (p->p_stat == SONPROC || ki->p_stat == SONPROC) 2042 ki->p_stat = SONPROC; 2043 else if (p->p_stat == SRUN || ki->p_stat == SRUN) 2044 ki->p_stat = SRUN; 2045 else if (p->p_stat == SSTOP || ki->p_stat == SSTOP) 2046 ki->p_stat = SSTOP; 2047 else if (p->p_stat == SSLEEP) 2048 ki->p_stat = SSLEEP; 2049 } 2050 } 2051 } 2052 2053 int 2054 sysctl_proc_args(int *name, u_int namelen, void *oldp, size_t *oldlenp, 2055 struct proc *cp) 2056 { 2057 struct process *vpr; 2058 pid_t pid; 2059 struct ps_strings pss; 2060 struct iovec iov; 2061 struct uio uio; 2062 int error, cnt, op; 2063 size_t limit; 2064 char **rargv, **vargv; /* reader vs. 
victim */ 2065 char *rarg, *varg, *buf; 2066 struct vmspace *vm; 2067 vaddr_t ps_strings; 2068 2069 if (namelen > 2) 2070 return (ENOTDIR); 2071 if (namelen < 2) 2072 return (EINVAL); 2073 2074 pid = name[0]; 2075 op = name[1]; 2076 2077 switch (op) { 2078 case KERN_PROC_ARGV: 2079 case KERN_PROC_NARGV: 2080 case KERN_PROC_ENV: 2081 case KERN_PROC_NENV: 2082 break; 2083 default: 2084 return (EOPNOTSUPP); 2085 } 2086 2087 if ((vpr = prfind(pid)) == NULL) 2088 return (ESRCH); 2089 2090 if (oldp == NULL) { 2091 if (op == KERN_PROC_NARGV || op == KERN_PROC_NENV) 2092 *oldlenp = sizeof(int); 2093 else 2094 *oldlenp = ARG_MAX; /* XXX XXX XXX */ 2095 return (0); 2096 } 2097 2098 /* Either system process or exiting/zombie */ 2099 if (vpr->ps_flags & (PS_SYSTEM | PS_EXITING)) 2100 return (EINVAL); 2101 2102 /* Execing - danger. */ 2103 if ((vpr->ps_flags & PS_INEXEC)) 2104 return (EBUSY); 2105 2106 /* Only owner or root can get env */ 2107 if ((op == KERN_PROC_NENV || op == KERN_PROC_ENV) && 2108 (vpr->ps_ucred->cr_uid != cp->p_ucred->cr_uid && 2109 (error = suser(cp)) != 0)) 2110 return (error); 2111 2112 ps_strings = vpr->ps_strings; 2113 vm = vpr->ps_vmspace; 2114 uvmspace_addref(vm); 2115 vpr = NULL; 2116 2117 buf = malloc(PAGE_SIZE, M_TEMP, M_WAITOK); 2118 2119 iov.iov_base = &pss; 2120 iov.iov_len = sizeof(pss); 2121 uio.uio_iov = &iov; 2122 uio.uio_iovcnt = 1; 2123 uio.uio_offset = (off_t)ps_strings; 2124 uio.uio_resid = sizeof(pss); 2125 uio.uio_segflg = UIO_SYSSPACE; 2126 uio.uio_rw = UIO_READ; 2127 uio.uio_procp = cp; 2128 2129 if ((error = uvm_io(&vm->vm_map, &uio, 0)) != 0) 2130 goto out; 2131 2132 if (op == KERN_PROC_NARGV) { 2133 error = sysctl_rdint(oldp, oldlenp, NULL, pss.ps_nargvstr); 2134 goto out; 2135 } 2136 if (op == KERN_PROC_NENV) { 2137 error = sysctl_rdint(oldp, oldlenp, NULL, pss.ps_nenvstr); 2138 goto out; 2139 } 2140 2141 if (op == KERN_PROC_ARGV) { 2142 cnt = pss.ps_nargvstr; 2143 vargv = pss.ps_argvstr; 2144 } else { 2145 cnt = pss.ps_nenvstr; 2146 vargv = pss.ps_envstr; 2147 } 2148 2149 /* -1 to have space for a terminating NUL */ 2150 limit = *oldlenp - 1; 2151 *oldlenp = 0; 2152 2153 rargv = oldp; 2154 2155 /* 2156 * *oldlenp - number of bytes copied out into readers buffer. 2157 * limit - maximal number of bytes allowed into readers buffer. 2158 * rarg - pointer into readers buffer where next arg will be stored. 2159 * rargv - pointer into readers buffer where the next rarg pointer 2160 * will be stored. 2161 * vargv - pointer into victim address space where the next argument 2162 * will be read. 2163 */ 2164 2165 /* space for cnt pointers and a NULL */ 2166 rarg = (char *)(rargv + cnt + 1); 2167 *oldlenp += (cnt + 1) * sizeof(char **); 2168 2169 while (cnt > 0 && *oldlenp < limit) { 2170 size_t len, vstrlen; 2171 2172 /* Write to readers argv */ 2173 if ((error = copyout(&rarg, rargv, sizeof(rarg))) != 0) 2174 goto out; 2175 2176 /* read the victim argv */ 2177 iov.iov_base = &varg; 2178 iov.iov_len = sizeof(varg); 2179 uio.uio_iov = &iov; 2180 uio.uio_iovcnt = 1; 2181 uio.uio_offset = (off_t)(vaddr_t)vargv; 2182 uio.uio_resid = sizeof(varg); 2183 uio.uio_segflg = UIO_SYSSPACE; 2184 uio.uio_rw = UIO_READ; 2185 uio.uio_procp = cp; 2186 if ((error = uvm_io(&vm->vm_map, &uio, 0)) != 0) 2187 goto out; 2188 2189 if (varg == NULL) 2190 break; 2191 2192 /* 2193 * read the victim arg. We must jump through hoops to avoid 2194 * crossing a page boundary too much and returning an error. 
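 *
 * Each pass reads at most up to the victim's next page boundary,
 * len = PAGE_SIZE - (varg & PAGE_MASK), then scans the buffer for a
 * NUL.  If no NUL is found within len bytes the string continues on
 * the following page and we loop back to "more" with varg advanced.
 * As a worked example with a 4 KB page size: an argument string at
 * victim address 0x1000f80 gives len = 4096 - 0xf80 = 128, so a
 * 200-character string is copied out as a 128-byte chunk followed by
 * a 72-byte chunk, with the terminating NUL appended separately.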
2195 */ 2196 more: 2197 len = PAGE_SIZE - (((vaddr_t)varg) & PAGE_MASK); 2198 /* leave space for the terminating NUL */ 2199 iov.iov_base = buf; 2200 iov.iov_len = len; 2201 uio.uio_iov = &iov; 2202 uio.uio_iovcnt = 1; 2203 uio.uio_offset = (off_t)(vaddr_t)varg; 2204 uio.uio_resid = len; 2205 uio.uio_segflg = UIO_SYSSPACE; 2206 uio.uio_rw = UIO_READ; 2207 uio.uio_procp = cp; 2208 if ((error = uvm_io(&vm->vm_map, &uio, 0)) != 0) 2209 goto out; 2210 2211 for (vstrlen = 0; vstrlen < len; vstrlen++) { 2212 if (buf[vstrlen] == '\0') 2213 break; 2214 } 2215 2216 /* Don't overflow readers buffer. */ 2217 if (*oldlenp + vstrlen + 1 >= limit) { 2218 error = ENOMEM; 2219 goto out; 2220 } 2221 2222 if ((error = copyout(buf, rarg, vstrlen)) != 0) 2223 goto out; 2224 2225 *oldlenp += vstrlen; 2226 rarg += vstrlen; 2227 2228 /* The string didn't end in this page? */ 2229 if (vstrlen == len) { 2230 varg += vstrlen; 2231 goto more; 2232 } 2233 2234 /* End of string. Terminate it with a NUL */ 2235 buf[0] = '\0'; 2236 if ((error = copyout(buf, rarg, 1)) != 0) 2237 goto out; 2238 *oldlenp += 1; 2239 rarg += 1; 2240 2241 vargv++; 2242 rargv++; 2243 cnt--; 2244 } 2245 2246 if (*oldlenp >= limit) { 2247 error = ENOMEM; 2248 goto out; 2249 } 2250 2251 /* Write the terminating null */ 2252 rarg = NULL; 2253 error = copyout(&rarg, rargv, sizeof(rarg)); 2254 2255 out: 2256 uvmspace_free(vm); 2257 free(buf, M_TEMP, PAGE_SIZE); 2258 return (error); 2259 } 2260 2261 int 2262 sysctl_proc_cwd(int *name, u_int namelen, void *oldp, size_t *oldlenp, 2263 struct proc *cp) 2264 { 2265 struct process *findpr; 2266 struct vnode *vp; 2267 pid_t pid; 2268 int error; 2269 size_t lenused, len; 2270 char *path, *bp, *bend; 2271 2272 if (namelen > 1) 2273 return (ENOTDIR); 2274 if (namelen < 1) 2275 return (EINVAL); 2276 2277 pid = name[0]; 2278 if ((findpr = prfind(pid)) == NULL) 2279 return (ESRCH); 2280 2281 if (oldp == NULL) { 2282 *oldlenp = MAXPATHLEN * 4; 2283 return (0); 2284 } 2285 2286 /* Either system process or exiting/zombie */ 2287 if (findpr->ps_flags & (PS_SYSTEM | PS_EXITING)) 2288 return (EINVAL); 2289 2290 /* Only owner or root can get cwd */ 2291 if (findpr->ps_ucred->cr_uid != cp->p_ucred->cr_uid && 2292 (error = suser(cp)) != 0) 2293 return (error); 2294 2295 len = *oldlenp; 2296 if (len > MAXPATHLEN * 4) 2297 len = MAXPATHLEN * 4; 2298 else if (len < 2) 2299 return (ERANGE); 2300 *oldlenp = 0; 2301 2302 /* snag a reference to the vnode before we can sleep */ 2303 vp = findpr->ps_fd->fd_cdir; 2304 vref(vp); 2305 2306 path = malloc(len, M_TEMP, M_WAITOK); 2307 2308 bp = &path[len]; 2309 bend = bp; 2310 *(--bp) = '\0'; 2311 2312 /* Same as sys__getcwd */ 2313 error = vfs_getcwd_common(vp, NULL, 2314 &bp, path, len / 2, GETCWD_CHECK_ACCESS, cp); 2315 if (error == 0) { 2316 *oldlenp = lenused = bend - bp; 2317 error = copyout(bp, oldp, lenused); 2318 } 2319 2320 vrele(vp); 2321 free(path, M_TEMP, len); 2322 2323 return (error); 2324 } 2325 2326 int 2327 sysctl_proc_nobroadcastkill(int *name, u_int namelen, void *newp, size_t newlen, 2328 void *oldp, size_t *oldlenp, struct proc *cp) 2329 { 2330 struct process *findpr; 2331 pid_t pid; 2332 int error, flag; 2333 2334 if (namelen > 1) 2335 return (ENOTDIR); 2336 if (namelen < 1) 2337 return (EINVAL); 2338 2339 pid = name[0]; 2340 if ((findpr = prfind(pid)) == NULL) 2341 return (ESRCH); 2342 2343 /* Either system process or exiting/zombie */ 2344 if (findpr->ps_flags & (PS_SYSTEM | PS_EXITING)) 2345 return (EINVAL); 2346 2347 /* Only root can change PS_NOBROADCASTKILL 
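 * (writes require the suser() check below); reading the current value
 * is not otherwise restricted.  A rough userland sketch for a root
 * process marking itself, assuming the usual
 * { CTL_KERN, KERN_PROC_NOBROADCASTKILL, pid } MIB layout:
 *
 *	int mib[3] = { CTL_KERN, KERN_PROC_NOBROADCASTKILL, getpid() };
 *	int flag = 1;
 *
 *	if (sysctl(mib, 3, NULL, NULL, &flag, sizeof(flag)) == -1)
 *		err(1, "sysctl");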
*/ 2348 if (newp != NULL && (error = suser(cp)) != 0) 2349 return (error); 2350 2351 /* get the PS_NOBROADCASTKILL flag */ 2352 flag = findpr->ps_flags & PS_NOBROADCASTKILL ? 1 : 0; 2353 2354 error = sysctl_int(oldp, oldlenp, newp, newlen, &flag); 2355 if (error == 0 && newp) { 2356 if (flag) 2357 atomic_setbits_int(&findpr->ps_flags, 2358 PS_NOBROADCASTKILL); 2359 else 2360 atomic_clearbits_int(&findpr->ps_flags, 2361 PS_NOBROADCASTKILL); 2362 } 2363 2364 return (error); 2365 } 2366 2367 /* Arbitrary but reasonable limit for one iteration. */ 2368 #define VMMAP_MAXLEN MAXPHYS 2369 2370 int 2371 sysctl_proc_vmmap(int *name, u_int namelen, void *oldp, size_t *oldlenp, 2372 struct proc *cp) 2373 { 2374 struct process *findpr; 2375 pid_t pid; 2376 int error; 2377 size_t oldlen, len; 2378 struct kinfo_vmentry *kve, *ukve; 2379 u_long *ustart, start; 2380 2381 if (namelen > 1) 2382 return (ENOTDIR); 2383 if (namelen < 1) 2384 return (EINVAL); 2385 2386 /* Provide max buffer length as hint. */ 2387 if (oldp == NULL) { 2388 if (oldlenp == NULL) 2389 return (EINVAL); 2390 else { 2391 *oldlenp = VMMAP_MAXLEN; 2392 return (0); 2393 } 2394 } 2395 2396 pid = name[0]; 2397 if (pid == cp->p_p->ps_pid) { 2398 /* Self process mapping. */ 2399 findpr = cp->p_p; 2400 } else if (pid > 0) { 2401 if ((findpr = prfind(pid)) == NULL) 2402 return (ESRCH); 2403 2404 /* Either system process or exiting/zombie */ 2405 if (findpr->ps_flags & (PS_SYSTEM | PS_EXITING)) 2406 return (EINVAL); 2407 2408 #if 1 2409 /* XXX Allow only root for now */ 2410 if ((error = suser(cp)) != 0) 2411 return (error); 2412 #else 2413 /* Only owner or root can get vmmap */ 2414 if (findpr->ps_ucred->cr_uid != cp->p_ucred->cr_uid && 2415 (error = suser(cp)) != 0) 2416 return (error); 2417 #endif 2418 } else { 2419 /* Only root can get kernel_map */ 2420 if ((error = suser(cp)) != 0) 2421 return (error); 2422 findpr = NULL; 2423 } 2424 2425 /* Check the given size. */ 2426 oldlen = *oldlenp; 2427 if (oldlen == 0 || oldlen % sizeof(*kve) != 0) 2428 return (EINVAL); 2429 2430 /* Deny huge allocation. */ 2431 if (oldlen > VMMAP_MAXLEN) 2432 return (EINVAL); 2433 2434 /* 2435 * Iterate from the given address passed as the first element's 2436 * kve_start via oldp. 2437 */ 2438 ukve = (struct kinfo_vmentry *)oldp; 2439 ustart = &ukve->kve_start; 2440 error = copyin(ustart, &start, sizeof(start)); 2441 if (error != 0) 2442 return (error); 2443 2444 /* Allocate wired memory to not block. */ 2445 kve = malloc(oldlen, M_TEMP, M_WAITOK); 2446 2447 /* Set the base address and read entries. */ 2448 kve[0].kve_start = start; 2449 len = oldlen; 2450 error = fill_vmmap(findpr, kve, &len); 2451 if (error != 0 && error != ENOMEM) 2452 goto done; 2453 if (len == 0) 2454 goto done; 2455 2456 KASSERT(len <= oldlen); 2457 KASSERT((len % sizeof(struct kinfo_vmentry)) == 0); 2458 2459 error = copyout(kve, oldp, len); 2460 2461 done: 2462 *oldlenp = len; 2463 2464 free(kve, M_TEMP, oldlen); 2465 2466 return (error); 2467 } 2468 #endif 2469 2470 /* 2471 * Initialize disknames/diskstats for export by sysctl. If update is set, 2472 * then we simply update the disk statistics information. 2473 */ 2474 int 2475 sysctl_diskinit(int update, struct proc *p) 2476 { 2477 struct diskstats *sdk; 2478 struct disk *dk; 2479 const char *duid; 2480 int error, changed = 0; 2481 2482 KERNEL_ASSERT_LOCKED(); 2483 2484 if ((error = rw_enter(&sysctl_disklock, RW_WRITE|RW_INTR)) != 0) 2485 return error; 2486 2487 /* Run in a loop, disks may change while malloc sleeps. 
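 * Each pass clears disk_change, sizes the exported name string (every
 * named disk contributes strlen(dk_name) plus 18 bytes for the
 * 16-character DUID and the ':' and ',' separators), and reallocates
 * disknames and diskstats.  If a disk attaches or detaches while
 * malloc sleeps, disk_change is raised again and the loop repeats, so
 * the buffers always match the final disklist.  The resulting string
 * has the form "sd0:<duid>,sd1:<duid>,..." with the trailing comma
 * stripped below.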
*/ 2488 while (disk_change) { 2489 int tlen; 2490 2491 disk_change = 0; 2492 2493 tlen = 0; 2494 TAILQ_FOREACH(dk, &disklist, dk_link) { 2495 if (dk->dk_name) 2496 tlen += strlen(dk->dk_name); 2497 tlen += 18; /* label uid + separators */ 2498 } 2499 tlen++; 2500 2501 /* 2502 * The sysctl_disklock ensures that no other process can 2503 * allocate disknames and diskstats while our malloc sleeps. 2504 */ 2505 free(disknames, M_SYSCTL, disknameslen); 2506 free(diskstats, M_SYSCTL, diskstatslen); 2507 diskstats = NULL; 2508 disknames = NULL; 2509 diskstats = mallocarray(disk_count, sizeof(struct diskstats), 2510 M_SYSCTL, M_WAITOK|M_ZERO); 2511 diskstatslen = disk_count * sizeof(struct diskstats); 2512 disknames = malloc(tlen, M_SYSCTL, M_WAITOK|M_ZERO); 2513 disknameslen = tlen; 2514 disknames[0] = '\0'; 2515 changed = 1; 2516 } 2517 2518 if (changed) { 2519 int l; 2520 2521 l = 0; 2522 sdk = diskstats; 2523 TAILQ_FOREACH(dk, &disklist, dk_link) { 2524 duid = NULL; 2525 if (dk->dk_label && !duid_iszero(dk->dk_label->d_uid)) 2526 duid = duid_format(dk->dk_label->d_uid); 2527 snprintf(disknames + l, disknameslen - l, "%s:%s,", 2528 dk->dk_name ? dk->dk_name : "", 2529 duid ? duid : ""); 2530 l += strlen(disknames + l); 2531 strlcpy(sdk->ds_name, dk->dk_name, 2532 sizeof(sdk->ds_name)); 2533 mtx_enter(&dk->dk_mtx); 2534 sdk->ds_busy = dk->dk_busy; 2535 sdk->ds_rxfer = dk->dk_rxfer; 2536 sdk->ds_wxfer = dk->dk_wxfer; 2537 sdk->ds_seek = dk->dk_seek; 2538 sdk->ds_rbytes = dk->dk_rbytes; 2539 sdk->ds_wbytes = dk->dk_wbytes; 2540 sdk->ds_attachtime = dk->dk_attachtime; 2541 sdk->ds_timestamp = dk->dk_timestamp; 2542 sdk->ds_time = dk->dk_time; 2543 mtx_leave(&dk->dk_mtx); 2544 sdk++; 2545 } 2546 2547 /* Eliminate trailing comma */ 2548 if (l != 0) 2549 disknames[l - 1] = '\0'; 2550 } else if (update) { 2551 /* Just update, number of drives hasn't changed */ 2552 sdk = diskstats; 2553 TAILQ_FOREACH(dk, &disklist, dk_link) { 2554 strlcpy(sdk->ds_name, dk->dk_name, 2555 sizeof(sdk->ds_name)); 2556 mtx_enter(&dk->dk_mtx); 2557 sdk->ds_busy = dk->dk_busy; 2558 sdk->ds_rxfer = dk->dk_rxfer; 2559 sdk->ds_wxfer = dk->dk_wxfer; 2560 sdk->ds_seek = dk->dk_seek; 2561 sdk->ds_rbytes = dk->dk_rbytes; 2562 sdk->ds_wbytes = dk->dk_wbytes; 2563 sdk->ds_attachtime = dk->dk_attachtime; 2564 sdk->ds_timestamp = dk->dk_timestamp; 2565 sdk->ds_time = dk->dk_time; 2566 mtx_leave(&dk->dk_mtx); 2567 sdk++; 2568 } 2569 } 2570 rw_exit_write(&sysctl_disklock); 2571 return 0; 2572 } 2573 2574 #if defined(SYSVMSG) || defined(SYSVSEM) || defined(SYSVSHM) 2575 int 2576 sysctl_sysvipc(int *name, u_int namelen, void *where, size_t *sizep) 2577 { 2578 #ifdef SYSVSEM 2579 struct sem_sysctl_info *semsi; 2580 #endif 2581 #ifdef SYSVSHM 2582 struct shm_sysctl_info *shmsi; 2583 #endif 2584 size_t infosize, dssize, tsize, buflen, bufsiz; 2585 int i, nds, error, ret; 2586 void *buf; 2587 2588 if (namelen != 1) 2589 return (EINVAL); 2590 2591 buflen = *sizep; 2592 2593 switch (*name) { 2594 case KERN_SYSVIPC_MSG_INFO: 2595 #ifdef SYSVMSG 2596 return (sysctl_sysvmsg(name, namelen, where, sizep)); 2597 #else 2598 return (EOPNOTSUPP); 2599 #endif 2600 case KERN_SYSVIPC_SEM_INFO: 2601 #ifdef SYSVSEM 2602 infosize = sizeof(semsi->seminfo); 2603 nds = seminfo.semmni; 2604 dssize = sizeof(semsi->semids[0]); 2605 break; 2606 #else 2607 return (EOPNOTSUPP); 2608 #endif 2609 case KERN_SYSVIPC_SHM_INFO: 2610 #ifdef SYSVSHM 2611 infosize = sizeof(shmsi->shminfo); 2612 nds = shminfo.shmmni; 2613 dssize = sizeof(shmsi->shmids[0]); 2614 break; 2615 #else 
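		/* SYSVSHM not compiled into this kernel */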
2616 return (EOPNOTSUPP); 2617 #endif 2618 default: 2619 return (EINVAL); 2620 } 2621 tsize = infosize + (nds * dssize); 2622 2623 /* Return just the total size required. */ 2624 if (where == NULL) { 2625 *sizep = tsize; 2626 return (0); 2627 } 2628 2629 /* Not enough room for even the info struct. */ 2630 if (buflen < infosize) { 2631 *sizep = 0; 2632 return (ENOMEM); 2633 } 2634 bufsiz = min(tsize, buflen); 2635 buf = malloc(bufsiz, M_TEMP, M_WAITOK|M_ZERO); 2636 2637 switch (*name) { 2638 #ifdef SYSVSEM 2639 case KERN_SYSVIPC_SEM_INFO: 2640 semsi = (struct sem_sysctl_info *)buf; 2641 semsi->seminfo = seminfo; 2642 break; 2643 #endif 2644 #ifdef SYSVSHM 2645 case KERN_SYSVIPC_SHM_INFO: 2646 shmsi = (struct shm_sysctl_info *)buf; 2647 shmsi->shminfo = shminfo; 2648 break; 2649 #endif 2650 } 2651 buflen -= infosize; 2652 2653 ret = 0; 2654 if (buflen > 0) { 2655 /* Fill in the IPC data structures. */ 2656 for (i = 0; i < nds; i++) { 2657 if (buflen < dssize) { 2658 ret = ENOMEM; 2659 break; 2660 } 2661 switch (*name) { 2662 #ifdef SYSVSEM 2663 case KERN_SYSVIPC_SEM_INFO: 2664 if (sema[i] != NULL) 2665 memcpy(&semsi->semids[i], sema[i], 2666 dssize); 2667 else 2668 memset(&semsi->semids[i], 0, dssize); 2669 break; 2670 #endif 2671 #ifdef SYSVSHM 2672 case KERN_SYSVIPC_SHM_INFO: 2673 if (shmsegs[i] != NULL) 2674 memcpy(&shmsi->shmids[i], shmsegs[i], 2675 dssize); 2676 else 2677 memset(&shmsi->shmids[i], 0, dssize); 2678 break; 2679 #endif 2680 } 2681 buflen -= dssize; 2682 } 2683 } 2684 *sizep -= buflen; 2685 error = copyout(buf, where, *sizep); 2686 free(buf, M_TEMP, bufsiz); 2687 /* If copyout succeeded, use return code set earlier. */ 2688 return (error ? error : ret); 2689 } 2690 #endif /* SYSVMSG || SYSVSEM || SYSVSHM */ 2691 2692 #ifndef SMALL_KERNEL 2693 2694 int 2695 sysctl_intrcnt(int *name, u_int namelen, void *oldp, size_t *oldlenp) 2696 { 2697 return (evcount_sysctl(name, namelen, oldp, oldlenp, NULL, 0)); 2698 } 2699 2700 2701 int 2702 sysctl_sensors(int *name, u_int namelen, void *oldp, size_t *oldlenp, 2703 void *newp, size_t newlen) 2704 { 2705 struct ksensor *ks; 2706 struct sensor *us; 2707 struct ksensordev *ksd; 2708 struct sensordev *usd; 2709 int dev, numt, ret; 2710 enum sensor_type type; 2711 2712 if (namelen != 1 && namelen != 3) 2713 return (ENOTDIR); 2714 2715 dev = name[0]; 2716 if (namelen == 1) { 2717 ret = sensordev_get(dev, &ksd); 2718 if (ret) 2719 return (ret); 2720 2721 /* Grab a copy, to clear the kernel pointers */ 2722 usd = malloc(sizeof(*usd), M_TEMP, M_WAITOK|M_ZERO); 2723 usd->num = ksd->num; 2724 strlcpy(usd->xname, ksd->xname, sizeof(usd->xname)); 2725 memcpy(usd->maxnumt, ksd->maxnumt, sizeof(usd->maxnumt)); 2726 usd->sensors_count = ksd->sensors_count; 2727 2728 ret = sysctl_rdstruct(oldp, oldlenp, newp, usd, 2729 sizeof(struct sensordev)); 2730 2731 free(usd, M_TEMP, sizeof(*usd)); 2732 return (ret); 2733 } 2734 2735 type = name[1]; 2736 numt = name[2]; 2737 2738 ret = sensor_find(dev, type, numt, &ks); 2739 if (ret) 2740 return (ret); 2741 2742 /* Grab a copy, to clear the kernel pointers */ 2743 us = malloc(sizeof(*us), M_TEMP, M_WAITOK|M_ZERO); 2744 memcpy(us->desc, ks->desc, sizeof(us->desc)); 2745 us->tv = ks->tv; 2746 us->value = ks->value; 2747 us->type = ks->type; 2748 us->status = ks->status; 2749 us->numt = ks->numt; 2750 us->flags = ks->flags; 2751 2752 ret = sysctl_rdstruct(oldp, oldlenp, newp, us, 2753 sizeof(struct sensor)); 2754 free(us, M_TEMP, sizeof(*us)); 2755 return (ret); 2756 } 2757 #endif /* SMALL_KERNEL */ 2758 2759 int 
2760 sysctl_cptime2(int *name, u_int namelen, void *oldp, size_t *oldlenp, 2761 void *newp, size_t newlen) 2762 { 2763 CPU_INFO_ITERATOR cii; 2764 struct cpu_info *ci; 2765 int found = 0; 2766 2767 if (namelen != 1) 2768 return (ENOTDIR); 2769 2770 CPU_INFO_FOREACH(cii, ci) { 2771 if (name[0] == CPU_INFO_UNIT(ci)) { 2772 found = 1; 2773 break; 2774 } 2775 } 2776 if (!found) 2777 return (ENOENT); 2778 2779 return (sysctl_rdstruct(oldp, oldlenp, newp, 2780 &ci->ci_schedstate.spc_cp_time, 2781 sizeof(ci->ci_schedstate.spc_cp_time))); 2782 } 2783 2784 #if NAUDIO > 0 2785 int 2786 sysctl_audio(int *name, u_int namelen, void *oldp, size_t *oldlenp, 2787 void *newp, size_t newlen) 2788 { 2789 if (namelen != 1) 2790 return (ENOTDIR); 2791 2792 if (name[0] != KERN_AUDIO_RECORD) 2793 return (ENOENT); 2794 2795 return (sysctl_int(oldp, oldlenp, newp, newlen, &audio_record_enable)); 2796 } 2797 #endif 2798 2799 #if NVIDEO > 0 2800 int 2801 sysctl_video(int *name, u_int namelen, void *oldp, size_t *oldlenp, 2802 void *newp, size_t newlen) 2803 { 2804 if (namelen != 1) 2805 return (ENOTDIR); 2806 2807 if (name[0] != KERN_VIDEO_RECORD) 2808 return (ENOENT); 2809 2810 return (sysctl_int(oldp, oldlenp, newp, newlen, &video_record_enable)); 2811 } 2812 #endif 2813 2814 int 2815 sysctl_cpustats(int *name, u_int namelen, void *oldp, size_t *oldlenp, 2816 void *newp, size_t newlen) 2817 { 2818 CPU_INFO_ITERATOR cii; 2819 struct cpustats cs; 2820 struct cpu_info *ci; 2821 int found = 0; 2822 2823 if (namelen != 1) 2824 return (ENOTDIR); 2825 2826 CPU_INFO_FOREACH(cii, ci) { 2827 if (name[0] == CPU_INFO_UNIT(ci)) { 2828 found = 1; 2829 break; 2830 } 2831 } 2832 if (!found) 2833 return (ENOENT); 2834 2835 memset(&cs, 0, sizeof cs); 2836 memcpy(&cs.cs_time, &ci->ci_schedstate.spc_cp_time, sizeof(cs.cs_time)); 2837 cs.cs_flags = 0; 2838 if (cpu_is_online(ci)) 2839 cs.cs_flags |= CPUSTATS_ONLINE; 2840 2841 return (sysctl_rdstruct(oldp, oldlenp, newp, &cs, sizeof(cs))); 2842 } 2843 2844 int 2845 sysctl_utc_offset(void *oldp, size_t *oldlenp, void *newp, size_t newlen) 2846 { 2847 struct timespec adjusted, now; 2848 int adjustment_seconds, error, new_offset_minutes, old_offset_minutes; 2849 2850 old_offset_minutes = utc_offset / 60; /* seconds -> minutes */ 2851 new_offset_minutes = old_offset_minutes; 2852 error = sysctl_securelevel_int(oldp, oldlenp, newp, newlen, 2853 &new_offset_minutes); 2854 if (error) 2855 return error; 2856 if (new_offset_minutes < -24 * 60 || new_offset_minutes > 24 * 60) 2857 return EINVAL; 2858 if (new_offset_minutes == old_offset_minutes) 2859 return 0; 2860 2861 utc_offset = new_offset_minutes * 60; /* minutes -> seconds */ 2862 adjustment_seconds = (new_offset_minutes - old_offset_minutes) * 60; 2863 2864 nanotime(&now); 2865 adjusted = now; 2866 adjusted.tv_sec -= adjustment_seconds; 2867 tc_setrealtimeclock(&adjusted); 2868 resettodr(); 2869 2870 return 0; 2871 } 2872
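/*
 * A hedged userland sketch for the handler above, assuming the
 * { CTL_KERN, KERN_UTC_OFFSET } MIB pair and a securelevel that still
 * allows the write (sysctl_securelevel_int() refuses changes once the
 * system has been secured).  The value is expressed in minutes; see
 * sysctl(2) for the sign convention.
 *
 *	int mib[2] = { CTL_KERN, KERN_UTC_OFFSET };
 *	int off = 9 * 60;
 *
 *	if (sysctl(mib, 2, NULL, NULL, &off, sizeof(off)) == -1)
 *		err(1, "sysctl");
 */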