1 /* $OpenBSD: kern_sysctl.c,v 1.389 2021/02/08 10:51:02 mpi Exp $ */ 2 /* $NetBSD: kern_sysctl.c,v 1.17 1996/05/20 17:49:05 mrg Exp $ */ 3 4 /*- 5 * Copyright (c) 1982, 1986, 1989, 1993 6 * The Regents of the University of California. All rights reserved. 7 * 8 * This code is derived from software contributed to Berkeley by 9 * Mike Karels at Berkeley Software Design, Inc. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 3. Neither the name of the University nor the names of its contributors 20 * may be used to endorse or promote products derived from this software 21 * without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 34 * 35 * @(#)kern_sysctl.c 8.4 (Berkeley) 4/14/94 36 */ 37 38 /* 39 * sysctl system call. 
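 *
 * For context: userland reaches this file through the sysctl(2) system
 * call, passing a MIB name vector plus optional old/new buffers.  A
 * minimal read of kern.osrelease looks roughly like the sketch below
 * (illustrative only, not compiled as part of the kernel).
 */
#if 0	/* example userland caller */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int mib[2] = { CTL_KERN, KERN_OSRELEASE };
	char rel[64];
	size_t len = sizeof(rel);

	/* Read-only query: newp == NULL, newlen == 0. */
	if (sysctl(mib, 2, rel, &len, NULL, 0) == -1) {
		perror("sysctl");
		return (1);
	}
	printf("kern.osrelease = %s\n", rel);
	return (0);
}
#endif
/*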
40 */ 41 42 #include <sys/param.h> 43 #include <sys/systm.h> 44 #include <sys/kernel.h> 45 #include <sys/malloc.h> 46 #include <sys/pool.h> 47 #include <sys/proc.h> 48 #include <sys/resourcevar.h> 49 #include <sys/signalvar.h> 50 #include <sys/fcntl.h> 51 #include <sys/file.h> 52 #include <sys/filedesc.h> 53 #include <sys/vnode.h> 54 #include <sys/unistd.h> 55 #include <sys/buf.h> 56 #include <sys/ioctl.h> 57 #include <sys/tty.h> 58 #include <sys/disklabel.h> 59 #include <sys/disk.h> 60 #include <sys/sysctl.h> 61 #include <sys/msgbuf.h> 62 #include <sys/vmmeter.h> 63 #include <sys/namei.h> 64 #include <sys/exec.h> 65 #include <sys/mbuf.h> 66 #include <sys/percpu.h> 67 #include <sys/sensors.h> 68 #include <sys/pipe.h> 69 #include <sys/eventvar.h> 70 #include <sys/socketvar.h> 71 #include <sys/socket.h> 72 #include <sys/domain.h> 73 #include <sys/protosw.h> 74 #include <sys/pledge.h> 75 #include <sys/timetc.h> 76 #include <sys/evcount.h> 77 #include <sys/un.h> 78 #include <sys/unpcb.h> 79 #include <sys/sched.h> 80 #include <sys/mount.h> 81 #include <sys/syscallargs.h> 82 #include <sys/wait.h> 83 #include <sys/witness.h> 84 85 #include <uvm/uvm_extern.h> 86 87 #include <dev/cons.h> 88 89 #include <net/route.h> 90 #include <netinet/in.h> 91 #include <netinet/ip.h> 92 #include <netinet/ip_var.h> 93 #include <netinet/in_pcb.h> 94 #include <netinet/ip6.h> 95 #include <netinet/tcp.h> 96 #include <netinet/tcp_timer.h> 97 #include <netinet/tcp_var.h> 98 #include <netinet/udp.h> 99 #include <netinet/udp_var.h> 100 #include <netinet6/ip6_var.h> 101 102 #ifdef DDB 103 #include <ddb/db_var.h> 104 #endif 105 106 #ifdef SYSVMSG 107 #include <sys/msg.h> 108 #endif 109 #ifdef SYSVSEM 110 #include <sys/sem.h> 111 #endif 112 #ifdef SYSVSHM 113 #include <sys/shm.h> 114 #endif 115 116 #include "audio.h" 117 #include "video.h" 118 #include "pf.h" 119 120 extern struct forkstat forkstat; 121 extern struct nchstats nchstats; 122 extern int nselcoll, fscale; 123 extern struct disklist_head disklist; 124 extern fixpt_t ccpu; 125 extern long numvnodes; 126 #if NAUDIO > 0 127 extern int audio_record_enable; 128 #endif 129 #if NVIDEO > 0 130 extern int video_record_enable; 131 #endif 132 133 int allowkmem; 134 int allowdt; 135 136 int sysctl_diskinit(int, struct proc *); 137 int sysctl_proc_args(int *, u_int, void *, size_t *, struct proc *); 138 int sysctl_proc_cwd(int *, u_int, void *, size_t *, struct proc *); 139 int sysctl_proc_nobroadcastkill(int *, u_int, void *, size_t, void *, size_t *, 140 struct proc *); 141 int sysctl_proc_vmmap(int *, u_int, void *, size_t *, struct proc *); 142 int sysctl_intrcnt(int *, u_int, void *, size_t *); 143 int sysctl_sensors(int *, u_int, void *, size_t *, void *, size_t); 144 int sysctl_cptime2(int *, u_int, void *, size_t *, void *, size_t); 145 #if NAUDIO > 0 146 int sysctl_audio(int *, u_int, void *, size_t *, void *, size_t); 147 #endif 148 #if NVIDEO > 0 149 int sysctl_video(int *, u_int, void *, size_t *, void *, size_t); 150 #endif 151 int sysctl_cpustats(int *, u_int, void *, size_t *, void *, size_t); 152 int sysctl_utc_offset(void *, size_t *, void *, size_t); 153 154 void fill_file(struct kinfo_file *, struct file *, struct filedesc *, int, 155 struct vnode *, struct process *, struct proc *, struct socket *, int); 156 void fill_kproc(struct process *, struct kinfo_proc *, struct proc *, int); 157 158 int (*cpu_cpuspeed)(int *); 159 160 /* 161 * Lock to avoid too many processes vslocking a large amount of memory 162 * at the same time. 
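 *
 * The caller's old buffer is wired with uvm_vslock() for the duration
 * of the request (see sys_sysctl() below), which is why its size is
 * checked against the remaining wirable memory.  Callers normally size
 * that buffer with the usual two-step handshake, sketched here for a
 * variable-length node (illustrative only, not compiled here):
 */
#if 0	/* example userland caller */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	int mib[2] = { CTL_KERN, KERN_VERSION };
	size_t len;
	char *buf;

	/* First call: oldp == NULL only reports the required size. */
	if (sysctl(mib, 2, NULL, &len, NULL, 0) == -1)
		return (1);
	if ((buf = malloc(len)) == NULL)
		return (1);
	/* Second call: fetch the value into the correctly sized buffer. */
	if (sysctl(mib, 2, buf, &len, NULL, 0) == -1)
		return (1);
	printf("%s", buf);
	free(buf);
	return (0);
}
#endif
/*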
163 */ 164 struct rwlock sysctl_lock = RWLOCK_INITIALIZER("sysctllk"); 165 struct rwlock sysctl_disklock = RWLOCK_INITIALIZER("sysctldlk"); 166 167 int 168 sys_sysctl(struct proc *p, void *v, register_t *retval) 169 { 170 struct sys_sysctl_args /* { 171 syscallarg(const int *) name; 172 syscallarg(u_int) namelen; 173 syscallarg(void *) old; 174 syscallarg(size_t *) oldlenp; 175 syscallarg(void *) new; 176 syscallarg(size_t) newlen; 177 } */ *uap = v; 178 int error, dolock = 1; 179 size_t savelen = 0, oldlen = 0; 180 sysctlfn *fn; 181 int name[CTL_MAXNAME]; 182 183 if (SCARG(uap, new) != NULL && 184 (error = suser(p))) 185 return (error); 186 /* 187 * all top-level sysctl names are non-terminal 188 */ 189 if (SCARG(uap, namelen) > CTL_MAXNAME || SCARG(uap, namelen) < 2) 190 return (EINVAL); 191 error = copyin(SCARG(uap, name), name, 192 SCARG(uap, namelen) * sizeof(int)); 193 if (error) 194 return (error); 195 196 error = pledge_sysctl(p, SCARG(uap, namelen), 197 name, SCARG(uap, new)); 198 if (error) 199 return (error); 200 201 switch (name[0]) { 202 case CTL_KERN: 203 fn = kern_sysctl; 204 break; 205 case CTL_HW: 206 fn = hw_sysctl; 207 break; 208 case CTL_VM: 209 fn = uvm_sysctl; 210 break; 211 case CTL_NET: 212 fn = net_sysctl; 213 break; 214 case CTL_FS: 215 fn = fs_sysctl; 216 break; 217 case CTL_VFS: 218 fn = vfs_sysctl; 219 break; 220 case CTL_MACHDEP: 221 fn = cpu_sysctl; 222 break; 223 #ifdef DEBUG_SYSCTL 224 case CTL_DEBUG: 225 fn = debug_sysctl; 226 break; 227 #endif 228 #ifdef DDB 229 case CTL_DDB: 230 fn = ddb_sysctl; 231 break; 232 #endif 233 default: 234 return (EOPNOTSUPP); 235 } 236 237 if (SCARG(uap, oldlenp) && 238 (error = copyin(SCARG(uap, oldlenp), &oldlen, sizeof(oldlen)))) 239 return (error); 240 if (SCARG(uap, old) != NULL) { 241 if ((error = rw_enter(&sysctl_lock, RW_WRITE|RW_INTR)) != 0) 242 return (error); 243 if (dolock) { 244 if (atop(oldlen) > uvmexp.wiredmax - uvmexp.wired) { 245 rw_exit_write(&sysctl_lock); 246 return (ENOMEM); 247 } 248 error = uvm_vslock(p, SCARG(uap, old), oldlen, 249 PROT_READ | PROT_WRITE); 250 if (error) { 251 rw_exit_write(&sysctl_lock); 252 return (error); 253 } 254 } 255 savelen = oldlen; 256 } 257 error = (*fn)(&name[1], SCARG(uap, namelen) - 1, SCARG(uap, old), 258 &oldlen, SCARG(uap, new), SCARG(uap, newlen), p); 259 if (SCARG(uap, old) != NULL) { 260 if (dolock) 261 uvm_vsunlock(p, SCARG(uap, old), savelen); 262 rw_exit_write(&sysctl_lock); 263 } 264 if (error) 265 return (error); 266 if (SCARG(uap, oldlenp)) 267 error = copyout(&oldlen, SCARG(uap, oldlenp), sizeof(oldlen)); 268 return (error); 269 } 270 271 /* 272 * Attributes stored in the kernel. 
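 *
 * These are the backing stores for simple string and integer nodes such
 * as kern.hostname and kern.hostid.  Writing one of them from userland
 * requires root (sys_sysctl() insists on suser() whenever newp is
 * non-NULL); an illustrative, uncompiled sketch with a made-up helper
 * name:
 */
#if 0	/* example userland caller */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <string.h>

int
set_hostname(char *name)
{
	int mib[2] = { CTL_KERN, KERN_HOSTNAME };

	/* newlen excludes the NUL; sysctl__string() terminates the copy. */
	return (sysctl(mib, 2, NULL, NULL, name, strlen(name)));
}
#endif
/*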
273 */ 274 char hostname[MAXHOSTNAMELEN]; 275 int hostnamelen; 276 char domainname[MAXHOSTNAMELEN]; 277 int domainnamelen; 278 long hostid; 279 char *disknames = NULL; 280 size_t disknameslen; 281 struct diskstats *diskstats = NULL; 282 size_t diskstatslen; 283 int securelevel; 284 285 /* morally const values reported by sysctl_bounded_arr */ 286 static int arg_max = ARG_MAX; 287 static int openbsd = OpenBSD; 288 static int posix_version = _POSIX_VERSION; 289 static int ngroups_max = NGROUPS_MAX; 290 static int int_zero = 0; 291 static int int_one = 1; 292 static int maxpartitions = MAXPARTITIONS; 293 static int raw_part = RAW_PART; 294 295 extern int somaxconn, sominconn; 296 extern int nosuidcoredump; 297 extern int maxlocksperuid; 298 extern int uvm_wxabort; 299 extern int global_ptrace; 300 301 const struct sysctl_bounded_args kern_vars[] = { 302 {KERN_OSREV, &openbsd, 1, 0}, 303 {KERN_MAXVNODES, &maxvnodes, 0, INT_MAX}, 304 {KERN_MAXPROC, &maxprocess, 0, INT_MAX}, 305 {KERN_MAXFILES, &maxfiles, 0, INT_MAX}, 306 {KERN_NFILES, &numfiles, 1, 0}, 307 {KERN_TTYCOUNT, &tty_count, 1, 0}, 308 {KERN_ARGMAX, &arg_max, 1, 0}, 309 {KERN_NSELCOLL, &nselcoll, 1, 0}, 310 {KERN_POSIX1, &posix_version, 1, 0}, 311 {KERN_NGROUPS, &ngroups_max, 1, 0}, 312 {KERN_JOB_CONTROL, &int_one, 1, 0}, 313 {KERN_SAVED_IDS, &int_one, 1, 0}, 314 {KERN_MAXPARTITIONS, &maxpartitions, 1, 0}, 315 {KERN_RAWPARTITION, &raw_part, 1, 0}, 316 {KERN_MAXTHREAD, &maxthread, 0, INT_MAX}, 317 {KERN_NTHREADS, &nthreads, 1, 0}, 318 {KERN_SOMAXCONN, &somaxconn, 0, SHRT_MAX}, 319 {KERN_SOMINCONN, &sominconn, 0, SHRT_MAX}, 320 {KERN_NOSUIDCOREDUMP, &nosuidcoredump, 0, 3}, 321 {KERN_FSYNC, &int_one, 1, 0}, 322 {KERN_SYSVMSG, 323 #ifdef SYSVMSG 324 &int_one, 325 #else 326 &int_zero, 327 #endif 328 1, 0}, 329 {KERN_SYSVSEM, 330 #ifdef SYSVSEM 331 &int_one, 332 #else 333 &int_zero, 334 #endif 335 1, 0}, 336 {KERN_SYSVSHM, 337 #ifdef SYSVSHM 338 &int_one, 339 #else 340 &int_zero, 341 #endif 342 1, 0}, 343 {KERN_FSCALE, &fscale, 1, 0}, 344 {KERN_CCPU, &ccpu, 1, 0}, 345 {KERN_NPROCS, &nprocesses, 1, 0}, 346 {KERN_SPLASSERT, &splassert_ctl, 0, 3}, 347 {KERN_MAXLOCKSPERUID, &maxlocksperuid, 0, INT_MAX}, 348 {KERN_WXABORT, &uvm_wxabort, 0, 1}, 349 {KERN_NETLIVELOCKS, &int_zero, 1, 0}, 350 #ifdef PTRACE 351 {KERN_GLOBAL_PTRACE, &global_ptrace, 0, 1}, 352 #endif 353 }; 354 355 int 356 kern_sysctl_dirs(int top_name, int *name, u_int namelen, 357 void *oldp, size_t *oldlenp, void *newp, size_t newlen, struct proc *p) 358 { 359 switch (top_name) { 360 #ifndef SMALL_KERNEL 361 case KERN_PROC: 362 return (sysctl_doproc(name, namelen, oldp, oldlenp)); 363 case KERN_PROC_ARGS: 364 return (sysctl_proc_args(name, namelen, oldp, oldlenp, p)); 365 case KERN_PROC_CWD: 366 return (sysctl_proc_cwd(name, namelen, oldp, oldlenp, p)); 367 case KERN_PROC_NOBROADCASTKILL: 368 return (sysctl_proc_nobroadcastkill(name, namelen, 369 newp, newlen, oldp, oldlenp, p)); 370 case KERN_PROC_VMMAP: 371 return (sysctl_proc_vmmap(name, namelen, oldp, oldlenp, p)); 372 case KERN_FILE: 373 return (sysctl_file(name, namelen, oldp, oldlenp, p)); 374 #endif 375 #if defined(GPROF) || defined(DDBPROF) 376 case KERN_PROF: 377 return (sysctl_doprof(name, namelen, oldp, oldlenp, 378 newp, newlen)); 379 #endif 380 case KERN_MALLOCSTATS: 381 return (sysctl_malloc(name, namelen, oldp, oldlenp, 382 newp, newlen, p)); 383 case KERN_TTY: 384 return (sysctl_tty(name, namelen, oldp, oldlenp, 385 newp, newlen)); 386 case KERN_POOL: 387 return (sysctl_dopool(name, namelen, oldp, oldlenp)); 388 #if 
defined(SYSVMSG) || defined(SYSVSEM) || defined(SYSVSHM) 389 case KERN_SYSVIPC_INFO: 390 return (sysctl_sysvipc(name, namelen, oldp, oldlenp)); 391 #endif 392 #ifdef SYSVSEM 393 case KERN_SEMINFO: 394 return (sysctl_sysvsem(name, namelen, oldp, oldlenp, 395 newp, newlen)); 396 #endif 397 #ifdef SYSVSHM 398 case KERN_SHMINFO: 399 return (sysctl_sysvshm(name, namelen, oldp, oldlenp, 400 newp, newlen)); 401 #endif 402 #ifndef SMALL_KERNEL 403 case KERN_INTRCNT: 404 return (sysctl_intrcnt(name, namelen, oldp, oldlenp)); 405 case KERN_WATCHDOG: 406 return (sysctl_wdog(name, namelen, oldp, oldlenp, 407 newp, newlen)); 408 #endif 409 #ifndef SMALL_KERNEL 410 case KERN_EVCOUNT: 411 return (evcount_sysctl(name, namelen, oldp, oldlenp, 412 newp, newlen)); 413 #endif 414 case KERN_TIMECOUNTER: 415 return (sysctl_tc(name, namelen, oldp, oldlenp, newp, newlen)); 416 case KERN_CPTIME2: 417 return (sysctl_cptime2(name, namelen, oldp, oldlenp, 418 newp, newlen)); 419 #ifdef WITNESS 420 case KERN_WITNESSWATCH: 421 return witness_sysctl_watch(oldp, oldlenp, newp, newlen); 422 case KERN_WITNESS: 423 return witness_sysctl(name, namelen, oldp, oldlenp, 424 newp, newlen); 425 #endif 426 #if NAUDIO > 0 427 case KERN_AUDIO: 428 return (sysctl_audio(name, namelen, oldp, oldlenp, 429 newp, newlen)); 430 #endif 431 #if NVIDEO > 0 432 case KERN_VIDEO: 433 return (sysctl_video(name, namelen, oldp, oldlenp, 434 newp, newlen)); 435 #endif 436 case KERN_CPUSTATS: 437 return (sysctl_cpustats(name, namelen, oldp, oldlenp, 438 newp, newlen)); 439 default: 440 return (ENOTDIR); /* overloaded */ 441 } 442 } 443 444 /* 445 * kernel related system variables. 446 */ 447 int 448 kern_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp, 449 size_t newlen, struct proc *p) 450 { 451 int error, level, inthostid, stackgap; 452 dev_t dev; 453 extern int pool_debug; 454 455 /* dispatch the non-terminal nodes first */ 456 if (namelen != 1) { 457 return kern_sysctl_dirs(name[0], name + 1, namelen - 1, 458 oldp, oldlenp, newp, newlen, p); 459 } 460 461 switch (name[0]) { 462 case KERN_OSTYPE: 463 return (sysctl_rdstring(oldp, oldlenp, newp, ostype)); 464 case KERN_OSRELEASE: 465 return (sysctl_rdstring(oldp, oldlenp, newp, osrelease)); 466 case KERN_OSVERSION: 467 return (sysctl_rdstring(oldp, oldlenp, newp, osversion)); 468 case KERN_VERSION: 469 return (sysctl_rdstring(oldp, oldlenp, newp, version)); 470 case KERN_NUMVNODES: /* XXX numvnodes is a long */ 471 return (sysctl_rdint(oldp, oldlenp, newp, numvnodes)); 472 case KERN_SECURELVL: 473 level = securelevel; 474 if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &level)) || 475 newp == NULL) 476 return (error); 477 if ((securelevel > 0 || level < -1) && 478 level < securelevel && p->p_p->ps_pid != 1) 479 return (EPERM); 480 securelevel = level; 481 return (0); 482 case KERN_ALLOWDT: 483 if (securelevel > 0) 484 return (sysctl_rdint(oldp, oldlenp, newp, allowdt)); 485 return (sysctl_int(oldp, oldlenp, newp, newlen, &allowdt)); 486 case KERN_ALLOWKMEM: 487 if (securelevel > 0) 488 return (sysctl_rdint(oldp, oldlenp, newp, allowkmem)); 489 return (sysctl_int(oldp, oldlenp, newp, newlen, &allowkmem)); 490 case KERN_HOSTNAME: 491 error = sysctl_tstring(oldp, oldlenp, newp, newlen, 492 hostname, sizeof(hostname)); 493 if (newp && !error) 494 hostnamelen = newlen; 495 return (error); 496 case KERN_DOMAINNAME: 497 error = sysctl_tstring(oldp, oldlenp, newp, newlen, 498 domainname, sizeof(domainname)); 499 if (newp && !error) 500 domainnamelen = newlen; 501 return 
(error); 502 case KERN_HOSTID: 503 inthostid = hostid; /* XXX assumes sizeof long <= sizeof int */ 504 error = sysctl_int(oldp, oldlenp, newp, newlen, &inthostid); 505 hostid = inthostid; 506 return (error); 507 case KERN_CLOCKRATE: 508 return (sysctl_clockrate(oldp, oldlenp, newp)); 509 case KERN_BOOTTIME: { 510 struct timeval bt; 511 memset(&bt, 0, sizeof bt); 512 microboottime(&bt); 513 return (sysctl_rdstruct(oldp, oldlenp, newp, &bt, sizeof bt)); 514 } 515 case KERN_MBSTAT: { 516 extern struct cpumem *mbstat; 517 uint64_t counters[MBSTAT_COUNT]; 518 struct mbstat mbs; 519 unsigned int i; 520 521 memset(&mbs, 0, sizeof(mbs)); 522 counters_read(mbstat, counters, MBSTAT_COUNT); 523 for (i = 0; i < MBSTAT_TYPES; i++) 524 mbs.m_mtypes[i] = counters[i]; 525 526 mbs.m_drops = counters[MBSTAT_DROPS]; 527 mbs.m_wait = counters[MBSTAT_WAIT]; 528 mbs.m_drain = counters[MBSTAT_DRAIN]; 529 530 return (sysctl_rdstruct(oldp, oldlenp, newp, 531 &mbs, sizeof(mbs))); 532 } 533 case KERN_MSGBUFSIZE: 534 case KERN_CONSBUFSIZE: { 535 struct msgbuf *mp; 536 mp = (name[0] == KERN_MSGBUFSIZE) ? msgbufp : consbufp; 537 /* 538 * deal with cases where the message buffer has 539 * become corrupted. 540 */ 541 if (!mp || mp->msg_magic != MSG_MAGIC) 542 return (ENXIO); 543 return (sysctl_rdint(oldp, oldlenp, newp, mp->msg_bufs)); 544 } 545 case KERN_CONSBUF: 546 if ((error = suser(p))) 547 return (error); 548 /* FALLTHROUGH */ 549 case KERN_MSGBUF: { 550 struct msgbuf *mp; 551 mp = (name[0] == KERN_MSGBUF) ? msgbufp : consbufp; 552 /* see note above */ 553 if (!mp || mp->msg_magic != MSG_MAGIC) 554 return (ENXIO); 555 return (sysctl_rdstruct(oldp, oldlenp, newp, mp, 556 mp->msg_bufs + offsetof(struct msgbuf, msg_bufc))); 557 } 558 case KERN_CPTIME: 559 { 560 CPU_INFO_ITERATOR cii; 561 struct cpu_info *ci; 562 long cp_time[CPUSTATES]; 563 int i, n = 0; 564 565 memset(cp_time, 0, sizeof(cp_time)); 566 567 CPU_INFO_FOREACH(cii, ci) { 568 if (!cpu_is_online(ci)) 569 continue; 570 n++; 571 for (i = 0; i < CPUSTATES; i++) 572 cp_time[i] += ci->ci_schedstate.spc_cp_time[i]; 573 } 574 575 for (i = 0; i < CPUSTATES; i++) 576 cp_time[i] /= n; 577 578 return (sysctl_rdstruct(oldp, oldlenp, newp, &cp_time, 579 sizeof(cp_time))); 580 } 581 case KERN_NCHSTATS: 582 return (sysctl_rdstruct(oldp, oldlenp, newp, &nchstats, 583 sizeof(struct nchstats))); 584 case KERN_FORKSTAT: 585 return (sysctl_rdstruct(oldp, oldlenp, newp, &forkstat, 586 sizeof(struct forkstat))); 587 case KERN_STACKGAPRANDOM: 588 stackgap = stackgap_random; 589 error = sysctl_int(oldp, oldlenp, newp, newlen, &stackgap); 590 if (error) 591 return (error); 592 /* 593 * Safety harness. 
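	 * Accept only a gap the stack layout code can honor: zero, or a
	 * power of two that is at least ALIGNBYTES and strictly below
	 * MAXSSIZ; anything else is rejected with EINVAL before
	 * stackgap_random is touched.  From userland the knob is an
	 * ordinary root-only integer write (hedged sketch below, not
	 * compiled here; helper name is made up):
	 */
#if 0	/* example userland caller */
#include <sys/types.h>
#include <sys/sysctl.h>

int
set_stackgap_random(int gap)
{
	int mib[2] = { CTL_KERN, KERN_STACKGAPRANDOM };

	/* The kernel re-checks the value against the rules above. */
	return (sysctl(mib, 2, NULL, NULL, &gap, sizeof(gap)));
}
#endif
	/*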
594 */ 595 if ((stackgap < ALIGNBYTES && stackgap != 0) || 596 !powerof2(stackgap) || stackgap >= MAXSSIZ) 597 return (EINVAL); 598 stackgap_random = stackgap; 599 return (0); 600 case KERN_MAXCLUSTERS: { 601 int val = nmbclust; 602 error = sysctl_int(oldp, oldlenp, newp, newlen, &val); 603 if (error == 0 && val != nmbclust) 604 error = nmbclust_update(val); 605 return (error); 606 } 607 case KERN_CACHEPCT: { 608 u_int64_t dmapages; 609 int opct, pgs; 610 opct = bufcachepercent; 611 error = sysctl_int(oldp, oldlenp, newp, newlen, 612 &bufcachepercent); 613 if (error) 614 return(error); 615 if (bufcachepercent > 90 || bufcachepercent < 5) { 616 bufcachepercent = opct; 617 return (EINVAL); 618 } 619 dmapages = uvm_pagecount(&dma_constraint); 620 if (bufcachepercent != opct) { 621 pgs = bufcachepercent * dmapages / 100; 622 bufadjust(pgs); /* adjust bufpages */ 623 bufhighpages = bufpages; /* set high water mark */ 624 } 625 return(0); 626 } 627 case KERN_CONSDEV: 628 if (cn_tab != NULL) 629 dev = cn_tab->cn_dev; 630 else 631 dev = NODEV; 632 return sysctl_rdstruct(oldp, oldlenp, newp, &dev, sizeof(dev)); 633 case KERN_POOL_DEBUG: { 634 int old_pool_debug = pool_debug; 635 636 error = sysctl_int(oldp, oldlenp, newp, newlen, 637 &pool_debug); 638 if (error == 0 && pool_debug != old_pool_debug) 639 pool_reclaim_all(); 640 return (error); 641 } 642 #if NPF > 0 643 case KERN_PFSTATUS: 644 return (pf_sysctl(oldp, oldlenp, newp, newlen)); 645 #endif 646 case KERN_TIMEOUT_STATS: 647 return (timeout_sysctl(oldp, oldlenp, newp, newlen)); 648 case KERN_UTC_OFFSET: 649 return (sysctl_utc_offset(oldp, oldlenp, newp, newlen)); 650 default: 651 return (sysctl_bounded_arr(kern_vars, nitems(kern_vars), name, 652 namelen, oldp, oldlenp, newp, newlen)); 653 } 654 /* NOTREACHED */ 655 } 656 657 /* 658 * hardware related system variables. 
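 *
 * Most hw.* nodes are read-only snapshots; for example hw.physmem64
 * reports physical memory as a 64-bit quantity via sysctl_rdquad().
 * An illustrative, uncompiled userland read:
 */
#if 0	/* example userland caller */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int mib[2] = { CTL_HW, HW_PHYSMEM64 };
	int64_t mem;
	size_t len = sizeof(mem);

	if (sysctl(mib, 2, &mem, &len, NULL, 0) == -1)
		return (1);
	printf("hw.physmem64 = %lld bytes\n", (long long)mem);
	return (0);
}
#endif
/*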
659 */ 660 char *hw_vendor, *hw_prod, *hw_uuid, *hw_serial, *hw_ver; 661 int allowpowerdown = 1; 662 663 /* morally const values reported by sysctl_bounded_arr */ 664 static int byte_order = BYTE_ORDER; 665 static int page_size = PAGE_SIZE; 666 667 const struct sysctl_bounded_args hw_vars[] = { 668 {HW_NCPU, &ncpus, 1, 0}, 669 {HW_NCPUFOUND, &ncpusfound, 1, 0}, 670 {HW_BYTEORDER, &byte_order, 1, 0}, 671 {HW_PAGESIZE, &page_size, 1, 0}, 672 {HW_DISKCOUNT, &disk_count, 1, 0}, 673 }; 674 675 int 676 hw_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp, 677 size_t newlen, struct proc *p) 678 { 679 extern char machine[], cpu_model[]; 680 int err, cpuspeed; 681 682 /* all sysctl names at this level except sensors are terminal */ 683 if (name[0] != HW_SENSORS && namelen != 1) 684 return (ENOTDIR); /* overloaded */ 685 686 switch (name[0]) { 687 case HW_MACHINE: 688 return (sysctl_rdstring(oldp, oldlenp, newp, machine)); 689 case HW_MODEL: 690 return (sysctl_rdstring(oldp, oldlenp, newp, cpu_model)); 691 case HW_NCPUONLINE: 692 return (sysctl_rdint(oldp, oldlenp, newp, 693 sysctl_hwncpuonline())); 694 case HW_PHYSMEM: 695 return (sysctl_rdint(oldp, oldlenp, newp, ptoa(physmem))); 696 case HW_USERMEM: 697 return (sysctl_rdint(oldp, oldlenp, newp, 698 ptoa(physmem - uvmexp.wired))); 699 case HW_DISKNAMES: 700 err = sysctl_diskinit(0, p); 701 if (err) 702 return err; 703 if (disknames) 704 return (sysctl_rdstring(oldp, oldlenp, newp, 705 disknames)); 706 else 707 return (sysctl_rdstring(oldp, oldlenp, newp, "")); 708 case HW_DISKSTATS: 709 err = sysctl_diskinit(1, p); 710 if (err) 711 return err; 712 return (sysctl_rdstruct(oldp, oldlenp, newp, diskstats, 713 disk_count * sizeof(struct diskstats))); 714 case HW_CPUSPEED: 715 if (!cpu_cpuspeed) 716 return (EOPNOTSUPP); 717 err = cpu_cpuspeed(&cpuspeed); 718 if (err) 719 return err; 720 return (sysctl_rdint(oldp, oldlenp, newp, cpuspeed)); 721 #ifndef SMALL_KERNEL 722 case HW_SENSORS: 723 return (sysctl_sensors(name + 1, namelen - 1, oldp, oldlenp, 724 newp, newlen)); 725 case HW_SETPERF: 726 return (sysctl_hwsetperf(oldp, oldlenp, newp, newlen)); 727 case HW_PERFPOLICY: 728 return (sysctl_hwperfpolicy(oldp, oldlenp, newp, newlen)); 729 #endif /* !SMALL_KERNEL */ 730 case HW_VENDOR: 731 if (hw_vendor) 732 return (sysctl_rdstring(oldp, oldlenp, newp, 733 hw_vendor)); 734 else 735 return (EOPNOTSUPP); 736 case HW_PRODUCT: 737 if (hw_prod) 738 return (sysctl_rdstring(oldp, oldlenp, newp, hw_prod)); 739 else 740 return (EOPNOTSUPP); 741 case HW_VERSION: 742 if (hw_ver) 743 return (sysctl_rdstring(oldp, oldlenp, newp, hw_ver)); 744 else 745 return (EOPNOTSUPP); 746 case HW_SERIALNO: 747 if (hw_serial) 748 return (sysctl_rdstring(oldp, oldlenp, newp, 749 hw_serial)); 750 else 751 return (EOPNOTSUPP); 752 case HW_UUID: 753 if (hw_uuid) 754 return (sysctl_rdstring(oldp, oldlenp, newp, hw_uuid)); 755 else 756 return (EOPNOTSUPP); 757 case HW_PHYSMEM64: 758 return (sysctl_rdquad(oldp, oldlenp, newp, 759 ptoa((psize_t)physmem))); 760 case HW_USERMEM64: 761 return (sysctl_rdquad(oldp, oldlenp, newp, 762 ptoa((psize_t)physmem - uvmexp.wired))); 763 case HW_ALLOWPOWERDOWN: 764 if (securelevel > 0) 765 return (sysctl_rdint(oldp, oldlenp, newp, 766 allowpowerdown)); 767 return (sysctl_int(oldp, oldlenp, newp, newlen, 768 &allowpowerdown)); 769 #ifdef __HAVE_CPU_TOPOLOGY 770 case HW_SMT: 771 return (sysctl_hwsmt(oldp, oldlenp, newp, newlen)); 772 #endif 773 default: 774 return sysctl_bounded_arr(hw_vars, nitems(hw_vars), name, 775 namelen, oldp, 
oldlenp, newp, newlen); 776 } 777 /* NOTREACHED */ 778 } 779 780 #ifdef DEBUG_SYSCTL 781 /* 782 * Debugging related system variables. 783 */ 784 extern struct ctldebug debug_vfs_busyprt; 785 struct ctldebug debug1, debug2, debug3, debug4; 786 struct ctldebug debug5, debug6, debug7, debug8, debug9; 787 struct ctldebug debug10, debug11, debug12, debug13, debug14; 788 struct ctldebug debug15, debug16, debug17, debug18, debug19; 789 static struct ctldebug *debugvars[CTL_DEBUG_MAXID] = { 790 &debug_vfs_busyprt, 791 &debug1, &debug2, &debug3, &debug4, 792 &debug5, &debug6, &debug7, &debug8, &debug9, 793 &debug10, &debug11, &debug12, &debug13, &debug14, 794 &debug15, &debug16, &debug17, &debug18, &debug19, 795 }; 796 int 797 debug_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp, 798 size_t newlen, struct proc *p) 799 { 800 struct ctldebug *cdp; 801 802 /* all sysctl names at this level are name and field */ 803 if (namelen != 2) 804 return (ENOTDIR); /* overloaded */ 805 if (name[0] < 0 || name[0] >= nitems(debugvars)) 806 return (EOPNOTSUPP); 807 cdp = debugvars[name[0]]; 808 if (cdp->debugname == 0) 809 return (EOPNOTSUPP); 810 switch (name[1]) { 811 case CTL_DEBUG_NAME: 812 return (sysctl_rdstring(oldp, oldlenp, newp, cdp->debugname)); 813 case CTL_DEBUG_VALUE: 814 return (sysctl_int(oldp, oldlenp, newp, newlen, cdp->debugvar)); 815 default: 816 return (EOPNOTSUPP); 817 } 818 /* NOTREACHED */ 819 } 820 #endif /* DEBUG_SYSCTL */ 821 822 /* 823 * Reads, or writes that lower the value 824 */ 825 int 826 sysctl_int_lower(void *oldp, size_t *oldlenp, void *newp, size_t newlen, int *valp) 827 { 828 unsigned int oval = *valp, val = *valp; 829 int error; 830 831 if (newp == NULL) 832 return (sysctl_rdint(oldp, oldlenp, newp, *valp)); 833 834 if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &val))) 835 return (error); 836 if (val > oval) 837 return (EPERM); /* do not allow raising */ 838 *(unsigned int *)valp = val; 839 return (0); 840 } 841 842 /* 843 * Validate parameters and get old / set new parameters 844 * for an integer-valued sysctl function. 845 */ 846 int 847 sysctl_int(void *oldp, size_t *oldlenp, void *newp, size_t newlen, int *valp) 848 { 849 return (sysctl_int_bounded(oldp, oldlenp, newp, newlen, valp, 0, 0)); 850 } 851 852 int 853 sysctl_int_bounded(void *oldp, size_t *oldlenp, void *newp, size_t newlen, 854 int *valp, int minimum, int maximum) 855 { 856 int error = 0; 857 int val; 858 859 if (oldp && *oldlenp < sizeof(int)) 860 return (ENOMEM); 861 if (newp && newlen != sizeof(int)) 862 return (EINVAL); 863 *oldlenp = sizeof(int); 864 val = *valp; 865 if (oldp) 866 error = copyout(&val, oldp, sizeof(int)); 867 if (error == 0 && newp) 868 error = copyin(newp, &val, sizeof(int)); 869 if (error) 870 return (error); 871 if (minimum == maximum || (minimum <= val && val <= maximum)) 872 *valp = val; 873 else 874 error = EINVAL; 875 return (error); 876 } 877 878 /* 879 * As above, but read-only. 880 */ 881 int 882 sysctl_rdint(void *oldp, size_t *oldlenp, void *newp, int val) 883 { 884 int error = 0; 885 886 if (oldp && *oldlenp < sizeof(int)) 887 return (ENOMEM); 888 if (newp) 889 return (EPERM); 890 *oldlenp = sizeof(int); 891 if (oldp) 892 error = copyout((caddr_t)&val, oldp, sizeof(int)); 893 return (error); 894 } 895 896 /* 897 * Array of bounded integer values. 
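 * Each entry maps one MIB value to an integer variable with an allowed
 * range: minimum <= maximum means writable (out-of-range values get
 * EINVAL), minimum == maximum means writable without bounds, and
 * minimum > maximum marks the variable read-only.  A hypothetical
 * subsystem table (names invented, not compiled here) would look like:
 */
#if 0	/* illustrative kernel-side table */
#define EXAMPLE_DEBUG	1	/* hypothetical MIB values */
#define EXAMPLE_LIMIT	2

extern int example_debug, example_limit;

const struct sysctl_bounded_args example_vars[] = {
	{ EXAMPLE_DEBUG, &example_debug, 0, 1 },	/* writable, 0..1 */
	{ EXAMPLE_LIMIT, &example_limit, 1, 0 },	/* min > max: read-only */
};

int
example_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	return (sysctl_bounded_arr(example_vars, nitems(example_vars),
	    name, namelen, oldp, oldlenp, newp, newlen));
}
#endif
/*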
898 */ 899 int 900 sysctl_bounded_arr(const struct sysctl_bounded_args *valpp, u_int valplen, 901 int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp, 902 size_t newlen) 903 { 904 u_int i; 905 if (namelen != 1) 906 return (ENOTDIR); 907 for (i = 0; i < valplen; ++i) { 908 if (valpp[i].mib == name[0]) { 909 if (valpp[i].minimum <= valpp[i].maximum) { 910 return (sysctl_int_bounded(oldp, oldlenp, newp, 911 newlen, valpp[i].var, valpp[i].minimum, 912 valpp[i].maximum)); 913 } else { 914 return (sysctl_rdint(oldp, oldlenp, newp, 915 *valpp[i].var)); 916 } 917 } 918 } 919 return (EOPNOTSUPP); 920 } 921 922 /* 923 * Validate parameters and get old / set new parameters 924 * for an integer-valued sysctl function. 925 */ 926 int 927 sysctl_quad(void *oldp, size_t *oldlenp, void *newp, size_t newlen, 928 int64_t *valp) 929 { 930 int error = 0; 931 932 if (oldp && *oldlenp < sizeof(int64_t)) 933 return (ENOMEM); 934 if (newp && newlen != sizeof(int64_t)) 935 return (EINVAL); 936 *oldlenp = sizeof(int64_t); 937 if (oldp) 938 error = copyout(valp, oldp, sizeof(int64_t)); 939 if (error == 0 && newp) 940 error = copyin(newp, valp, sizeof(int64_t)); 941 return (error); 942 } 943 944 /* 945 * As above, but read-only. 946 */ 947 int 948 sysctl_rdquad(void *oldp, size_t *oldlenp, void *newp, int64_t val) 949 { 950 int error = 0; 951 952 if (oldp && *oldlenp < sizeof(int64_t)) 953 return (ENOMEM); 954 if (newp) 955 return (EPERM); 956 *oldlenp = sizeof(int64_t); 957 if (oldp) 958 error = copyout((caddr_t)&val, oldp, sizeof(int64_t)); 959 return (error); 960 } 961 962 /* 963 * Validate parameters and get old / set new parameters 964 * for a string-valued sysctl function. 965 */ 966 int 967 sysctl_string(void *oldp, size_t *oldlenp, void *newp, size_t newlen, char *str, 968 size_t maxlen) 969 { 970 return sysctl__string(oldp, oldlenp, newp, newlen, str, maxlen, 0); 971 } 972 973 int 974 sysctl_tstring(void *oldp, size_t *oldlenp, void *newp, size_t newlen, 975 char *str, size_t maxlen) 976 { 977 return sysctl__string(oldp, oldlenp, newp, newlen, str, maxlen, 1); 978 } 979 980 int 981 sysctl__string(void *oldp, size_t *oldlenp, void *newp, size_t newlen, 982 char *str, size_t maxlen, int trunc) 983 { 984 size_t len; 985 int error = 0; 986 987 len = strlen(str) + 1; 988 if (oldp && *oldlenp < len) { 989 if (trunc == 0 || *oldlenp == 0) 990 return (ENOMEM); 991 } 992 if (newp && newlen >= maxlen) 993 return (EINVAL); 994 if (oldp) { 995 if (trunc && *oldlenp < len) { 996 len = *oldlenp; 997 error = copyout(str, oldp, len - 1); 998 if (error == 0) 999 error = copyout("", (char *)oldp + len - 1, 1); 1000 } else { 1001 error = copyout(str, oldp, len); 1002 } 1003 } 1004 *oldlenp = len; 1005 if (error == 0 && newp) { 1006 error = copyin(newp, str, newlen); 1007 str[newlen] = 0; 1008 } 1009 return (error); 1010 } 1011 1012 /* 1013 * As above, but read-only. 1014 */ 1015 int 1016 sysctl_rdstring(void *oldp, size_t *oldlenp, void *newp, const char *str) 1017 { 1018 size_t len; 1019 int error = 0; 1020 1021 len = strlen(str) + 1; 1022 if (oldp && *oldlenp < len) 1023 return (ENOMEM); 1024 if (newp) 1025 return (EPERM); 1026 *oldlenp = len; 1027 if (oldp) 1028 error = copyout(str, oldp, len); 1029 return (error); 1030 } 1031 1032 /* 1033 * Validate parameters and get old / set new parameters 1034 * for a structure oriented sysctl function. 
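 *
 * Struct-valued nodes are read by handing in a buffer of the expected
 * size; kern.boottime (KERN_BOOTTIME, handled earlier in this file via
 * sysctl_rdstruct()) is a typical example.  Illustrative, uncompiled
 * userland read:
 */
#if 0	/* example userland caller */
#include <sys/types.h>
#include <sys/time.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int mib[2] = { CTL_KERN, KERN_BOOTTIME };
	struct timeval bt;
	size_t len = sizeof(bt);

	if (sysctl(mib, 2, &bt, &len, NULL, 0) == -1)
		return (1);
	printf("booted at %lld\n", (long long)bt.tv_sec);
	return (0);
}
#endif
/*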
1035 */ 1036 int 1037 sysctl_struct(void *oldp, size_t *oldlenp, void *newp, size_t newlen, void *sp, 1038 size_t len) 1039 { 1040 int error = 0; 1041 1042 if (oldp && *oldlenp < len) 1043 return (ENOMEM); 1044 if (newp && newlen > len) 1045 return (EINVAL); 1046 if (oldp) { 1047 *oldlenp = len; 1048 error = copyout(sp, oldp, len); 1049 } 1050 if (error == 0 && newp) 1051 error = copyin(newp, sp, len); 1052 return (error); 1053 } 1054 1055 /* 1056 * Validate parameters and get old parameters 1057 * for a structure oriented sysctl function. 1058 */ 1059 int 1060 sysctl_rdstruct(void *oldp, size_t *oldlenp, void *newp, const void *sp, 1061 size_t len) 1062 { 1063 int error = 0; 1064 1065 if (oldp && *oldlenp < len) 1066 return (ENOMEM); 1067 if (newp) 1068 return (EPERM); 1069 *oldlenp = len; 1070 if (oldp) 1071 error = copyout(sp, oldp, len); 1072 return (error); 1073 } 1074 1075 #ifndef SMALL_KERNEL 1076 void 1077 fill_file(struct kinfo_file *kf, struct file *fp, struct filedesc *fdp, 1078 int fd, struct vnode *vp, struct process *pr, struct proc *p, 1079 struct socket *so, int show_pointers) 1080 { 1081 struct vattr va; 1082 1083 memset(kf, 0, sizeof(*kf)); 1084 1085 kf->fd_fd = fd; /* might not really be an fd */ 1086 1087 if (fp != NULL) { 1088 if (show_pointers) 1089 kf->f_fileaddr = PTRTOINT64(fp); 1090 kf->f_flag = fp->f_flag; 1091 kf->f_iflags = fp->f_iflags; 1092 kf->f_type = fp->f_type; 1093 kf->f_count = fp->f_count; 1094 if (show_pointers) 1095 kf->f_ucred = PTRTOINT64(fp->f_cred); 1096 kf->f_uid = fp->f_cred->cr_uid; 1097 kf->f_gid = fp->f_cred->cr_gid; 1098 if (show_pointers) 1099 kf->f_ops = PTRTOINT64(fp->f_ops); 1100 if (show_pointers) 1101 kf->f_data = PTRTOINT64(fp->f_data); 1102 kf->f_usecount = 0; 1103 1104 if (suser(p) == 0 || p->p_ucred->cr_uid == fp->f_cred->cr_uid) { 1105 mtx_enter(&fp->f_mtx); 1106 kf->f_offset = fp->f_offset; 1107 kf->f_rxfer = fp->f_rxfer; 1108 kf->f_rwfer = fp->f_wxfer; 1109 kf->f_seek = fp->f_seek; 1110 kf->f_rbytes = fp->f_rbytes; 1111 kf->f_wbytes = fp->f_wbytes; 1112 mtx_leave(&fp->f_mtx); 1113 } else 1114 kf->f_offset = -1; 1115 } else if (vp != NULL) { 1116 /* fake it */ 1117 kf->f_type = DTYPE_VNODE; 1118 kf->f_flag = FREAD; 1119 if (fd == KERN_FILE_TRACE) 1120 kf->f_flag |= FWRITE; 1121 } else if (so != NULL) { 1122 /* fake it */ 1123 kf->f_type = DTYPE_SOCKET; 1124 } 1125 1126 /* information about the object associated with this file */ 1127 switch (kf->f_type) { 1128 case DTYPE_VNODE: 1129 if (fp != NULL) 1130 vp = (struct vnode *)fp->f_data; 1131 1132 if (show_pointers) 1133 kf->v_un = PTRTOINT64(vp->v_un.vu_socket); 1134 kf->v_type = vp->v_type; 1135 kf->v_tag = vp->v_tag; 1136 kf->v_flag = vp->v_flag; 1137 if (show_pointers) 1138 kf->v_data = PTRTOINT64(vp->v_data); 1139 if (show_pointers) 1140 kf->v_mount = PTRTOINT64(vp->v_mount); 1141 if (vp->v_mount) 1142 strlcpy(kf->f_mntonname, 1143 vp->v_mount->mnt_stat.f_mntonname, 1144 sizeof(kf->f_mntonname)); 1145 1146 if (VOP_GETATTR(vp, &va, p->p_ucred, p) == 0) { 1147 kf->va_fileid = va.va_fileid; 1148 kf->va_mode = MAKEIMODE(va.va_type, va.va_mode); 1149 kf->va_size = va.va_size; 1150 kf->va_rdev = va.va_rdev; 1151 kf->va_fsid = va.va_fsid & 0xffffffff; 1152 kf->va_nlink = va.va_nlink; 1153 } 1154 break; 1155 1156 case DTYPE_SOCKET: { 1157 int locked = 0; 1158 1159 if (so == NULL) { 1160 so = (struct socket *)fp->f_data; 1161 /* if so is passed as parameter it is already locked */ 1162 switch (so->so_proto->pr_domain->dom_family) { 1163 case AF_INET: 1164 case AF_INET6: 1165 
NET_LOCK(); 1166 locked = 1; 1167 break; 1168 } 1169 } 1170 1171 kf->so_type = so->so_type; 1172 kf->so_state = so->so_state; 1173 if (show_pointers) 1174 kf->so_pcb = PTRTOINT64(so->so_pcb); 1175 else 1176 kf->so_pcb = -1; 1177 kf->so_protocol = so->so_proto->pr_protocol; 1178 kf->so_family = so->so_proto->pr_domain->dom_family; 1179 kf->so_rcv_cc = so->so_rcv.sb_cc; 1180 kf->so_snd_cc = so->so_snd.sb_cc; 1181 if (isspliced(so)) { 1182 if (show_pointers) 1183 kf->so_splice = 1184 PTRTOINT64(so->so_sp->ssp_socket); 1185 kf->so_splicelen = so->so_sp->ssp_len; 1186 } else if (issplicedback(so)) 1187 kf->so_splicelen = -1; 1188 if (so->so_pcb == NULL) { 1189 if (locked) 1190 NET_UNLOCK(); 1191 break; 1192 } 1193 switch (kf->so_family) { 1194 case AF_INET: { 1195 struct inpcb *inpcb = so->so_pcb; 1196 1197 NET_ASSERT_LOCKED(); 1198 if (show_pointers) 1199 kf->inp_ppcb = PTRTOINT64(inpcb->inp_ppcb); 1200 kf->inp_lport = inpcb->inp_lport; 1201 kf->inp_laddru[0] = inpcb->inp_laddr.s_addr; 1202 kf->inp_fport = inpcb->inp_fport; 1203 kf->inp_faddru[0] = inpcb->inp_faddr.s_addr; 1204 kf->inp_rtableid = inpcb->inp_rtableid; 1205 if (so->so_type == SOCK_RAW) 1206 kf->inp_proto = inpcb->inp_ip.ip_p; 1207 if (so->so_proto->pr_protocol == IPPROTO_TCP) { 1208 struct tcpcb *tcpcb = (void *)inpcb->inp_ppcb; 1209 kf->t_rcv_wnd = tcpcb->rcv_wnd; 1210 kf->t_snd_wnd = tcpcb->snd_wnd; 1211 kf->t_snd_cwnd = tcpcb->snd_cwnd; 1212 kf->t_state = tcpcb->t_state; 1213 } 1214 break; 1215 } 1216 case AF_INET6: { 1217 struct inpcb *inpcb = so->so_pcb; 1218 1219 NET_ASSERT_LOCKED(); 1220 if (show_pointers) 1221 kf->inp_ppcb = PTRTOINT64(inpcb->inp_ppcb); 1222 kf->inp_lport = inpcb->inp_lport; 1223 kf->inp_laddru[0] = inpcb->inp_laddr6.s6_addr32[0]; 1224 kf->inp_laddru[1] = inpcb->inp_laddr6.s6_addr32[1]; 1225 kf->inp_laddru[2] = inpcb->inp_laddr6.s6_addr32[2]; 1226 kf->inp_laddru[3] = inpcb->inp_laddr6.s6_addr32[3]; 1227 kf->inp_fport = inpcb->inp_fport; 1228 kf->inp_faddru[0] = inpcb->inp_faddr6.s6_addr32[0]; 1229 kf->inp_faddru[1] = inpcb->inp_faddr6.s6_addr32[1]; 1230 kf->inp_faddru[2] = inpcb->inp_faddr6.s6_addr32[2]; 1231 kf->inp_faddru[3] = inpcb->inp_faddr6.s6_addr32[3]; 1232 kf->inp_rtableid = inpcb->inp_rtableid; 1233 if (so->so_type == SOCK_RAW) 1234 kf->inp_proto = inpcb->inp_ipv6.ip6_nxt; 1235 if (so->so_proto->pr_protocol == IPPROTO_TCP) { 1236 struct tcpcb *tcpcb = (void *)inpcb->inp_ppcb; 1237 kf->t_rcv_wnd = tcpcb->rcv_wnd; 1238 kf->t_snd_wnd = tcpcb->snd_wnd; 1239 kf->t_state = tcpcb->t_state; 1240 } 1241 break; 1242 } 1243 case AF_UNIX: { 1244 struct unpcb *unpcb = so->so_pcb; 1245 1246 kf->f_msgcount = unpcb->unp_msgcount; 1247 if (show_pointers) { 1248 kf->unp_conn = PTRTOINT64(unpcb->unp_conn); 1249 kf->unp_refs = PTRTOINT64( 1250 SLIST_FIRST(&unpcb->unp_refs)); 1251 kf->unp_nextref = PTRTOINT64( 1252 SLIST_NEXT(unpcb, unp_nextref)); 1253 kf->v_un = PTRTOINT64(unpcb->unp_vnode); 1254 kf->unp_addr = PTRTOINT64(unpcb->unp_addr); 1255 } 1256 if (unpcb->unp_addr != NULL) { 1257 struct sockaddr_un *un = mtod(unpcb->unp_addr, 1258 struct sockaddr_un *); 1259 memcpy(kf->unp_path, un->sun_path, un->sun_len 1260 - offsetof(struct sockaddr_un,sun_path)); 1261 } 1262 break; 1263 } 1264 } 1265 if (locked) 1266 NET_UNLOCK(); 1267 break; 1268 } 1269 1270 case DTYPE_PIPE: { 1271 struct pipe *pipe = (struct pipe *)fp->f_data; 1272 1273 if (show_pointers) 1274 kf->pipe_peer = PTRTOINT64(pipe->pipe_peer); 1275 kf->pipe_state = pipe->pipe_state; 1276 break; 1277 } 1278 1279 case DTYPE_KQUEUE: { 1280 struct kqueue *kqi = 
(struct kqueue *)fp->f_data; 1281 1282 kf->kq_count = kqi->kq_count; 1283 kf->kq_state = kqi->kq_state; 1284 break; 1285 } 1286 } 1287 1288 /* per-process information for KERN_FILE_BY[PU]ID */ 1289 if (pr != NULL) { 1290 kf->p_pid = pr->ps_pid; 1291 kf->p_uid = pr->ps_ucred->cr_uid; 1292 kf->p_gid = pr->ps_ucred->cr_gid; 1293 kf->p_tid = -1; 1294 strlcpy(kf->p_comm, pr->ps_comm, sizeof(kf->p_comm)); 1295 } 1296 if (fdp != NULL) { 1297 fdplock(fdp); 1298 kf->fd_ofileflags = fdp->fd_ofileflags[fd]; 1299 fdpunlock(fdp); 1300 } 1301 } 1302 1303 /* 1304 * Get file structures. 1305 */ 1306 int 1307 sysctl_file(int *name, u_int namelen, char *where, size_t *sizep, 1308 struct proc *p) 1309 { 1310 struct kinfo_file *kf; 1311 struct filedesc *fdp; 1312 struct file *fp; 1313 struct process *pr; 1314 size_t buflen, elem_size, elem_count, outsize; 1315 char *dp = where; 1316 int arg, i, error = 0, needed = 0, matched; 1317 u_int op; 1318 int show_pointers; 1319 1320 if (namelen > 4) 1321 return (ENOTDIR); 1322 if (namelen < 4 || name[2] > sizeof(*kf)) 1323 return (EINVAL); 1324 1325 buflen = where != NULL ? *sizep : 0; 1326 op = name[0]; 1327 arg = name[1]; 1328 elem_size = name[2]; 1329 elem_count = name[3]; 1330 outsize = MIN(sizeof(*kf), elem_size); 1331 1332 if (elem_size < 1) 1333 return (EINVAL); 1334 1335 show_pointers = suser(curproc) == 0; 1336 1337 kf = malloc(sizeof(*kf), M_TEMP, M_WAITOK); 1338 1339 #define FILLIT2(fp, fdp, i, vp, pr, so) do { \ 1340 if (buflen >= elem_size && elem_count > 0) { \ 1341 fill_file(kf, fp, fdp, i, vp, pr, p, so, show_pointers);\ 1342 error = copyout(kf, dp, outsize); \ 1343 if (error) \ 1344 break; \ 1345 dp += elem_size; \ 1346 buflen -= elem_size; \ 1347 elem_count--; \ 1348 } \ 1349 needed += elem_size; \ 1350 } while (0) 1351 #define FILLIT(fp, fdp, i, vp, pr) \ 1352 FILLIT2(fp, fdp, i, vp, pr, NULL) 1353 #define FILLSO(so) \ 1354 FILLIT2(NULL, NULL, 0, NULL, NULL, so) 1355 1356 switch (op) { 1357 case KERN_FILE_BYFILE: 1358 /* use the inp-tables to pick up closed connections, too */ 1359 if (arg == DTYPE_SOCKET) { 1360 struct inpcb *inp; 1361 1362 NET_LOCK(); 1363 TAILQ_FOREACH(inp, &tcbtable.inpt_queue, inp_queue) 1364 FILLSO(inp->inp_socket); 1365 TAILQ_FOREACH(inp, &udbtable.inpt_queue, inp_queue) 1366 FILLSO(inp->inp_socket); 1367 TAILQ_FOREACH(inp, &rawcbtable.inpt_queue, inp_queue) 1368 FILLSO(inp->inp_socket); 1369 #ifdef INET6 1370 TAILQ_FOREACH(inp, &rawin6pcbtable.inpt_queue, 1371 inp_queue) 1372 FILLSO(inp->inp_socket); 1373 #endif 1374 NET_UNLOCK(); 1375 } 1376 fp = NULL; 1377 while ((fp = fd_iterfile(fp, p)) != NULL) { 1378 if ((arg == 0 || fp->f_type == arg)) { 1379 int af, skip = 0; 1380 if (arg == DTYPE_SOCKET && fp->f_type == arg) { 1381 af = ((struct socket *)fp->f_data)-> 1382 so_proto->pr_domain->dom_family; 1383 if (af == AF_INET || af == AF_INET6) 1384 skip = 1; 1385 } 1386 if (!skip) 1387 FILLIT(fp, NULL, 0, NULL, NULL); 1388 } 1389 } 1390 break; 1391 case KERN_FILE_BYPID: 1392 /* A arg of -1 indicates all processes */ 1393 if (arg < -1) { 1394 error = EINVAL; 1395 break; 1396 } 1397 matched = 0; 1398 LIST_FOREACH(pr, &allprocess, ps_list) { 1399 /* 1400 * skip system, exiting, embryonic and undead 1401 * processes 1402 */ 1403 if (pr->ps_flags & (PS_SYSTEM | PS_EMBRYO | PS_EXITING)) 1404 continue; 1405 if (arg > 0 && pr->ps_pid != (pid_t)arg) { 1406 /* not the pid we are looking for */ 1407 continue; 1408 } 1409 matched = 1; 1410 fdp = pr->ps_fd; 1411 if (pr->ps_textvp) 1412 FILLIT(NULL, NULL, KERN_FILE_TEXT, pr->ps_textvp, pr); 
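			/*
			 * The cdir, rdir and ktrace vnodes follow the
			 * executable's text vnode above; each is reported
			 * with a KERN_FILE_* pseudo-fd rather than a real
			 * descriptor number.
			 */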
1413 if (fdp->fd_cdir) 1414 FILLIT(NULL, NULL, KERN_FILE_CDIR, fdp->fd_cdir, pr); 1415 if (fdp->fd_rdir) 1416 FILLIT(NULL, NULL, KERN_FILE_RDIR, fdp->fd_rdir, pr); 1417 if (pr->ps_tracevp) 1418 FILLIT(NULL, NULL, KERN_FILE_TRACE, pr->ps_tracevp, pr); 1419 for (i = 0; i < fdp->fd_nfiles; i++) { 1420 if ((fp = fd_getfile(fdp, i)) == NULL) 1421 continue; 1422 FILLIT(fp, fdp, i, NULL, pr); 1423 FRELE(fp, p); 1424 } 1425 } 1426 if (!matched) 1427 error = ESRCH; 1428 break; 1429 case KERN_FILE_BYUID: 1430 LIST_FOREACH(pr, &allprocess, ps_list) { 1431 /* 1432 * skip system, exiting, embryonic and undead 1433 * processes 1434 */ 1435 if (pr->ps_flags & (PS_SYSTEM | PS_EMBRYO | PS_EXITING)) 1436 continue; 1437 if (arg >= 0 && pr->ps_ucred->cr_uid != (uid_t)arg) { 1438 /* not the uid we are looking for */ 1439 continue; 1440 } 1441 fdp = pr->ps_fd; 1442 if (fdp->fd_cdir) 1443 FILLIT(NULL, NULL, KERN_FILE_CDIR, fdp->fd_cdir, pr); 1444 if (fdp->fd_rdir) 1445 FILLIT(NULL, NULL, KERN_FILE_RDIR, fdp->fd_rdir, pr); 1446 if (pr->ps_tracevp) 1447 FILLIT(NULL, NULL, KERN_FILE_TRACE, pr->ps_tracevp, pr); 1448 for (i = 0; i < fdp->fd_nfiles; i++) { 1449 if ((fp = fd_getfile(fdp, i)) == NULL) 1450 continue; 1451 FILLIT(fp, fdp, i, NULL, pr); 1452 FRELE(fp, p); 1453 } 1454 } 1455 break; 1456 default: 1457 error = EINVAL; 1458 break; 1459 } 1460 free(kf, M_TEMP, sizeof(*kf)); 1461 1462 if (!error) { 1463 if (where == NULL) 1464 needed += KERN_FILESLOP * elem_size; 1465 else if (*sizep < needed) 1466 error = ENOMEM; 1467 *sizep = needed; 1468 } 1469 1470 return (error); 1471 } 1472 1473 /* 1474 * try over estimating by 5 procs 1475 */ 1476 #define KERN_PROCSLOP 5 1477 1478 int 1479 sysctl_doproc(int *name, u_int namelen, char *where, size_t *sizep) 1480 { 1481 struct kinfo_proc *kproc = NULL; 1482 struct proc *p; 1483 struct process *pr; 1484 char *dp; 1485 int arg, buflen, doingzomb, elem_size, elem_count; 1486 int error, needed, op; 1487 int dothreads = 0; 1488 int show_pointers; 1489 1490 dp = where; 1491 buflen = where != NULL ? *sizep : 0; 1492 needed = error = 0; 1493 1494 if (namelen != 4 || name[2] <= 0 || name[3] < 0 || 1495 name[2] > sizeof(*kproc)) 1496 return (EINVAL); 1497 op = name[0]; 1498 arg = name[1]; 1499 elem_size = name[2]; 1500 elem_count = name[3]; 1501 1502 dothreads = op & KERN_PROC_SHOW_THREADS; 1503 op &= ~KERN_PROC_SHOW_THREADS; 1504 1505 show_pointers = suser(curproc) == 0; 1506 1507 if (where != NULL) 1508 kproc = malloc(sizeof(*kproc), M_TEMP, M_WAITOK); 1509 1510 pr = LIST_FIRST(&allprocess); 1511 doingzomb = 0; 1512 again: 1513 for (; pr != NULL; pr = LIST_NEXT(pr, ps_list)) { 1514 /* XXX skip processes in the middle of being zapped */ 1515 if (pr->ps_pgrp == NULL) 1516 continue; 1517 1518 /* 1519 * Skip embryonic processes. 1520 */ 1521 if (pr->ps_flags & PS_EMBRYO) 1522 continue; 1523 1524 /* 1525 * TODO - make more efficient (see notes below). 
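		 *
		 * Each op filters the process list on a different key (pid,
		 * process group, session leader, controlling tty, effective
		 * or real uid); KERN_PROC_ALL only skips system processes
		 * and KERN_PROC_KTHREAD applies no filter at all.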
1526 */ 1527 switch (op) { 1528 1529 case KERN_PROC_PID: 1530 /* could do this with just a lookup */ 1531 if (pr->ps_pid != (pid_t)arg) 1532 continue; 1533 break; 1534 1535 case KERN_PROC_PGRP: 1536 /* could do this by traversing pgrp */ 1537 if (pr->ps_pgrp->pg_id != (pid_t)arg) 1538 continue; 1539 break; 1540 1541 case KERN_PROC_SESSION: 1542 if (pr->ps_session->s_leader == NULL || 1543 pr->ps_session->s_leader->ps_pid != (pid_t)arg) 1544 continue; 1545 break; 1546 1547 case KERN_PROC_TTY: 1548 if ((pr->ps_flags & PS_CONTROLT) == 0 || 1549 pr->ps_session->s_ttyp == NULL || 1550 pr->ps_session->s_ttyp->t_dev != (dev_t)arg) 1551 continue; 1552 break; 1553 1554 case KERN_PROC_UID: 1555 if (pr->ps_ucred->cr_uid != (uid_t)arg) 1556 continue; 1557 break; 1558 1559 case KERN_PROC_RUID: 1560 if (pr->ps_ucred->cr_ruid != (uid_t)arg) 1561 continue; 1562 break; 1563 1564 case KERN_PROC_ALL: 1565 if (pr->ps_flags & PS_SYSTEM) 1566 continue; 1567 break; 1568 1569 case KERN_PROC_KTHREAD: 1570 /* no filtering */ 1571 break; 1572 1573 default: 1574 error = EINVAL; 1575 goto err; 1576 } 1577 1578 if (buflen >= elem_size && elem_count > 0) { 1579 fill_kproc(pr, kproc, NULL, show_pointers); 1580 error = copyout(kproc, dp, elem_size); 1581 if (error) 1582 goto err; 1583 dp += elem_size; 1584 buflen -= elem_size; 1585 elem_count--; 1586 } 1587 needed += elem_size; 1588 1589 /* Skip per-thread entries if not required by op */ 1590 if (!dothreads) 1591 continue; 1592 1593 TAILQ_FOREACH(p, &pr->ps_threads, p_thr_link) { 1594 if (buflen >= elem_size && elem_count > 0) { 1595 fill_kproc(pr, kproc, p, show_pointers); 1596 error = copyout(kproc, dp, elem_size); 1597 if (error) 1598 goto err; 1599 dp += elem_size; 1600 buflen -= elem_size; 1601 elem_count--; 1602 } 1603 needed += elem_size; 1604 } 1605 } 1606 if (doingzomb == 0) { 1607 pr = LIST_FIRST(&zombprocess); 1608 doingzomb++; 1609 goto again; 1610 } 1611 if (where != NULL) { 1612 *sizep = dp - where; 1613 if (needed > *sizep) { 1614 error = ENOMEM; 1615 goto err; 1616 } 1617 } else { 1618 needed += KERN_PROCSLOP * elem_size; 1619 *sizep = needed; 1620 } 1621 err: 1622 if (kproc) 1623 free(kproc, M_TEMP, sizeof(*kproc)); 1624 return (error); 1625 } 1626 1627 /* 1628 * Fill in a kproc structure for the specified process. 1629 */ 1630 void 1631 fill_kproc(struct process *pr, struct kinfo_proc *ki, struct proc *p, 1632 int show_pointers) 1633 { 1634 struct session *s = pr->ps_session; 1635 struct tty *tp; 1636 struct vmspace *vm = pr->ps_vmspace; 1637 struct timespec booted, st, ut, utc; 1638 int isthread; 1639 1640 isthread = p != NULL; 1641 if (!isthread) 1642 p = pr->ps_mainproc; /* XXX */ 1643 1644 FILL_KPROC(ki, strlcpy, p, pr, pr->ps_ucred, pr->ps_pgrp, 1645 p, pr, s, vm, pr->ps_limit, pr->ps_sigacts, isthread, 1646 show_pointers); 1647 1648 /* stuff that's too painful to generalize into the macros */ 1649 if (pr->ps_pptr) 1650 ki->p_ppid = pr->ps_ppid; 1651 if (s->s_leader) 1652 ki->p_sid = s->s_leader->ps_pid; 1653 1654 if ((pr->ps_flags & PS_CONTROLT) && (tp = s->s_ttyp)) { 1655 ki->p_tdev = tp->t_dev; 1656 ki->p_tpgid = tp->t_pgrp ? tp->t_pgrp->pg_id : -1; 1657 if (show_pointers) 1658 ki->p_tsess = PTRTOINT64(tp->t_session); 1659 } else { 1660 ki->p_tdev = NODEV; 1661 ki->p_tpgid = -1; 1662 } 1663 1664 /* fixups that can only be done in the kernel */ 1665 if ((pr->ps_flags & PS_ZOMBIE) == 0) { 1666 if ((pr->ps_flags & PS_EMBRYO) == 0 && vm != NULL) 1667 ki->p_vm_rssize = vm_resident_count(vm); 1668 calctsru(isthread ? 
&p->p_tu : &pr->ps_tu, &ut, &st, NULL); 1669 ki->p_uutime_sec = ut.tv_sec; 1670 ki->p_uutime_usec = ut.tv_nsec/1000; 1671 ki->p_ustime_sec = st.tv_sec; 1672 ki->p_ustime_usec = st.tv_nsec/1000; 1673 1674 /* Convert starting uptime to a starting UTC time. */ 1675 nanoboottime(&booted); 1676 timespecadd(&booted, &pr->ps_start, &utc); 1677 ki->p_ustart_sec = utc.tv_sec; 1678 ki->p_ustart_usec = utc.tv_nsec / 1000; 1679 1680 #ifdef MULTIPROCESSOR 1681 if (p->p_cpu != NULL) 1682 ki->p_cpuid = CPU_INFO_UNIT(p->p_cpu); 1683 #endif 1684 } 1685 1686 /* get %cpu and schedule state: just one thread or sum of all? */ 1687 if (isthread) { 1688 ki->p_pctcpu = p->p_pctcpu; 1689 ki->p_stat = p->p_stat; 1690 } else { 1691 ki->p_pctcpu = 0; 1692 ki->p_stat = (pr->ps_flags & PS_ZOMBIE) ? SDEAD : SIDL; 1693 TAILQ_FOREACH(p, &pr->ps_threads, p_thr_link) { 1694 ki->p_pctcpu += p->p_pctcpu; 1695 /* find best state: ONPROC > RUN > STOP > SLEEP > .. */ 1696 if (p->p_stat == SONPROC || ki->p_stat == SONPROC) 1697 ki->p_stat = SONPROC; 1698 else if (p->p_stat == SRUN || ki->p_stat == SRUN) 1699 ki->p_stat = SRUN; 1700 else if (p->p_stat == SSTOP || ki->p_stat == SSTOP) 1701 ki->p_stat = SSTOP; 1702 else if (p->p_stat == SSLEEP) 1703 ki->p_stat = SSLEEP; 1704 } 1705 } 1706 } 1707 1708 int 1709 sysctl_proc_args(int *name, u_int namelen, void *oldp, size_t *oldlenp, 1710 struct proc *cp) 1711 { 1712 struct process *vpr; 1713 pid_t pid; 1714 struct ps_strings pss; 1715 struct iovec iov; 1716 struct uio uio; 1717 int error, cnt, op; 1718 size_t limit; 1719 char **rargv, **vargv; /* reader vs. victim */ 1720 char *rarg, *varg, *buf; 1721 struct vmspace *vm; 1722 vaddr_t ps_strings; 1723 1724 if (namelen > 2) 1725 return (ENOTDIR); 1726 if (namelen < 2) 1727 return (EINVAL); 1728 1729 pid = name[0]; 1730 op = name[1]; 1731 1732 switch (op) { 1733 case KERN_PROC_ARGV: 1734 case KERN_PROC_NARGV: 1735 case KERN_PROC_ENV: 1736 case KERN_PROC_NENV: 1737 break; 1738 default: 1739 return (EOPNOTSUPP); 1740 } 1741 1742 if ((vpr = prfind(pid)) == NULL) 1743 return (ESRCH); 1744 1745 if (oldp == NULL) { 1746 if (op == KERN_PROC_NARGV || op == KERN_PROC_NENV) 1747 *oldlenp = sizeof(int); 1748 else 1749 *oldlenp = ARG_MAX; /* XXX XXX XXX */ 1750 return (0); 1751 } 1752 1753 /* Either system process or exiting/zombie */ 1754 if (vpr->ps_flags & (PS_SYSTEM | PS_EXITING)) 1755 return (EINVAL); 1756 1757 /* Execing - danger. 
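	 * While PS_INEXEC is set the argument and environment vectors are
	 * being torn down and rebuilt, so reading them now could return
	 * garbage; fail with EBUSY and let the caller retry.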
*/ 1758 if ((vpr->ps_flags & PS_INEXEC)) 1759 return (EBUSY); 1760 1761 /* Only owner or root can get env */ 1762 if ((op == KERN_PROC_NENV || op == KERN_PROC_ENV) && 1763 (vpr->ps_ucred->cr_uid != cp->p_ucred->cr_uid && 1764 (error = suser(cp)) != 0)) 1765 return (error); 1766 1767 ps_strings = vpr->ps_strings; 1768 vm = vpr->ps_vmspace; 1769 uvmspace_addref(vm); 1770 vpr = NULL; 1771 1772 buf = malloc(PAGE_SIZE, M_TEMP, M_WAITOK); 1773 1774 iov.iov_base = &pss; 1775 iov.iov_len = sizeof(pss); 1776 uio.uio_iov = &iov; 1777 uio.uio_iovcnt = 1; 1778 uio.uio_offset = (off_t)ps_strings; 1779 uio.uio_resid = sizeof(pss); 1780 uio.uio_segflg = UIO_SYSSPACE; 1781 uio.uio_rw = UIO_READ; 1782 uio.uio_procp = cp; 1783 1784 if ((error = uvm_io(&vm->vm_map, &uio, 0)) != 0) 1785 goto out; 1786 1787 if (op == KERN_PROC_NARGV) { 1788 error = sysctl_rdint(oldp, oldlenp, NULL, pss.ps_nargvstr); 1789 goto out; 1790 } 1791 if (op == KERN_PROC_NENV) { 1792 error = sysctl_rdint(oldp, oldlenp, NULL, pss.ps_nenvstr); 1793 goto out; 1794 } 1795 1796 if (op == KERN_PROC_ARGV) { 1797 cnt = pss.ps_nargvstr; 1798 vargv = pss.ps_argvstr; 1799 } else { 1800 cnt = pss.ps_nenvstr; 1801 vargv = pss.ps_envstr; 1802 } 1803 1804 /* -1 to have space for a terminating NUL */ 1805 limit = *oldlenp - 1; 1806 *oldlenp = 0; 1807 1808 rargv = oldp; 1809 1810 /* 1811 * *oldlenp - number of bytes copied out into readers buffer. 1812 * limit - maximal number of bytes allowed into readers buffer. 1813 * rarg - pointer into readers buffer where next arg will be stored. 1814 * rargv - pointer into readers buffer where the next rarg pointer 1815 * will be stored. 1816 * vargv - pointer into victim address space where the next argument 1817 * will be read. 1818 */ 1819 1820 /* space for cnt pointers and a NULL */ 1821 rarg = (char *)(rargv + cnt + 1); 1822 *oldlenp += (cnt + 1) * sizeof(char **); 1823 1824 while (cnt > 0 && *oldlenp < limit) { 1825 size_t len, vstrlen; 1826 1827 /* Write to readers argv */ 1828 if ((error = copyout(&rarg, rargv, sizeof(rarg))) != 0) 1829 goto out; 1830 1831 /* read the victim argv */ 1832 iov.iov_base = &varg; 1833 iov.iov_len = sizeof(varg); 1834 uio.uio_iov = &iov; 1835 uio.uio_iovcnt = 1; 1836 uio.uio_offset = (off_t)(vaddr_t)vargv; 1837 uio.uio_resid = sizeof(varg); 1838 uio.uio_segflg = UIO_SYSSPACE; 1839 uio.uio_rw = UIO_READ; 1840 uio.uio_procp = cp; 1841 if ((error = uvm_io(&vm->vm_map, &uio, 0)) != 0) 1842 goto out; 1843 1844 if (varg == NULL) 1845 break; 1846 1847 /* 1848 * read the victim arg. We must jump through hoops to avoid 1849 * crossing a page boundary too much and returning an error. 1850 */ 1851 more: 1852 len = PAGE_SIZE - (((vaddr_t)varg) & PAGE_MASK); 1853 /* leave space for the terminating NUL */ 1854 iov.iov_base = buf; 1855 iov.iov_len = len; 1856 uio.uio_iov = &iov; 1857 uio.uio_iovcnt = 1; 1858 uio.uio_offset = (off_t)(vaddr_t)varg; 1859 uio.uio_resid = len; 1860 uio.uio_segflg = UIO_SYSSPACE; 1861 uio.uio_rw = UIO_READ; 1862 uio.uio_procp = cp; 1863 if ((error = uvm_io(&vm->vm_map, &uio, 0)) != 0) 1864 goto out; 1865 1866 for (vstrlen = 0; vstrlen < len; vstrlen++) { 1867 if (buf[vstrlen] == '\0') 1868 break; 1869 } 1870 1871 /* Don't overflow readers buffer. */ 1872 if (*oldlenp + vstrlen + 1 >= limit) { 1873 error = ENOMEM; 1874 goto out; 1875 } 1876 1877 if ((error = copyout(buf, rarg, vstrlen)) != 0) 1878 goto out; 1879 1880 *oldlenp += vstrlen; 1881 rarg += vstrlen; 1882 1883 /* The string didn't end in this page? 
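		 * Advance past the bytes just copied and loop to read the
		 * next page of the victim's address space until the
		 * terminating NUL shows up.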
*/ 1884 if (vstrlen == len) { 1885 varg += vstrlen; 1886 goto more; 1887 } 1888 1889 /* End of string. Terminate it with a NUL */ 1890 buf[0] = '\0'; 1891 if ((error = copyout(buf, rarg, 1)) != 0) 1892 goto out; 1893 *oldlenp += 1; 1894 rarg += 1; 1895 1896 vargv++; 1897 rargv++; 1898 cnt--; 1899 } 1900 1901 if (*oldlenp >= limit) { 1902 error = ENOMEM; 1903 goto out; 1904 } 1905 1906 /* Write the terminating null */ 1907 rarg = NULL; 1908 error = copyout(&rarg, rargv, sizeof(rarg)); 1909 1910 out: 1911 uvmspace_free(vm); 1912 free(buf, M_TEMP, PAGE_SIZE); 1913 return (error); 1914 } 1915 1916 int 1917 sysctl_proc_cwd(int *name, u_int namelen, void *oldp, size_t *oldlenp, 1918 struct proc *cp) 1919 { 1920 struct process *findpr; 1921 struct vnode *vp; 1922 pid_t pid; 1923 int error; 1924 size_t lenused, len; 1925 char *path, *bp, *bend; 1926 1927 if (namelen > 1) 1928 return (ENOTDIR); 1929 if (namelen < 1) 1930 return (EINVAL); 1931 1932 pid = name[0]; 1933 if ((findpr = prfind(pid)) == NULL) 1934 return (ESRCH); 1935 1936 if (oldp == NULL) { 1937 *oldlenp = MAXPATHLEN * 4; 1938 return (0); 1939 } 1940 1941 /* Either system process or exiting/zombie */ 1942 if (findpr->ps_flags & (PS_SYSTEM | PS_EXITING)) 1943 return (EINVAL); 1944 1945 /* Only owner or root can get cwd */ 1946 if (findpr->ps_ucred->cr_uid != cp->p_ucred->cr_uid && 1947 (error = suser(cp)) != 0) 1948 return (error); 1949 1950 len = *oldlenp; 1951 if (len > MAXPATHLEN * 4) 1952 len = MAXPATHLEN * 4; 1953 else if (len < 2) 1954 return (ERANGE); 1955 *oldlenp = 0; 1956 1957 /* snag a reference to the vnode before we can sleep */ 1958 vp = findpr->ps_fd->fd_cdir; 1959 vref(vp); 1960 1961 path = malloc(len, M_TEMP, M_WAITOK); 1962 1963 bp = &path[len]; 1964 bend = bp; 1965 *(--bp) = '\0'; 1966 1967 /* Same as sys__getcwd */ 1968 error = vfs_getcwd_common(vp, NULL, 1969 &bp, path, len / 2, GETCWD_CHECK_ACCESS, cp); 1970 if (error == 0) { 1971 *oldlenp = lenused = bend - bp; 1972 error = copyout(bp, oldp, lenused); 1973 } 1974 1975 vrele(vp); 1976 free(path, M_TEMP, len); 1977 1978 return (error); 1979 } 1980 1981 int 1982 sysctl_proc_nobroadcastkill(int *name, u_int namelen, void *newp, size_t newlen, 1983 void *oldp, size_t *oldlenp, struct proc *cp) 1984 { 1985 struct process *findpr; 1986 pid_t pid; 1987 int error, flag; 1988 1989 if (namelen > 1) 1990 return (ENOTDIR); 1991 if (namelen < 1) 1992 return (EINVAL); 1993 1994 pid = name[0]; 1995 if ((findpr = prfind(pid)) == NULL) 1996 return (ESRCH); 1997 1998 /* Either system process or exiting/zombie */ 1999 if (findpr->ps_flags & (PS_SYSTEM | PS_EXITING)) 2000 return (EINVAL); 2001 2002 /* Only root can change PS_NOBROADCASTKILL */ 2003 if (newp != 0 && (error = suser(cp)) != 0) 2004 return (error); 2005 2006 /* get the PS_NOBROADCASTKILL flag */ 2007 flag = findpr->ps_flags & PS_NOBROADCASTKILL ? 1 : 0; 2008 2009 error = sysctl_int(oldp, oldlenp, newp, newlen, &flag); 2010 if (error == 0 && newp) { 2011 if (flag) 2012 atomic_setbits_int(&findpr->ps_flags, 2013 PS_NOBROADCASTKILL); 2014 else 2015 atomic_clearbits_int(&findpr->ps_flags, 2016 PS_NOBROADCASTKILL); 2017 } 2018 2019 return (error); 2020 } 2021 2022 /* Arbitrary but reasonable limit for one iteration. 
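 * Callers read kern.proc.vmmap in chunks of at most this many bytes,
 * seeding the first entry's kve_start with the address to resume from
 * and continuing from the last entry's kve_end.  A rough, uncompiled
 * sketch of that loop (helper name invented):
 */
#if 0	/* example userland caller */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
dump_vmmap(pid_t pid)
{
	int mib[3] = { CTL_KERN, KERN_PROC_VMMAP, pid };
	struct kinfo_vmentry kve[64];
	size_t i, len, n;
	u_long start = 0;

	for (;;) {
		kve[0].kve_start = start;	/* resume point */
		len = sizeof(kve);
		if (sysctl(mib, 3, kve, &len, NULL, 0) == -1)
			return (-1);
		n = len / sizeof(kve[0]);
		if (n == 0)
			break;
		for (i = 0; i < n; i++)
			printf("0x%lx-0x%lx\n", kve[i].kve_start,
			    kve[i].kve_end);
		/* Resume after the last entry returned. */
		start = kve[n - 1].kve_end;
	}
	return (0);
}
#endif
/*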
*/ 2023 #define VMMAP_MAXLEN MAXPHYS 2024 2025 int 2026 sysctl_proc_vmmap(int *name, u_int namelen, void *oldp, size_t *oldlenp, 2027 struct proc *cp) 2028 { 2029 struct process *findpr; 2030 pid_t pid; 2031 int error; 2032 size_t oldlen, len; 2033 struct kinfo_vmentry *kve, *ukve; 2034 u_long *ustart, start; 2035 2036 if (namelen > 1) 2037 return (ENOTDIR); 2038 if (namelen < 1) 2039 return (EINVAL); 2040 2041 /* Provide max buffer length as hint. */ 2042 if (oldp == NULL) { 2043 if (oldlenp == NULL) 2044 return (EINVAL); 2045 else { 2046 *oldlenp = VMMAP_MAXLEN; 2047 return (0); 2048 } 2049 } 2050 2051 pid = name[0]; 2052 if (pid == cp->p_p->ps_pid) { 2053 /* Self process mapping. */ 2054 findpr = cp->p_p; 2055 } else if (pid > 0) { 2056 if ((findpr = prfind(pid)) == NULL) 2057 return (ESRCH); 2058 2059 /* Either system process or exiting/zombie */ 2060 if (findpr->ps_flags & (PS_SYSTEM | PS_EXITING)) 2061 return (EINVAL); 2062 2063 #if 1 2064 /* XXX Allow only root for now */ 2065 if ((error = suser(cp)) != 0) 2066 return (error); 2067 #else 2068 /* Only owner or root can get vmmap */ 2069 if (findpr->ps_ucred->cr_uid != cp->p_ucred->cr_uid && 2070 (error = suser(cp)) != 0) 2071 return (error); 2072 #endif 2073 } else { 2074 /* Only root can get kernel_map */ 2075 if ((error = suser(cp)) != 0) 2076 return (error); 2077 findpr = NULL; 2078 } 2079 2080 /* Check the given size. */ 2081 oldlen = *oldlenp; 2082 if (oldlen == 0 || oldlen % sizeof(*kve) != 0) 2083 return (EINVAL); 2084 2085 /* Deny huge allocation. */ 2086 if (oldlen > VMMAP_MAXLEN) 2087 return (EINVAL); 2088 2089 /* 2090 * Iterate from the given address passed as the first element's 2091 * kve_start via oldp. 2092 */ 2093 ukve = (struct kinfo_vmentry *)oldp; 2094 ustart = &ukve->kve_start; 2095 error = copyin(ustart, &start, sizeof(start)); 2096 if (error != 0) 2097 return (error); 2098 2099 /* Allocate wired memory to not block. */ 2100 kve = malloc(oldlen, M_TEMP, M_WAITOK); 2101 2102 /* Set the base address and read entries. */ 2103 kve[0].kve_start = start; 2104 len = oldlen; 2105 error = fill_vmmap(findpr, kve, &len); 2106 if (error != 0 && error != ENOMEM) 2107 goto done; 2108 if (len == 0) 2109 goto done; 2110 2111 KASSERT(len <= oldlen); 2112 KASSERT((len % sizeof(struct kinfo_vmentry)) == 0); 2113 2114 error = copyout(kve, oldp, len); 2115 2116 done: 2117 *oldlenp = len; 2118 2119 free(kve, M_TEMP, oldlen); 2120 2121 return (error); 2122 } 2123 #endif 2124 2125 /* 2126 * Initialize disknames/diskstats for export by sysctl. If update is set, 2127 * then we simply update the disk statistics information. 
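 *
 * The result is exported as hw.disknames (a comma-separated string of
 * name:duid pairs) and hw.diskstats (one struct diskstats per disk).
 * An illustrative, uncompiled userland consumer, assuming struct
 * diskstats is visible via <sys/disk.h>:
 */
#if 0	/* example userland caller */
#include <sys/types.h>
#include <sys/disk.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	int mib[2] = { CTL_HW, HW_DISKSTATS };
	struct diskstats *ds;
	size_t i, len;

	/* Probe for the size, then fetch one entry per attached disk. */
	if (sysctl(mib, 2, NULL, &len, NULL, 0) == -1)
		return (1);
	if ((ds = malloc(len)) == NULL)
		return (1);
	if (sysctl(mib, 2, ds, &len, NULL, 0) == -1)
		return (1);
	for (i = 0; i < len / sizeof(*ds); i++)
		printf("%s: %llu read, %llu written\n", ds[i].ds_name,
		    (unsigned long long)ds[i].ds_rbytes,
		    (unsigned long long)ds[i].ds_wbytes);
	free(ds);
	return (0);
}
#endif
/*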

/*
 * Initialize disknames/diskstats for export by sysctl. If update is set,
 * then we simply update the disk statistics information.
 */
int
sysctl_diskinit(int update, struct proc *p)
{
	struct diskstats *sdk;
	struct disk *dk;
	const char *duid;
	int i, tlen, l;

	if ((i = rw_enter(&sysctl_disklock, RW_WRITE|RW_INTR)) != 0)
		return i;

	if (disk_change) {
		for (dk = TAILQ_FIRST(&disklist), tlen = 0; dk;
		    dk = TAILQ_NEXT(dk, dk_link)) {
			if (dk->dk_name)
				tlen += strlen(dk->dk_name);
			tlen += 18;	/* label uid + separators */
		}
		tlen++;

		if (disknames)
			free(disknames, M_SYSCTL, disknameslen);
		if (diskstats)
			free(diskstats, M_SYSCTL, diskstatslen);
		diskstats = NULL;
		disknames = NULL;
		diskstats = mallocarray(disk_count, sizeof(struct diskstats),
		    M_SYSCTL, M_WAITOK|M_ZERO);
		diskstatslen = disk_count * sizeof(struct diskstats);
		disknames = malloc(tlen, M_SYSCTL, M_WAITOK|M_ZERO);
		disknameslen = tlen;
		disknames[0] = '\0';

		for (dk = TAILQ_FIRST(&disklist), i = 0, l = 0; dk;
		    dk = TAILQ_NEXT(dk, dk_link), i++) {
			duid = NULL;
			if (dk->dk_label && !duid_iszero(dk->dk_label->d_uid))
				duid = duid_format(dk->dk_label->d_uid);
			snprintf(disknames + l, tlen - l, "%s:%s,",
			    dk->dk_name ? dk->dk_name : "",
			    duid ? duid : "");
			l += strlen(disknames + l);
			sdk = diskstats + i;
			strlcpy(sdk->ds_name, dk->dk_name,
			    sizeof(sdk->ds_name));
			mtx_enter(&dk->dk_mtx);
			sdk->ds_busy = dk->dk_busy;
			sdk->ds_rxfer = dk->dk_rxfer;
			sdk->ds_wxfer = dk->dk_wxfer;
			sdk->ds_seek = dk->dk_seek;
			sdk->ds_rbytes = dk->dk_rbytes;
			sdk->ds_wbytes = dk->dk_wbytes;
			sdk->ds_attachtime = dk->dk_attachtime;
			sdk->ds_timestamp = dk->dk_timestamp;
			sdk->ds_time = dk->dk_time;
			mtx_leave(&dk->dk_mtx);
		}

		/* Eliminate trailing comma */
		if (l != 0)
			disknames[l - 1] = '\0';
		disk_change = 0;
	} else if (update) {
		/* Just update, number of drives hasn't changed */
		for (dk = TAILQ_FIRST(&disklist), i = 0; dk;
		    dk = TAILQ_NEXT(dk, dk_link), i++) {
			sdk = diskstats + i;
			strlcpy(sdk->ds_name, dk->dk_name,
			    sizeof(sdk->ds_name));
			mtx_enter(&dk->dk_mtx);
			sdk->ds_busy = dk->dk_busy;
			sdk->ds_rxfer = dk->dk_rxfer;
			sdk->ds_wxfer = dk->dk_wxfer;
			sdk->ds_seek = dk->dk_seek;
			sdk->ds_rbytes = dk->dk_rbytes;
			sdk->ds_wbytes = dk->dk_wbytes;
			sdk->ds_attachtime = dk->dk_attachtime;
			sdk->ds_timestamp = dk->dk_timestamp;
			sdk->ds_time = dk->dk_time;
			mtx_leave(&dk->dk_mtx);
		}
	}
	rw_exit_write(&sysctl_disklock);
	return 0;
}
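
/*
 * Illustrative userland sketch, not part of this file: the buffers
 * built by sysctl_diskinit() are exported as hw.disknames and
 * hw.diskstats (HW_DISKNAMES / HW_DISKSTATS).  disknames is a
 * NUL-terminated, comma-separated list of "name:duid" pairs, with an
 * empty duid when the disk has no label.
 *
 *	int mib[2] = { CTL_HW, HW_DISKNAMES };
 *	char *names;
 *	size_t len;
 *
 *	if (sysctl(mib, 2, NULL, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 *	if ((names = malloc(len)) == NULL)
 *		err(1, NULL);
 *	if (sysctl(mib, 2, names, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 */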

#if defined(SYSVMSG) || defined(SYSVSEM) || defined(SYSVSHM)
int
sysctl_sysvipc(int *name, u_int namelen, void *where, size_t *sizep)
{
#ifdef SYSVSEM
	struct sem_sysctl_info *semsi;
#endif
#ifdef SYSVSHM
	struct shm_sysctl_info *shmsi;
#endif
	size_t infosize, dssize, tsize, buflen, bufsiz;
	int i, nds, error, ret;
	void *buf;

	if (namelen != 1)
		return (EINVAL);

	buflen = *sizep;

	switch (*name) {
	case KERN_SYSVIPC_MSG_INFO:
#ifdef SYSVMSG
		return (sysctl_sysvmsg(name, namelen, where, sizep));
#else
		return (EOPNOTSUPP);
#endif
	case KERN_SYSVIPC_SEM_INFO:
#ifdef SYSVSEM
		infosize = sizeof(semsi->seminfo);
		nds = seminfo.semmni;
		dssize = sizeof(semsi->semids[0]);
		break;
#else
		return (EOPNOTSUPP);
#endif
	case KERN_SYSVIPC_SHM_INFO:
#ifdef SYSVSHM
		infosize = sizeof(shmsi->shminfo);
		nds = shminfo.shmmni;
		dssize = sizeof(shmsi->shmids[0]);
		break;
#else
		return (EOPNOTSUPP);
#endif
	default:
		return (EINVAL);
	}
	tsize = infosize + (nds * dssize);

	/* Return just the total size required. */
	if (where == NULL) {
		*sizep = tsize;
		return (0);
	}

	/* Not enough room for even the info struct. */
	if (buflen < infosize) {
		*sizep = 0;
		return (ENOMEM);
	}
	bufsiz = min(tsize, buflen);
	buf = malloc(bufsiz, M_TEMP, M_WAITOK|M_ZERO);

	switch (*name) {
#ifdef SYSVSEM
	case KERN_SYSVIPC_SEM_INFO:
		semsi = (struct sem_sysctl_info *)buf;
		semsi->seminfo = seminfo;
		break;
#endif
#ifdef SYSVSHM
	case KERN_SYSVIPC_SHM_INFO:
		shmsi = (struct shm_sysctl_info *)buf;
		shmsi->shminfo = shminfo;
		break;
#endif
	}
	buflen -= infosize;

	ret = 0;
	if (buflen > 0) {
		/* Fill in the IPC data structures. */
		for (i = 0; i < nds; i++) {
			if (buflen < dssize) {
				ret = ENOMEM;
				break;
			}
			switch (*name) {
#ifdef SYSVSEM
			case KERN_SYSVIPC_SEM_INFO:
				if (sema[i] != NULL)
					memcpy(&semsi->semids[i], sema[i],
					    dssize);
				else
					memset(&semsi->semids[i], 0, dssize);
				break;
#endif
#ifdef SYSVSHM
			case KERN_SYSVIPC_SHM_INFO:
				if (shmsegs[i] != NULL)
					memcpy(&shmsi->shmids[i], shmsegs[i],
					    dssize);
				else
					memset(&shmsi->shmids[i], 0, dssize);
				break;
#endif
			}
			buflen -= dssize;
		}
	}
	*sizep -= buflen;
	error = copyout(buf, where, *sizep);
	free(buf, M_TEMP, bufsiz);
	/* If copyout succeeded, use return code set earlier. */
	return (error ? error : ret);
}
#endif /* SYSVMSG || SYSVSEM || SYSVSHM */
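
/*
 * Illustrative userland sketch, not part of this file, of the two-call
 * pattern sysctl_sysvipc() supports, shown for the semaphore view;
 * ipcs(1) is the in-tree consumer.  The KERN_SYSVIPC_INFO second-level
 * name is an assumption from <sys/sysctl.h>.  The first call probes
 * the required size, the second fetches the data.
 *
 *	int mib[3] = { CTL_KERN, KERN_SYSVIPC_INFO, KERN_SYSVIPC_SEM_INFO };
 *	struct sem_sysctl_info *semsi;
 *	size_t len;
 *
 *	if (sysctl(mib, 3, NULL, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 *	if ((semsi = malloc(len)) == NULL)
 *		err(1, NULL);
 *	if (sysctl(mib, 3, semsi, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 *	(semsi->seminfo is followed by seminfo.semmni id records)
 */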

#ifndef SMALL_KERNEL

int
sysctl_intrcnt(int *name, u_int namelen, void *oldp, size_t *oldlenp)
{
	return (evcount_sysctl(name, namelen, oldp, oldlenp, NULL, 0));
}

int
sysctl_sensors(int *name, u_int namelen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	struct ksensor *ks;
	struct sensor *us;
	struct ksensordev *ksd;
	struct sensordev *usd;
	int dev, numt, ret;
	enum sensor_type type;

	if (namelen != 1 && namelen != 3)
		return (ENOTDIR);

	dev = name[0];
	if (namelen == 1) {
		ret = sensordev_get(dev, &ksd);
		if (ret)
			return (ret);

		/* Grab a copy, to clear the kernel pointers */
		usd = malloc(sizeof(*usd), M_TEMP, M_WAITOK|M_ZERO);
		usd->num = ksd->num;
		strlcpy(usd->xname, ksd->xname, sizeof(usd->xname));
		memcpy(usd->maxnumt, ksd->maxnumt, sizeof(usd->maxnumt));
		usd->sensors_count = ksd->sensors_count;

		ret = sysctl_rdstruct(oldp, oldlenp, newp, usd,
		    sizeof(struct sensordev));

		free(usd, M_TEMP, sizeof(*usd));
		return (ret);
	}

	type = name[1];
	numt = name[2];

	ret = sensor_find(dev, type, numt, &ks);
	if (ret)
		return (ret);

	/* Grab a copy, to clear the kernel pointers */
	us = malloc(sizeof(*us), M_TEMP, M_WAITOK|M_ZERO);
	memcpy(us->desc, ks->desc, sizeof(us->desc));
	us->tv = ks->tv;
	us->value = ks->value;
	us->type = ks->type;
	us->status = ks->status;
	us->numt = ks->numt;
	us->flags = ks->flags;

	ret = sysctl_rdstruct(oldp, oldlenp, newp, us,
	    sizeof(struct sensor));
	free(us, M_TEMP, sizeof(*us));
	return (ret);
}
#endif /* SMALL_KERNEL */

int
sysctl_cptime2(int *name, u_int namelen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	int found = 0;

	if (namelen != 1)
		return (ENOTDIR);

	CPU_INFO_FOREACH(cii, ci) {
		if (name[0] == CPU_INFO_UNIT(ci)) {
			found = 1;
			break;
		}
	}
	if (!found)
		return (ENOENT);

	return (sysctl_rdstruct(oldp, oldlenp, newp,
	    &ci->ci_schedstate.spc_cp_time,
	    sizeof(ci->ci_schedstate.spc_cp_time)));
}

#if NAUDIO > 0
int
sysctl_audio(int *name, u_int namelen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	if (namelen != 1)
		return (ENOTDIR);

	if (name[0] != KERN_AUDIO_RECORD)
		return (ENOENT);

	return (sysctl_int(oldp, oldlenp, newp, newlen, &audio_record_enable));
}
#endif

#if NVIDEO > 0
int
sysctl_video(int *name, u_int namelen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	if (namelen != 1)
		return (ENOTDIR);

	if (name[0] != KERN_VIDEO_RECORD)
		return (ENOENT);

	return (sysctl_int(oldp, oldlenp, newp, newlen, &video_record_enable));
}
#endif
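
/*
 * Illustrative userland sketch, not part of this file: the per-CPU
 * counters returned by sysctl_cptime2() are read with a MIB of
 * { CTL_KERN, KERN_CPTIME2, n }, where n matches CPU_INFO_UNIT(); the
 * audio/video record knobs above are plain integers under
 * kern.audio.record and kern.video.record.  Names are assumptions
 * from <sys/sysctl.h>; top(1) and systat(1) are in-tree consumers.
 *
 *	int mib[3] = { CTL_KERN, KERN_CPTIME2, 0 };
 *	u_int64_t cp_time[CPUSTATES];
 *	size_t len = sizeof(cp_time);
 *
 *	if (sysctl(mib, 3, cp_time, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 */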

int
sysctl_cpustats(int *name, u_int namelen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	CPU_INFO_ITERATOR cii;
	struct cpustats cs;
	struct cpu_info *ci;
	int found = 0;

	if (namelen != 1)
		return (ENOTDIR);

	CPU_INFO_FOREACH(cii, ci) {
		if (name[0] == CPU_INFO_UNIT(ci)) {
			found = 1;
			break;
		}
	}
	if (!found)
		return (ENOENT);

	memcpy(&cs.cs_time, &ci->ci_schedstate.spc_cp_time, sizeof(cs.cs_time));
	cs.cs_flags = 0;
	if (cpu_is_online(ci))
		cs.cs_flags |= CPUSTATS_ONLINE;

	return (sysctl_rdstruct(oldp, oldlenp, newp, &cs, sizeof(cs)));
}

int
sysctl_utc_offset(void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
	struct timespec adjusted, now;
	int adjustment_seconds, error, new_offset_minutes, old_offset_minutes;

	old_offset_minutes = utc_offset / 60;	/* seconds -> minutes */
	if (securelevel > 0)
		return sysctl_rdint(oldp, oldlenp, newp, old_offset_minutes);

	new_offset_minutes = old_offset_minutes;
	error = sysctl_int(oldp, oldlenp, newp, newlen, &new_offset_minutes);
	if (error)
		return error;
	if (new_offset_minutes < -24 * 60 || new_offset_minutes > 24 * 60)
		return EINVAL;
	if (new_offset_minutes == old_offset_minutes)
		return 0;

	utc_offset = new_offset_minutes * 60;	/* minutes -> seconds */
	adjustment_seconds = (new_offset_minutes - old_offset_minutes) * 60;

	nanotime(&now);
	adjusted = now;
	adjusted.tv_sec -= adjustment_seconds;
	tc_setrealtimeclock(&adjusted);
	resettodr();

	return 0;
}
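
/*
 * Illustrative sketch, not part of this file: sysctl_utc_offset()
 * backs the kern.utc_offset node (KERN_UTC_OFFSET), the real-time
 * clock's offset from UTC in minutes.  Reads are always allowed;
 * writes are rejected once securelevel has been raised, and an
 * accepted write updates utc_offset, steps the realtime clock via
 * tc_setrealtimeclock(), and pushes the result back to the TODR.
 * The sysctl(8) usage below assumes the kern.utc_offset name:
 *
 *	# sysctl kern.utc_offset
 *	kern.utc_offset=0
 *	# sysctl kern.utc_offset=120
 */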