/*	$NetBSD: kern_resource.c,v 1.191 2023/07/08 20:02:10 riastradh Exp $	*/

/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_resource.c	8.8 (Berkeley) 2/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_resource.c,v 1.191 2023/07/08 20:02:10 riastradh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/resourcevar.h>
#include <sys/kmem.h>
#include <sys/namei.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/timevar.h>
#include <sys/kauth.h>
#include <sys/atomic.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>

/*
 * Maximum process data and stack limits.
 * They are variables so they are patchable.
 */
rlim_t maxdmap = MAXDSIZ;
rlim_t maxsmap = MAXSSIZ;

static pool_cache_t	plimit_cache	__read_mostly;
static pool_cache_t	pstats_cache	__read_mostly;

static kauth_listener_t	resource_listener;
static struct sysctllog	*proc_sysctllog;

static int	donice(struct lwp *, struct proc *, int);
static void	sysctl_proc_setup(void);

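/*
 * resource_listener_cb: kauth(9) listener on the process scope, used to
 * authorize nice value (KAUTH_PROCESS_NICE) and resource limit
 * (KAUTH_PROCESS_RLIMIT) requests against the target process.
 */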
static int
resource_listener_cb(kauth_cred_t cred, kauth_action_t action, void *cookie,
    void *arg0, void *arg1, void *arg2, void *arg3)
{
	struct proc *p;
	int result;

	result = KAUTH_RESULT_DEFER;
	p = arg0;

	switch (action) {
	case KAUTH_PROCESS_NICE:
		if (kauth_cred_geteuid(cred) != kauth_cred_geteuid(p->p_cred) &&
		    kauth_cred_getuid(cred) != kauth_cred_geteuid(p->p_cred)) {
			break;
		}

		if ((u_long)arg1 >= p->p_nice)
			result = KAUTH_RESULT_ALLOW;

		break;

	case KAUTH_PROCESS_RLIMIT: {
		enum kauth_process_req req;

		req = (enum kauth_process_req)(uintptr_t)arg1;

		switch (req) {
		case KAUTH_REQ_PROCESS_RLIMIT_GET:
			result = KAUTH_RESULT_ALLOW;
			break;

		case KAUTH_REQ_PROCESS_RLIMIT_SET: {
			struct rlimit *new_rlimit;
			u_long which;

			if ((p != curlwp->l_proc) &&
			    (proc_uidmatch(cred, p->p_cred) != 0))
				break;

			new_rlimit = arg2;
			which = (u_long)arg3;

			if (new_rlimit->rlim_max <= p->p_rlimit[which].rlim_max)
				result = KAUTH_RESULT_ALLOW;

			break;
		}

		default:
			break;
		}

		break;
	}

	default:
		break;
	}

	return result;
}

void
resource_init(void)
{

	plimit_cache = pool_cache_init(sizeof(struct plimit), 0, 0, 0,
	    "plimitpl", NULL, IPL_NONE, NULL, NULL, NULL);
	pstats_cache = pool_cache_init(sizeof(struct pstats), 0, 0, 0,
	    "pstatspl", NULL, IPL_NONE, NULL, NULL, NULL);

	resource_listener = kauth_listen_scope(KAUTH_SCOPE_PROCESS,
	    resource_listener_cb, NULL);

	sysctl_proc_setup();
}

/*
 * Resource controls and accounting.
 */

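/*
 * Note: the nice value visible to userland ranges from PRIO_MIN to
 * PRIO_MAX, while p_nice is kept biased by NZERO; the syscalls below
 * convert between the two representations.
 */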
int
sys_getpriority(struct lwp *l, const struct sys_getpriority_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) which;
		syscallarg(id_t) who;
	} */
	struct proc *curp = l->l_proc, *p;
	id_t who = SCARG(uap, who);
	int low = NZERO + PRIO_MAX + 1;

	mutex_enter(&proc_lock);
	switch (SCARG(uap, which)) {
	case PRIO_PROCESS:
		p = who ? proc_find(who) : curp;
		if (p != NULL)
			low = p->p_nice;
		break;

	case PRIO_PGRP: {
		struct pgrp *pg;

		if (who == 0)
			pg = curp->p_pgrp;
		else if ((pg = pgrp_find(who)) == NULL)
			break;
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			if (p->p_nice < low)
				low = p->p_nice;
		}
		break;
	}

	case PRIO_USER:
		if (who == 0)
			who = (int)kauth_cred_geteuid(l->l_cred);
		PROCLIST_FOREACH(p, &allproc) {
			mutex_enter(p->p_lock);
			if (kauth_cred_geteuid(p->p_cred) ==
			    (uid_t)who && p->p_nice < low)
				low = p->p_nice;
			mutex_exit(p->p_lock);
		}
		break;

	default:
		mutex_exit(&proc_lock);
		return EINVAL;
	}
	mutex_exit(&proc_lock);

	if (low == NZERO + PRIO_MAX + 1) {
		return ESRCH;
	}
	*retval = low - NZERO;
	return 0;
}

int
sys_setpriority(struct lwp *l, const struct sys_setpriority_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) which;
		syscallarg(id_t) who;
		syscallarg(int) prio;
	} */
	struct proc *curp = l->l_proc, *p;
	id_t who = SCARG(uap, who);
	int found = 0, error = 0;

	mutex_enter(&proc_lock);
	switch (SCARG(uap, which)) {
	case PRIO_PROCESS:
		p = who ? proc_find(who) : curp;
		if (p != NULL) {
			mutex_enter(p->p_lock);
			found++;
			error = donice(l, p, SCARG(uap, prio));
			mutex_exit(p->p_lock);
		}
		break;

	case PRIO_PGRP: {
		struct pgrp *pg;

		if (who == 0)
			pg = curp->p_pgrp;
		else if ((pg = pgrp_find(who)) == NULL)
			break;
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			mutex_enter(p->p_lock);
			found++;
			error = donice(l, p, SCARG(uap, prio));
			mutex_exit(p->p_lock);
			if (error)
				break;
		}
		break;
	}

	case PRIO_USER:
		if (who == 0)
			who = (int)kauth_cred_geteuid(l->l_cred);
		PROCLIST_FOREACH(p, &allproc) {
			mutex_enter(p->p_lock);
			if (kauth_cred_geteuid(p->p_cred) ==
			    (uid_t)SCARG(uap, who)) {
				found++;
				error = donice(l, p, SCARG(uap, prio));
			}
			mutex_exit(p->p_lock);
			if (error)
				break;
		}
		break;

	default:
		mutex_exit(&proc_lock);
		return EINVAL;
	}
	mutex_exit(&proc_lock);

	return (found == 0) ? ESRCH : error;
}

/*
 * Renice a process.
 *
 * Call with the target process' credentials locked.
 */
static int
donice(struct lwp *l, struct proc *chgp, int n)
{
	kauth_cred_t cred = l->l_cred;

	KASSERT(mutex_owned(chgp->p_lock));

	if (kauth_cred_geteuid(cred) && kauth_cred_getuid(cred) &&
	    kauth_cred_geteuid(cred) != kauth_cred_geteuid(chgp->p_cred) &&
	    kauth_cred_getuid(cred) != kauth_cred_geteuid(chgp->p_cred))
		return EPERM;

	if (n > PRIO_MAX) {
		n = PRIO_MAX;
	}
	if (n < PRIO_MIN) {
		n = PRIO_MIN;
	}
	n += NZERO;

	if (kauth_authorize_process(cred, KAUTH_PROCESS_NICE, chgp,
	    KAUTH_ARG(n), NULL, NULL)) {
		return EACCES;
	}

	sched_nice(chgp, n);
	return 0;
}

int
sys_setrlimit(struct lwp *l, const struct sys_setrlimit_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) which;
		syscallarg(const struct rlimit *) rlp;
	} */
	int error, which = SCARG(uap, which);
	struct rlimit alim;

	error = copyin(SCARG(uap, rlp), &alim, sizeof(struct rlimit));
	if (error) {
		return error;
	}
	return dosetrlimit(l, l->l_proc, which, &alim);
}

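/*
 * dosetrlimit: set a resource limit of the given process, clamping the
 * requested values to the system-wide maxima and, for RLIMIT_STACK,
 * adjusting the protection of the stack region accordingly.
 */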
int
dosetrlimit(struct lwp *l, struct proc *p, int which, struct rlimit *limp)
{
	struct rlimit *alimp;
	int error;

	if ((u_int)which >= RLIM_NLIMITS)
		return EINVAL;

	if (limp->rlim_cur > limp->rlim_max) {
		/*
		 * This is a programming error.  According to SUSv2, we
		 * should return an error in this case.
		 */
		return EINVAL;
	}

	alimp = &p->p_rlimit[which];
	/* if we don't change the value, no need to limcopy() */
	if (limp->rlim_cur == alimp->rlim_cur &&
	    limp->rlim_max == alimp->rlim_max)
		return 0;

	error = kauth_authorize_process(l->l_cred, KAUTH_PROCESS_RLIMIT,
	    p, KAUTH_ARG(KAUTH_REQ_PROCESS_RLIMIT_SET), limp, KAUTH_ARG(which));
	if (error)
		return error;

	lim_privatise(p);
	/* p->p_limit is now unchangeable */
	alimp = &p->p_rlimit[which];

	switch (which) {

	case RLIMIT_DATA:
		if (limp->rlim_cur > maxdmap)
			limp->rlim_cur = maxdmap;
		if (limp->rlim_max > maxdmap)
			limp->rlim_max = maxdmap;
		break;

	case RLIMIT_STACK:
		if (limp->rlim_cur > maxsmap)
			limp->rlim_cur = maxsmap;
		if (limp->rlim_max > maxsmap)
			limp->rlim_max = maxsmap;

		/*
		 * Return EINVAL if the new stack size limit is lower than
		 * current usage.  Otherwise, the process would get SIGSEGV
		 * the moment it would try to access anything on its current
		 * stack.  This conforms to SUSv2.
		 */
		if (btoc(limp->rlim_cur) < p->p_vmspace->vm_ssize ||
		    btoc(limp->rlim_max) < p->p_vmspace->vm_ssize) {
			return EINVAL;
		}

		/*
		 * Stack is allocated to the max at exec time with only
		 * "rlim_cur" bytes accessible (in other words, the stack
		 * is allocated as two contiguous regions split at the
		 * "rlim_cur" bytes boundary).
		 *
		 * Since allocation is done in terms of pages, round up
		 * "rlim_cur" (otherwise, the contiguous regions would
		 * overlap).  If the stack limit is going up, make more
		 * pages accessible; if it is going down, make them
		 * inaccessible.
		 */
		limp->rlim_max = round_page(limp->rlim_max);
		limp->rlim_cur = round_page(limp->rlim_cur);
		if (limp->rlim_cur != alimp->rlim_cur) {
			vaddr_t addr;
			vsize_t size;
			vm_prot_t prot;
			char *base, *tmp;

			base = p->p_vmspace->vm_minsaddr;
			if (limp->rlim_cur > alimp->rlim_cur) {
				prot = VM_PROT_READ | VM_PROT_WRITE;
				size = limp->rlim_cur - alimp->rlim_cur;
				tmp = STACK_GROW(base, alimp->rlim_cur);
			} else {
				prot = VM_PROT_NONE;
				size = alimp->rlim_cur - limp->rlim_cur;
				tmp = STACK_GROW(base, limp->rlim_cur);
			}
			addr = (vaddr_t)STACK_ALLOC(tmp, size);
			(void) uvm_map_protect(&p->p_vmspace->vm_map,
			    addr, addr + size, prot, false);
		}
		break;

	case RLIMIT_NOFILE:
		if (limp->rlim_cur > maxfiles)
			limp->rlim_cur = maxfiles;
		if (limp->rlim_max > maxfiles)
			limp->rlim_max = maxfiles;
		break;

	case RLIMIT_NPROC:
		if (limp->rlim_cur > maxproc)
			limp->rlim_cur = maxproc;
		if (limp->rlim_max > maxproc)
			limp->rlim_max = maxproc;
		break;

	case RLIMIT_NTHR:
		if (limp->rlim_cur > maxlwp)
			limp->rlim_cur = maxlwp;
		if (limp->rlim_max > maxlwp)
			limp->rlim_max = maxlwp;
		break;
	}

	mutex_enter(&p->p_limit->pl_lock);
	*alimp = *limp;
	mutex_exit(&p->p_limit->pl_lock);
	return 0;
}

int
sys_getrlimit(struct lwp *l, const struct sys_getrlimit_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) which;
		syscallarg(struct rlimit *) rlp;
	} */
	struct proc *p = l->l_proc;
	int which = SCARG(uap, which);
	struct rlimit rl;

	if ((u_int)which >= RLIM_NLIMITS)
		return EINVAL;

	mutex_enter(p->p_lock);
	memcpy(&rl, &p->p_rlimit[which], sizeof(rl));
	mutex_exit(p->p_lock);

	return copyout(&rl, SCARG(uap, rlp), sizeof(rl));
}

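/*
 * addrulwp: add an LWP's accumulated run time to *tm, including an
 * estimate for the current time slice if the LWP is running.
 */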
void
addrulwp(struct lwp *l, struct bintime *tm)
{

	lwp_lock(l);
	bintime_add(tm, &l->l_rtime);
	if ((l->l_pflag & LP_RUNNING) != 0 &&
	    (l->l_pflag & (LP_INTR | LP_TIMEINTR)) != LP_INTR) {
		struct bintime diff;
		/*
		 * Adjust for the current time slice.  This is
		 * actually fairly important since the error
		 * here is on the order of a time quantum,
		 * which is much greater than the sampling
		 * error.
		 */
		binuptime(&diff);
		membar_consumer();	/* for softint_dispatch() */
		bintime_sub(&diff, &l->l_stime);
		bintime_add(tm, &diff);
	}
	lwp_unlock(l);
}

/*
 * Transform the running time and tick information in proc p into user,
 * system, and interrupt time usage.
 *
 * Should be called with p->p_lock held unless called from exit1().
 */
void
calcru(struct proc *p, struct timeval *up, struct timeval *sp,
    struct timeval *ip, struct timeval *rp)
{
	uint64_t u, st, ut, it, tot, dt;
	struct lwp *l;
	struct bintime tm;
	struct timeval tv;

	KASSERT(p->p_stat == SDEAD || mutex_owned(p->p_lock));

	mutex_spin_enter(&p->p_stmutex);
	st = p->p_sticks;
	ut = p->p_uticks;
	it = p->p_iticks;
	mutex_spin_exit(&p->p_stmutex);

	tm = p->p_rtime;

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		addrulwp(l, &tm);
	}

	tot = st + ut + it;
	bintime2timeval(&tm, &tv);
	u = (uint64_t)tv.tv_sec * 1000000ul + tv.tv_usec;

	if (tot == 0) {
		/* No ticks, so can't use to share time out, split 50-50 */
		st = ut = u / 2;
	} else {
		st = (u * st) / tot;
		ut = (u * ut) / tot;
	}

	/*
	 * Try to avoid lying to the users (too much)
	 *
	 * Of course, user/sys time are based on sampling (ie: statistics)
	 * so that would be impossible, but convincing the mark
	 * that we have used less ?time this call than we had
	 * last time, is beyond reasonable...  (the con fails!)
	 *
	 * Note that since actual used time cannot decrease, either
	 * utime or stime (or both) must be greater now than last time
	 * (or both the same) - if one seems to have decreased, hold
	 * it constant and steal the necessary bump from the other
	 * which must have increased.
	 */
	if (p->p_xutime > ut) {
		dt = p->p_xutime - ut;
		st -= uimin(dt, st);
		ut = p->p_xutime;
	} else if (p->p_xstime > st) {
		dt = p->p_xstime - st;
		ut -= uimin(dt, ut);
		st = p->p_xstime;
	}

	if (sp != NULL) {
		p->p_xstime = st;
		sp->tv_sec = st / 1000000;
		sp->tv_usec = st % 1000000;
	}
	if (up != NULL) {
		p->p_xutime = ut;
		up->tv_sec = ut / 1000000;
		up->tv_usec = ut % 1000000;
	}
	if (ip != NULL) {
		if (it != 0)	/* it != 0 --> tot != 0 */
			it = (u * it) / tot;
		ip->tv_sec = it / 1000000;
		ip->tv_usec = it % 1000000;
	}
	if (rp != NULL) {
		*rp = tv;
	}
}

int
sys___getrusage50(struct lwp *l, const struct sys___getrusage50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) who;
		syscallarg(struct rusage *) rusage;
	} */
	int error;
	struct rusage ru;
	struct proc *p = l->l_proc;

	error = getrusage1(p, SCARG(uap, who), &ru);
	if (error != 0)
		return error;

	return copyout(&ru, SCARG(uap, rusage), sizeof(ru));
}

int
getrusage1(struct proc *p, int who, struct rusage *ru)
{

	switch (who) {
	case RUSAGE_SELF:
		mutex_enter(p->p_lock);
		ruspace(p);
		memcpy(ru, &p->p_stats->p_ru, sizeof(*ru));
		calcru(p, &ru->ru_utime, &ru->ru_stime, NULL, NULL);
		rulwps(p, ru);
		mutex_exit(p->p_lock);
		break;
	case RUSAGE_CHILDREN:
		mutex_enter(p->p_lock);
		memcpy(ru, &p->p_stats->p_cru, sizeof(*ru));
		mutex_exit(p->p_lock);
		break;
	default:
		return EINVAL;
	}

	return 0;
}

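/*
 * ruspace: refresh the memory usage fields of the process's rusage from
 * its vmspace; the values are reported in kilobytes.
 */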
void
ruspace(struct proc *p)
{
	struct vmspace *vm = p->p_vmspace;
	struct rusage *ru = &p->p_stats->p_ru;

	ru->ru_ixrss = vm->vm_tsize << (PAGE_SHIFT - 10);
	ru->ru_idrss = vm->vm_dsize << (PAGE_SHIFT - 10);
	ru->ru_isrss = vm->vm_ssize << (PAGE_SHIFT - 10);
#ifdef __HAVE_NO_PMAP_STATS
	/* We don't keep track of the max so we get the current */
	ru->ru_maxrss = vm_resident_count(vm) << (PAGE_SHIFT - 10);
#else
	ru->ru_maxrss = vm->vm_rssmax << (PAGE_SHIFT - 10);
#endif
}

void
ruadd(struct rusage *ru, struct rusage *ru2)
{
	long *ip, *ip2;
	int i;

	timeradd(&ru->ru_utime, &ru2->ru_utime, &ru->ru_utime);
	timeradd(&ru->ru_stime, &ru2->ru_stime, &ru->ru_stime);
	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
	ip = &ru->ru_first; ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
		*ip++ += *ip2++;
}

void
rulwps(proc_t *p, struct rusage *ru)
{
	lwp_t *l;

	KASSERT(mutex_owned(p->p_lock));

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		ruadd(ru, &l->l_ru);
		ru->ru_nvcsw += (l->l_ncsw - l->l_nivcsw);
		ru->ru_nivcsw += l->l_nivcsw;
	}
}

/*
 * lim_copy: make a copy of the plimit structure.
 *
 * We use copy-on-write after fork, and copy when a limit is changed.
 */
struct plimit *
lim_copy(struct plimit *lim)
{
	struct plimit *newlim;
	char *corename;
	size_t alen, len;

	newlim = pool_cache_get(plimit_cache, PR_WAITOK);
	mutex_init(&newlim->pl_lock, MUTEX_DEFAULT, IPL_NONE);
	newlim->pl_writeable = false;
	newlim->pl_refcnt = 1;
	newlim->pl_sv_limit = NULL;

	mutex_enter(&lim->pl_lock);
	memcpy(newlim->pl_rlimit, lim->pl_rlimit,
	    sizeof(struct rlimit) * RLIM_NLIMITS);

	/*
	 * Note: the common case is a use of the default core name.
	 */
	alen = 0;
	corename = NULL;
	for (;;) {
		if (lim->pl_corename == defcorename) {
			newlim->pl_corename = defcorename;
			newlim->pl_cnlen = 0;
			break;
		}
		len = lim->pl_cnlen;
		if (len == alen) {
			newlim->pl_corename = corename;
			newlim->pl_cnlen = len;
			memcpy(corename, lim->pl_corename, len);
			corename = NULL;
			break;
		}
		mutex_exit(&lim->pl_lock);
		if (corename) {
			kmem_free(corename, alen);
		}
		alen = len;
		corename = kmem_alloc(alen, KM_SLEEP);
		mutex_enter(&lim->pl_lock);
	}
	mutex_exit(&lim->pl_lock);

	if (corename) {
		kmem_free(corename, alen);
	}
	return newlim;
}

void
lim_addref(struct plimit *lim)
{
	atomic_inc_uint(&lim->pl_refcnt);
}

/*
 * lim_privatise: give a process its own private plimit structure.
 */
void
lim_privatise(proc_t *p)
{
	struct plimit *lim = p->p_limit, *newlim;

	if (lim->pl_writeable) {
		return;
	}

	newlim = lim_copy(lim);

	mutex_enter(p->p_lock);
	if (p->p_limit->pl_writeable) {
		/* Other thread won the race. */
		mutex_exit(p->p_lock);
		lim_free(newlim);
		return;
	}

	/*
	 * Since p->p_limit can be accessed without the lock held,
	 * the old limit structure must not be deleted yet.
	 */
	newlim->pl_sv_limit = p->p_limit;
	newlim->pl_writeable = true;
	p->p_limit = newlim;
	mutex_exit(p->p_lock);
}

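/*
 * lim_setcorename: install a new core file name for the process, giving
 * it a private plimit first and releasing the previous name unless it
 * was the shared default.
 */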
void
lim_setcorename(proc_t *p, char *name, size_t len)
{
	struct plimit *lim;
	char *oname;
	size_t olen;

	lim_privatise(p);
	lim = p->p_limit;

	mutex_enter(&lim->pl_lock);
	oname = lim->pl_corename;
	olen = lim->pl_cnlen;
	lim->pl_corename = name;
	lim->pl_cnlen = len;
	mutex_exit(&lim->pl_lock);

	if (oname != defcorename) {
		kmem_free(oname, olen);
	}
}

void
lim_free(struct plimit *lim)
{
	struct plimit *sv_lim;

	do {
		membar_release();
		if (atomic_dec_uint_nv(&lim->pl_refcnt) > 0) {
			return;
		}
		membar_acquire();
		if (lim->pl_corename != defcorename) {
			kmem_free(lim->pl_corename, lim->pl_cnlen);
		}
		sv_lim = lim->pl_sv_limit;
		mutex_destroy(&lim->pl_lock);
		pool_cache_put(plimit_cache, lim);
	} while ((lim = sv_lim) != NULL);
}

struct pstats *
pstatscopy(struct pstats *ps)
{
	struct pstats *nps;
	size_t len;

	nps = pool_cache_get(pstats_cache, PR_WAITOK);

	len = (char *)&nps->pstat_endzero - (char *)&nps->pstat_startzero;
	memset(&nps->pstat_startzero, 0, len);

	len = (char *)&nps->pstat_endcopy - (char *)&nps->pstat_startcopy;
	memcpy(&nps->pstat_startcopy, &ps->pstat_startcopy, len);

	return nps;
}

void
pstatsfree(struct pstats *ps)
{

	pool_cache_put(pstats_cache, ps);
}

/*
 * sysctl_proc_findproc: a routine for sysctl proc subtree helpers that
 * need to pick a valid process by PID.
 *
 * => Hold a reference on the process, on success.
 */
static int
sysctl_proc_findproc(lwp_t *l, pid_t pid, proc_t **p2)
{
	proc_t *p;
	int error;

	if (pid == PROC_CURPROC) {
		p = l->l_proc;
	} else {
		mutex_enter(&proc_lock);
		p = proc_find(pid);
		if (p == NULL) {
			mutex_exit(&proc_lock);
			return ESRCH;
		}
	}
	error = rw_tryenter(&p->p_reflock, RW_READER) ? 0 : EBUSY;
	if (pid != PROC_CURPROC) {
		mutex_exit(&proc_lock);
	}
	*p2 = p;
	return error;
}

/*
 * sysctl_proc_paxflags: helper routine to get process's paxctl flags
 */
static int
sysctl_proc_paxflags(SYSCTLFN_ARGS)
{
	struct proc *p;
	struct sysctlnode node;
	int paxflags;
	int error;

	/* First, validate the request. */
	if (namelen != 0 || name[-1] != PROC_PID_PAXFLAGS)
		return EINVAL;

	/* Find the process.  Hold a reference (p_reflock), if found. */
	error = sysctl_proc_findproc(l, (pid_t)name[-2], &p);
	if (error)
		return error;

	/* XXX-elad */
	error = kauth_authorize_process(l->l_cred, KAUTH_PROCESS_CANSEE, p,
	    KAUTH_ARG(KAUTH_REQ_PROCESS_CANSEE_ENTRY), NULL, NULL);
	if (error) {
		rw_exit(&p->p_reflock);
		return error;
	}

	/* Retrieve the flags. */
	node = *rnode;
	paxflags = p->p_pax;
	node.sysctl_data = &paxflags;

	error = sysctl_lookup(SYSCTLFN_CALL(&node));

	/* If attempting to write a new value, it's an error. */
	if (error == 0 && newp != NULL)
		error = EACCES;

	rw_exit(&p->p_reflock);
	return error;
}

/*
 * sysctl_proc_corename: helper routine to get or set the core file name
 * for a process specified by PID.
 */
static int
sysctl_proc_corename(SYSCTLFN_ARGS)
{
	struct proc *p;
	struct plimit *lim;
	char *cnbuf, *cname;
	struct sysctlnode node;
	size_t len;
	int error;

	/* First, validate the request. */
	if (namelen != 0 || name[-1] != PROC_PID_CORENAME)
		return EINVAL;

	/* Find the process.  Hold a reference (p_reflock), if found. */
	error = sysctl_proc_findproc(l, (pid_t)name[-2], &p);
	if (error)
		return error;

	/* XXX-elad */
	error = kauth_authorize_process(l->l_cred, KAUTH_PROCESS_CANSEE, p,
	    KAUTH_ARG(KAUTH_REQ_PROCESS_CANSEE_ENTRY), NULL, NULL);
	if (error) {
		rw_exit(&p->p_reflock);
		return error;
	}

	cnbuf = PNBUF_GET();

	if (oldp) {
		/* Get case: copy the core name into the buffer. */
		error = kauth_authorize_process(l->l_cred,
		    KAUTH_PROCESS_CORENAME, p,
		    KAUTH_ARG(KAUTH_REQ_PROCESS_CORENAME_GET), NULL, NULL);
		if (error) {
			goto done;
		}
		lim = p->p_limit;
		mutex_enter(&lim->pl_lock);
		strlcpy(cnbuf, lim->pl_corename, MAXPATHLEN);
		mutex_exit(&lim->pl_lock);
	}

	node = *rnode;
	node.sysctl_data = cnbuf;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));

	/* Return if error, or if caller is only getting the core name. */
	if (error || newp == NULL) {
		goto done;
	}

	/*
	 * Set case.  Check permission and then validate new core name.
	 * It must be either "core", "/core", or end in ".core".
	 */
	error = kauth_authorize_process(l->l_cred, KAUTH_PROCESS_CORENAME,
	    p, KAUTH_ARG(KAUTH_REQ_PROCESS_CORENAME_SET), cnbuf, NULL);
	if (error) {
		goto done;
	}
	len = strlen(cnbuf);
	if ((len < 4 || strcmp(cnbuf + len - 4, "core") != 0) ||
	    (len > 4 && cnbuf[len - 5] != '/' && cnbuf[len - 5] != '.')) {
		error = EINVAL;
		goto done;
	}

	/* Allocate, copy and set the new core name for plimit structure. */
	cname = kmem_alloc(++len, KM_NOSLEEP);
	if (cname == NULL) {
		error = ENOMEM;
		goto done;
	}
	memcpy(cname, cnbuf, len);
	lim_setcorename(p, cname, len);
done:
	rw_exit(&p->p_reflock);
	PNBUF_PUT(cnbuf);
	return error;
}

/*
 * sysctl_proc_stop: helper routine for checking/setting the stop flags.
 */
static int
sysctl_proc_stop(SYSCTLFN_ARGS)
{
	struct proc *p;
	int isset, flag, error = 0;
	struct sysctlnode node;

	if (namelen != 0)
		return EINVAL;

	/* Find the process.  Hold a reference (p_reflock), if found. */
	error = sysctl_proc_findproc(l, (pid_t)name[-2], &p);
	if (error)
		return error;

	/* XXX-elad */
	error = kauth_authorize_process(l->l_cred, KAUTH_PROCESS_CANSEE, p,
	    KAUTH_ARG(KAUTH_REQ_PROCESS_CANSEE_ENTRY), NULL, NULL);
	if (error) {
		goto out;
	}

	/* Determine the flag. */
	switch (rnode->sysctl_num) {
	case PROC_PID_STOPFORK:
		flag = PS_STOPFORK;
		break;
	case PROC_PID_STOPEXEC:
		flag = PS_STOPEXEC;
		break;
	case PROC_PID_STOPEXIT:
		flag = PS_STOPEXIT;
		break;
	default:
		error = EINVAL;
		goto out;
	}
	isset = (p->p_flag & flag) ? 1 : 0;
	node = *rnode;
	node.sysctl_data = &isset;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));

	/* Return if error, or if the caller is only getting the flag. */
	if (error || newp == NULL) {
		goto out;
	}

	/* Check if caller can set the flags. */
	error = kauth_authorize_process(l->l_cred, KAUTH_PROCESS_STOPFLAG,
	    p, KAUTH_ARG(flag), NULL, NULL);
	if (error) {
		goto out;
	}
	mutex_enter(p->p_lock);
	if (isset) {
		p->p_sflag |= flag;
	} else {
		p->p_sflag &= ~flag;
	}
	mutex_exit(p->p_lock);
out:
	rw_exit(&p->p_reflock);
	return error;
}

/*
 * sysctl_proc_plimit: helper routine to get/set rlimits of a process.
 */
static int
sysctl_proc_plimit(SYSCTLFN_ARGS)
{
	struct proc *p;
	u_int limitno;
	int which, error = 0;
	struct rlimit alim;
	struct sysctlnode node;

	if (namelen != 0)
		return EINVAL;

	which = name[-1];
	if (which != PROC_PID_LIMIT_TYPE_SOFT &&
	    which != PROC_PID_LIMIT_TYPE_HARD)
		return EINVAL;

	limitno = name[-2] - 1;
	if (limitno >= RLIM_NLIMITS)
		return EINVAL;

	if (name[-3] != PROC_PID_LIMIT)
		return EINVAL;

	/* Find the process.  Hold a reference (p_reflock), if found. */
	error = sysctl_proc_findproc(l, (pid_t)name[-4], &p);
	if (error)
		return error;

	/* XXX-elad */
	error = kauth_authorize_process(l->l_cred, KAUTH_PROCESS_CANSEE, p,
	    KAUTH_ARG(KAUTH_REQ_PROCESS_CANSEE_ENTRY), NULL, NULL);
	if (error)
		goto out;

	/* Check if caller can retrieve the limits. */
	if (newp == NULL) {
		error = kauth_authorize_process(l->l_cred, KAUTH_PROCESS_RLIMIT,
		    p, KAUTH_ARG(KAUTH_REQ_PROCESS_RLIMIT_GET), &alim,
		    KAUTH_ARG(which));
		if (error)
			goto out;
	}

	/* Retrieve the limits. */
	node = *rnode;
	memcpy(&alim, &p->p_rlimit[limitno], sizeof(alim));
	if (which == PROC_PID_LIMIT_TYPE_HARD) {
		node.sysctl_data = &alim.rlim_max;
	} else {
		node.sysctl_data = &alim.rlim_cur;
	}
	error = sysctl_lookup(SYSCTLFN_CALL(&node));

	/* Return if error, or if we are only retrieving the limits. */
	if (error || newp == NULL) {
		goto out;
	}
	error = dosetrlimit(l, p, limitno, &alim);
out:
	rw_exit(&p->p_reflock);
	return error;
}

/*
 * Set up the sysctl nodes (proc.curproc.*) for the per-process settings
 * implemented by the helpers above.
 */
static void
sysctl_proc_setup(void)
{

	sysctl_createv(&proc_sysctllog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_ANYNUMBER,
	    CTLTYPE_NODE, "curproc",
	    SYSCTL_DESCR("Per-process settings"),
	    NULL, 0, NULL, 0,
	    CTL_PROC, PROC_CURPROC, CTL_EOL);

	sysctl_createv(&proc_sysctllog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY,
	    CTLTYPE_INT, "paxflags",
	    SYSCTL_DESCR("Process PAX control flags"),
	    sysctl_proc_paxflags, 0, NULL, 0,
	    CTL_PROC, PROC_CURPROC, PROC_PID_PAXFLAGS, CTL_EOL);

	sysctl_createv(&proc_sysctllog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE|CTLFLAG_ANYWRITE,
	    CTLTYPE_STRING, "corename",
	    SYSCTL_DESCR("Core file name"),
	    sysctl_proc_corename, 0, NULL, MAXPATHLEN,
	    CTL_PROC, PROC_CURPROC, PROC_PID_CORENAME, CTL_EOL);
	sysctl_createv(&proc_sysctllog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "rlimit",
	    SYSCTL_DESCR("Process limits"),
	    NULL, 0, NULL, 0,
	    CTL_PROC, PROC_CURPROC, PROC_PID_LIMIT, CTL_EOL);

#define create_proc_plimit(s, n) do {					\
	sysctl_createv(&proc_sysctllog, 0, NULL, NULL,			\
	    CTLFLAG_PERMANENT,						\
	    CTLTYPE_NODE, s,						\
	    SYSCTL_DESCR("Process " s " limits"),			\
	    NULL, 0, NULL, 0,						\
	    CTL_PROC, PROC_CURPROC, PROC_PID_LIMIT, n,			\
	    CTL_EOL);							\
	sysctl_createv(&proc_sysctllog, 0, NULL, NULL,			\
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE|CTLFLAG_ANYWRITE,	\
	    CTLTYPE_QUAD, "soft",					\
	    SYSCTL_DESCR("Process soft " s " limit"),			\
	    sysctl_proc_plimit, 0, NULL, 0,				\
	    CTL_PROC, PROC_CURPROC, PROC_PID_LIMIT, n,			\
	    PROC_PID_LIMIT_TYPE_SOFT, CTL_EOL);				\
	sysctl_createv(&proc_sysctllog, 0, NULL, NULL,			\
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE|CTLFLAG_ANYWRITE,	\
	    CTLTYPE_QUAD, "hard",					\
	    SYSCTL_DESCR("Process hard " s " limit"),			\
	    sysctl_proc_plimit, 0, NULL, 0,				\
	    CTL_PROC, PROC_CURPROC, PROC_PID_LIMIT, n,			\
	    PROC_PID_LIMIT_TYPE_HARD, CTL_EOL);				\
	} while (0/*CONSTCOND*/)

	create_proc_plimit("cputime",		PROC_PID_LIMIT_CPU);
	create_proc_plimit("filesize",		PROC_PID_LIMIT_FSIZE);
	create_proc_plimit("datasize",		PROC_PID_LIMIT_DATA);
	create_proc_plimit("stacksize",		PROC_PID_LIMIT_STACK);
	create_proc_plimit("coredumpsize",	PROC_PID_LIMIT_CORE);
	create_proc_plimit("memoryuse",		PROC_PID_LIMIT_RSS);
	create_proc_plimit("memorylocked",	PROC_PID_LIMIT_MEMLOCK);
	create_proc_plimit("maxproc",		PROC_PID_LIMIT_NPROC);
	create_proc_plimit("descriptors",	PROC_PID_LIMIT_NOFILE);
	create_proc_plimit("sbsize",		PROC_PID_LIMIT_SBSIZE);
	create_proc_plimit("vmemoryuse",	PROC_PID_LIMIT_AS);
	create_proc_plimit("maxlwp",		PROC_PID_LIMIT_NTHR);

#undef create_proc_plimit

	sysctl_createv(&proc_sysctllog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE|CTLFLAG_ANYWRITE,
	    CTLTYPE_INT, "stopfork",
	    SYSCTL_DESCR("Stop process at fork(2)"),
	    sysctl_proc_stop, 0, NULL, 0,
	    CTL_PROC, PROC_CURPROC, PROC_PID_STOPFORK, CTL_EOL);
	sysctl_createv(&proc_sysctllog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE|CTLFLAG_ANYWRITE,
	    CTLTYPE_INT, "stopexec",
	    SYSCTL_DESCR("Stop process at execve(2)"),
	    sysctl_proc_stop, 0, NULL, 0,
	    CTL_PROC, PROC_CURPROC, PROC_PID_STOPEXEC, CTL_EOL);
completing exit"), 1225 sysctl_proc_stop, 0, NULL, 0, 1226 CTL_PROC, PROC_CURPROC, PROC_PID_STOPEXIT, CTL_EOL); 1227 } 1228