/*	$OpenBSD: kern_resource.c,v 1.88 2024/08/20 13:29:25 mvs Exp $	*/
/*	$NetBSD: kern_resource.c,v 1.38 1996/10/23 07:19:38 matthias Exp $	*/

/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_resource.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/resourcevar.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/ktrace.h>
#include <sys/sched.h>
#include <sys/signalvar.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm.h>

/* Resource usage check interval in msec */
#define RUCHECK_INTERVAL	1000

/* SIGXCPU interval in seconds of process runtime */
#define SIGXCPU_INTERVAL	5

struct plimit	*lim_copy(struct plimit *);
struct plimit	*lim_write_begin(void);
void		 lim_write_commit(struct plimit *);

void	tuagg_sumup(struct tusage *, const struct tusage *);

/*
 * Patchable maximum data and stack limits.
 */
rlim_t maxdmap = MAXDSIZ;
rlim_t maxsmap = MAXSSIZ;

/*
 * Serializes resource limit updates.
 * This lock has to be held together with ps_mtx when updating
 * the process' ps_limit.
 */
struct rwlock rlimit_lock = RWLOCK_INITIALIZER("rlimitlk");

/*
 * Resource controls and accounting.
 */

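/*
 * getpriority(2): of all processes selected by `which' and `who', return
 * the lowest nice value, i.e. the most favourable scheduling priority.
 * Nice values are stored biased by NZERO and are unbiased again before
 * being returned to userland.
 */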
int
sys_getpriority(struct proc *curp, void *v, register_t *retval)
{
	struct sys_getpriority_args /* {
		syscallarg(int) which;
		syscallarg(id_t) who;
	} */ *uap = v;
	struct process *pr;
	int low = NZERO + PRIO_MAX + 1;

	switch (SCARG(uap, which)) {

	case PRIO_PROCESS:
		if (SCARG(uap, who) == 0)
			pr = curp->p_p;
		else
			pr = prfind(SCARG(uap, who));
		if (pr == NULL)
			break;
		if (pr->ps_nice < low)
			low = pr->ps_nice;
		break;

	case PRIO_PGRP: {
		struct pgrp *pg;

		if (SCARG(uap, who) == 0)
			pg = curp->p_p->ps_pgrp;
		else if ((pg = pgfind(SCARG(uap, who))) == NULL)
			break;
		LIST_FOREACH(pr, &pg->pg_members, ps_pglist)
			if (pr->ps_nice < low)
				low = pr->ps_nice;
		break;
	}

	case PRIO_USER:
		if (SCARG(uap, who) == 0)
			SCARG(uap, who) = curp->p_ucred->cr_uid;
		LIST_FOREACH(pr, &allprocess, ps_list)
			if (pr->ps_ucred->cr_uid == SCARG(uap, who) &&
			    pr->ps_nice < low)
				low = pr->ps_nice;
		break;

	default:
		return (EINVAL);
	}
	if (low == NZERO + PRIO_MAX + 1)
		return (ESRCH);
	*retval = low - NZERO;
	return (0);
}

int
sys_setpriority(struct proc *curp, void *v, register_t *retval)
{
	struct sys_setpriority_args /* {
		syscallarg(int) which;
		syscallarg(id_t) who;
		syscallarg(int) prio;
	} */ *uap = v;
	struct process *pr;
	int found = 0, error = 0;

	switch (SCARG(uap, which)) {

	case PRIO_PROCESS:
		if (SCARG(uap, who) == 0)
			pr = curp->p_p;
		else
			pr = prfind(SCARG(uap, who));
		if (pr == NULL)
			break;
		error = donice(curp, pr, SCARG(uap, prio));
		found = 1;
		break;

	case PRIO_PGRP: {
		struct pgrp *pg;

		if (SCARG(uap, who) == 0)
			pg = curp->p_p->ps_pgrp;
		else if ((pg = pgfind(SCARG(uap, who))) == NULL)
			break;
		LIST_FOREACH(pr, &pg->pg_members, ps_pglist) {
			error = donice(curp, pr, SCARG(uap, prio));
			found = 1;
		}
		break;
	}

	case PRIO_USER:
		if (SCARG(uap, who) == 0)
			SCARG(uap, who) = curp->p_ucred->cr_uid;
		LIST_FOREACH(pr, &allprocess, ps_list)
			if (pr->ps_ucred->cr_uid == SCARG(uap, who)) {
				error = donice(curp, pr, SCARG(uap, prio));
				found = 1;
			}
		break;

	default:
		return (EINVAL);
	}
	if (!found)
		return (ESRCH);
	return (error);
}

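/*
 * Set the nice value of the process `chgpr' and reschedule all of its
 * threads accordingly.  Unprivileged callers may only touch processes
 * owned by their real or effective uid, and only root may lower a nice
 * value (i.e. raise the priority).
 */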
int
donice(struct proc *curp, struct process *chgpr, int n)
{
	struct ucred *ucred = curp->p_ucred;
	struct proc *p;

	if (ucred->cr_uid != 0 && ucred->cr_ruid != 0 &&
	    ucred->cr_uid != chgpr->ps_ucred->cr_uid &&
	    ucred->cr_ruid != chgpr->ps_ucred->cr_uid)
		return (EPERM);
	if (n > PRIO_MAX)
		n = PRIO_MAX;
	if (n < PRIO_MIN)
		n = PRIO_MIN;
	n += NZERO;
	if (n < chgpr->ps_nice && suser(curp))
		return (EACCES);
	chgpr->ps_nice = n;
	mtx_enter(&chgpr->ps_mtx);
	SCHED_LOCK();
	TAILQ_FOREACH(p, &chgpr->ps_threads, p_thr_link) {
		setpriority(p, p->p_estcpu, n);
	}
	SCHED_UNLOCK();
	mtx_leave(&chgpr->ps_mtx);
	return (0);
}

int
sys_setrlimit(struct proc *p, void *v, register_t *retval)
{
	struct sys_setrlimit_args /* {
		syscallarg(int) which;
		syscallarg(const struct rlimit *) rlp;
	} */ *uap = v;
	struct rlimit alim;
	int error;

	error = copyin((caddr_t)SCARG(uap, rlp), (caddr_t)&alim,
	    sizeof (struct rlimit));
	if (error)
		return (error);
#ifdef KTRACE
	if (KTRPOINT(p, KTR_STRUCT))
		ktrrlimit(p, &alim);
#endif
	return (dosetrlimit(p, SCARG(uap, which), &alim));
}

int
dosetrlimit(struct proc *p, u_int which, struct rlimit *limp)
{
	struct rlimit *alimp;
	struct plimit *limit;
	rlim_t maxlim;
	int error;

	if (which >= RLIM_NLIMITS || limp->rlim_cur > limp->rlim_max)
		return (EINVAL);

	rw_enter_write(&rlimit_lock);

	alimp = &p->p_p->ps_limit->pl_rlimit[which];
	if (limp->rlim_max > alimp->rlim_max) {
		if ((error = suser(p)) != 0) {
			rw_exit_write(&rlimit_lock);
			return (error);
		}
	}

	/* Get exclusive write access to the limit structure. */
	limit = lim_write_begin();
	alimp = &limit->pl_rlimit[which];

	switch (which) {
	case RLIMIT_DATA:
		maxlim = maxdmap;
		break;
	case RLIMIT_STACK:
		maxlim = maxsmap;
		break;
	case RLIMIT_NOFILE:
		maxlim = atomic_load_int(&maxfiles);
		break;
	case RLIMIT_NPROC:
		maxlim = atomic_load_int(&maxprocess);
		break;
	default:
		maxlim = RLIM_INFINITY;
		break;
	}

	if (limp->rlim_max > maxlim)
		limp->rlim_max = maxlim;
	if (limp->rlim_cur > limp->rlim_max)
		limp->rlim_cur = limp->rlim_max;

	if (which == RLIMIT_CPU && limp->rlim_cur != RLIM_INFINITY &&
	    alimp->rlim_cur == RLIM_INFINITY)
		timeout_add_msec(&p->p_p->ps_rucheck_to, RUCHECK_INTERVAL);

	if (which == RLIMIT_STACK) {
		/*
		 * Stack is allocated to the max at exec time with only
		 * "rlim_cur" bytes accessible.  If stack limit is going
		 * up make more accessible, if going down make inaccessible.
		 */
		if (limp->rlim_cur != alimp->rlim_cur) {
			vaddr_t addr;
			vsize_t size;
			vm_prot_t prot;
			struct vmspace *vm = p->p_vmspace;

			if (limp->rlim_cur > alimp->rlim_cur) {
				prot = PROT_READ | PROT_WRITE;
				size = limp->rlim_cur - alimp->rlim_cur;
#ifdef MACHINE_STACK_GROWS_UP
				addr = (vaddr_t)vm->vm_maxsaddr +
				    alimp->rlim_cur;
#else
				addr = (vaddr_t)vm->vm_minsaddr -
				    limp->rlim_cur;
#endif
			} else {
				prot = PROT_NONE;
				size = alimp->rlim_cur - limp->rlim_cur;
#ifdef MACHINE_STACK_GROWS_UP
				addr = (vaddr_t)vm->vm_maxsaddr +
				    limp->rlim_cur;
#else
				addr = (vaddr_t)vm->vm_minsaddr -
				    alimp->rlim_cur;
#endif
			}
			addr = trunc_page(addr);
			size = round_page(size);
			KERNEL_LOCK();
			(void) uvm_map_protect(&vm->vm_map, addr,
			    addr+size, prot, UVM_ET_STACK, FALSE, FALSE);
			KERNEL_UNLOCK();
		}
	}

	*alimp = *limp;

	lim_write_commit(limit);
	rw_exit_write(&rlimit_lock);

	return (0);
}

int
sys_getrlimit(struct proc *p, void *v, register_t *retval)
{
	struct sys_getrlimit_args /* {
		syscallarg(int) which;
		syscallarg(struct rlimit *) rlp;
	} */ *uap = v;
	struct plimit *limit;
	struct rlimit alimp;
	int error;

	if (SCARG(uap, which) < 0 || SCARG(uap, which) >= RLIM_NLIMITS)
		return (EINVAL);
	limit = lim_read_enter();
	alimp = limit->pl_rlimit[SCARG(uap, which)];
	lim_read_leave(limit);
	error = copyout(&alimp, SCARG(uap, rlp), sizeof(struct rlimit));
#ifdef KTRACE
	if (error == 0 && KTRPOINT(p, KTR_STRUCT))
		ktrrlimit(p, &alimp);
#endif
	return (error);
}

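/*
 * Thread and process run times (struct tusage) are read without locks.
 * Writers such as tuagg_add_process() bracket their updates with
 * tu_enter()/tu_leave(), which leave the tu_gen generation count odd
 * while an update is in flight; tuagg_sumup() below retries its copy
 * until it observes the same even generation number before and after.
 */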
/* Add the counts from *from to *tu, ensuring a consistent read of *from. */
void
tuagg_sumup(struct tusage *tu, const struct tusage *from)
{
	struct tusage tmp;
	uint64_t enter, leave;

	enter = from->tu_gen;
	for (;;) {
		/* the generation number is odd during an update */
		while (enter & 1) {
			CPU_BUSY_CYCLE();
			enter = from->tu_gen;
		}

		membar_consumer();
		tmp = *from;
		membar_consumer();
		leave = from->tu_gen;

		if (enter == leave)
			break;
		enter = leave;
	}

	tu->tu_uticks += tmp.tu_uticks;
	tu->tu_sticks += tmp.tu_sticks;
	tu->tu_iticks += tmp.tu_iticks;
	timespecadd(&tu->tu_runtime, &tmp.tu_runtime, &tu->tu_runtime);
}

void
tuagg_get_proc(struct tusage *tu, struct proc *p)
{
	memset(tu, 0, sizeof(*tu));
	tuagg_sumup(tu, &p->p_tu);
}

void
tuagg_get_process(struct tusage *tu, struct process *pr)
{
	struct proc *q;

	memset(tu, 0, sizeof(*tu));

	mtx_enter(&pr->ps_mtx);
	tuagg_sumup(tu, &pr->ps_tu);
	/* add on all living threads */
	TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link)
		tuagg_sumup(tu, &q->p_tu);
	mtx_leave(&pr->ps_mtx);
}

/*
 * Update the process ps_tu usage with the values from proc p;
 * while doing so, the times for proc p are reset.
 * This requires that p is either curproc or SDEAD and that the
 * IPL is higher than IPL_STATCLOCK.  ps_mtx uses IPL_HIGH so
 * this should always be the case.
 */
void
tuagg_add_process(struct process *pr, struct proc *p)
{
	MUTEX_ASSERT_LOCKED(&pr->ps_mtx);
	KASSERT(curproc == p || p->p_stat == SDEAD);

	tu_enter(&pr->ps_tu);
	tuagg_sumup(&pr->ps_tu, &p->p_tu);
	tu_leave(&pr->ps_tu);

	/* Now reset CPU time usage for the thread. */
	timespecclear(&p->p_tu.tu_runtime);
	p->p_tu.tu_uticks = p->p_tu.tu_sticks = p->p_tu.tu_iticks = 0;
}

/*
 * Transform the running time and tick information in a struct tusage
 * into user, system, and interrupt time usage.
 */
void
calctsru(struct tusage *tup, struct timespec *up, struct timespec *sp,
    struct timespec *ip)
{
	u_quad_t st, ut, it;

	st = tup->tu_sticks;
	ut = tup->tu_uticks;
	it = tup->tu_iticks;

	if (st + ut + it == 0) {
		timespecclear(up);
		timespecclear(sp);
		if (ip != NULL)
			timespecclear(ip);
		return;
	}

	st = st * 1000000000 / stathz;
	sp->tv_sec = st / 1000000000;
	sp->tv_nsec = st % 1000000000;
	ut = ut * 1000000000 / stathz;
	up->tv_sec = ut / 1000000000;
	up->tv_nsec = ut % 1000000000;
	if (ip != NULL) {
		it = it * 1000000000 / stathz;
		ip->tv_sec = it / 1000000000;
		ip->tv_nsec = it % 1000000000;
	}
}

void
calcru(struct tusage *tup, struct timeval *up, struct timeval *sp,
    struct timeval *ip)
{
	struct timespec u, s, i;

	calctsru(tup, &u, &s, ip != NULL ? &i : NULL);
	TIMESPEC_TO_TIMEVAL(up, &u);
	TIMESPEC_TO_TIMEVAL(sp, &s);
	if (ip != NULL)
		TIMESPEC_TO_TIMEVAL(ip, &i);
}

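/*
 * getrusage(2): report accumulated resource usage for the calling
 * process (RUSAGE_SELF), the calling thread (RUSAGE_THREAD), or the
 * process' terminated and waited-for children (RUSAGE_CHILDREN).
 */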
int
sys_getrusage(struct proc *p, void *v, register_t *retval)
{
	struct sys_getrusage_args /* {
		syscallarg(int) who;
		syscallarg(struct rusage *) rusage;
	} */ *uap = v;
	struct rusage ru;
	int error;

	error = dogetrusage(p, SCARG(uap, who), &ru);
	if (error == 0) {
		error = copyout(&ru, SCARG(uap, rusage), sizeof(ru));
#ifdef KTRACE
		if (error == 0 && KTRPOINT(p, KTR_STRUCT))
			ktrrusage(p, &ru);
#endif
	}
	return (error);
}

int
dogetrusage(struct proc *p, int who, struct rusage *rup)
{
	struct process *pr = p->p_p;
	struct proc *q;
	struct tusage tu = { 0 };

	KERNEL_ASSERT_LOCKED();

	switch (who) {
	case RUSAGE_SELF:
		/* start with the sum of dead threads, if any */
		if (pr->ps_ru != NULL)
			*rup = *pr->ps_ru;
		else
			memset(rup, 0, sizeof(*rup));
		tuagg_sumup(&tu, &pr->ps_tu);

		/* add on all living threads */
		TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
			ruadd(rup, &q->p_ru);
			tuagg_sumup(&tu, &q->p_tu);
		}

		calcru(&tu, &rup->ru_utime, &rup->ru_stime, NULL);
		break;

	case RUSAGE_THREAD:
		*rup = p->p_ru;
		calcru(&p->p_tu, &rup->ru_utime, &rup->ru_stime, NULL);
		break;

	case RUSAGE_CHILDREN:
		*rup = pr->ps_cru;
		break;

	default:
		return (EINVAL);
	}
	return (0);
}

void
ruadd(struct rusage *ru, struct rusage *ru2)
{
	long *ip, *ip2;
	int i;

	timeradd(&ru->ru_utime, &ru2->ru_utime, &ru->ru_utime);
	timeradd(&ru->ru_stime, &ru2->ru_stime, &ru->ru_stime);
	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
	ip = &ru->ru_first; ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
		*ip++ += *ip2++;
}

/*
 * Check if the process exceeds its cpu resource allocation.
 * If over max, kill it.
 */
void
rucheck(void *arg)
{
	struct rlimit rlim;
	struct process *pr = arg;
	time_t runtime;

	KERNEL_ASSERT_LOCKED();

	SCHED_LOCK();
	runtime = pr->ps_tu.tu_runtime.tv_sec;
	SCHED_UNLOCK();

	mtx_enter(&pr->ps_mtx);
	rlim = pr->ps_limit->pl_rlimit[RLIMIT_CPU];
	mtx_leave(&pr->ps_mtx);

	if ((rlim_t)runtime >= rlim.rlim_cur) {
		if ((rlim_t)runtime >= rlim.rlim_max) {
			prsignal(pr, SIGKILL);
		} else if (runtime >= pr->ps_nextxcpu) {
			prsignal(pr, SIGXCPU);
			pr->ps_nextxcpu = runtime + SIGXCPU_INTERVAL;
		}
	}

	timeout_add_msec(&pr->ps_rucheck_to, RUCHECK_INTERVAL);
}

struct pool plimit_pool;

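/*
 * Initialize the plimit pool and fill in limit0, the initial set of
 * resource limits: everything defaults to RLIM_INFINITY except for the
 * open file, process count, RSS and locked-memory limits set below.
 */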
void
lim_startup(struct plimit *limit0)
{
	rlim_t lim;
	int i;

	pool_init(&plimit_pool, sizeof(struct plimit), 0, IPL_MPFLOOR,
	    PR_WAITOK, "plimitpl", NULL);

	for (i = 0; i < nitems(limit0->pl_rlimit); i++)
		limit0->pl_rlimit[i].rlim_cur =
		    limit0->pl_rlimit[i].rlim_max = RLIM_INFINITY;
	limit0->pl_rlimit[RLIMIT_NOFILE].rlim_cur = NOFILE;
	limit0->pl_rlimit[RLIMIT_NOFILE].rlim_max = MIN(NOFILE_MAX,
	    (maxfiles - NOFILE > NOFILE) ? maxfiles - NOFILE : NOFILE);
	limit0->pl_rlimit[RLIMIT_NPROC].rlim_cur = MAXUPRC;
	lim = ptoa(uvmexp.free);
	limit0->pl_rlimit[RLIMIT_RSS].rlim_max = lim;
	lim = ptoa(64*1024);		/* Default to very low */
	limit0->pl_rlimit[RLIMIT_MEMLOCK].rlim_max = lim;
	limit0->pl_rlimit[RLIMIT_MEMLOCK].rlim_cur = lim / 3;
	refcnt_init(&limit0->pl_refcnt);
}

/*
 * Make a copy of the plimit structure.
 * We share these structures copy-on-write after fork,
 * and copy when a limit is changed.
 */
struct plimit *
lim_copy(struct plimit *lim)
{
	struct plimit *newlim;

	newlim = pool_get(&plimit_pool, PR_WAITOK);
	memcpy(newlim->pl_rlimit, lim->pl_rlimit,
	    sizeof(struct rlimit) * RLIM_NLIMITS);
	refcnt_init(&newlim->pl_refcnt);
	return (newlim);
}

void
lim_free(struct plimit *lim)
{
	if (refcnt_rele(&lim->pl_refcnt) == 0)
		return;
	pool_put(&plimit_pool, lim);
}

void
lim_fork(struct process *parent, struct process *child)
{
	struct plimit *limit;

	mtx_enter(&parent->ps_mtx);
	limit = parent->ps_limit;
	refcnt_take(&limit->pl_refcnt);
	mtx_leave(&parent->ps_mtx);

	child->ps_limit = limit;

	if (limit->pl_rlimit[RLIMIT_CPU].rlim_cur != RLIM_INFINITY)
		timeout_add_msec(&child->ps_rucheck_to, RUCHECK_INTERVAL);
}

/*
 * Return an exclusive write reference to the process' resource limit structure.
 * The caller has to release the structure by calling lim_write_commit().
 *
 * This invalidates any plimit read reference held by the calling thread.
 */
struct plimit *
lim_write_begin(void)
{
	struct plimit *limit;
	struct proc *p = curproc;

	rw_assert_wrlock(&rlimit_lock);

	if (p->p_limit != NULL)
		lim_free(p->p_limit);
	p->p_limit = NULL;

	/*
	 * It is safe to access ps_limit here without holding ps_mtx
	 * because rlimit_lock excludes other writers.
	 */

	limit = p->p_p->ps_limit;
	if (P_HASSIBLING(p) || refcnt_shared(&limit->pl_refcnt))
		limit = lim_copy(limit);

	return (limit);
}

/*
 * Finish exclusive write access to the plimit structure.
 * This makes the structure visible to other threads in the process.
 */
void
lim_write_commit(struct plimit *limit)
{
	struct plimit *olimit;
	struct proc *p = curproc;

	rw_assert_wrlock(&rlimit_lock);

	if (limit != p->p_p->ps_limit) {
		mtx_enter(&p->p_p->ps_mtx);
		olimit = p->p_p->ps_limit;
		p->p_p->ps_limit = limit;
		mtx_leave(&p->p_p->ps_mtx);

		lim_free(olimit);
	}
}

/*
 * Begin read access to the process' resource limit structure.
 * The access has to be finished by calling lim_read_leave().
 *
 * Sections denoted by lim_read_enter() and lim_read_leave() cannot nest.
 */
struct plimit *
lim_read_enter(void)
{
	struct plimit *limit;
	struct proc *p = curproc;
	struct process *pr = p->p_p;

	/*
	 * This thread might not observe the latest value of ps_limit
	 * if another thread updated the limits very recently on another CPU.
	 * However, the anomaly should disappear quickly, especially if
	 * there is any synchronization activity between the threads (or
	 * the CPUs).
	 */

	limit = p->p_limit;
	if (limit != pr->ps_limit) {
		mtx_enter(&pr->ps_mtx);
		limit = pr->ps_limit;
		refcnt_take(&limit->pl_refcnt);
		mtx_leave(&pr->ps_mtx);
		if (p->p_limit != NULL)
			lim_free(p->p_limit);
		p->p_limit = limit;
	}
	KASSERT(limit != NULL);
	return (limit);
}

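/*
 * The typical lockless read pattern is thus (see sys_getrlimit() above):
 *
 *	limit = lim_read_enter();
 *	...use limit->pl_rlimit[which]...
 *	lim_read_leave(limit);
 */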
/*
 * Get the value of the resource limit in the given process.
 */
rlim_t
lim_cur_proc(struct proc *p, int which)
{
	struct process *pr = p->p_p;
	rlim_t val;

	KASSERT(which >= 0 && which < RLIM_NLIMITS);

	mtx_enter(&pr->ps_mtx);
	val = pr->ps_limit->pl_rlimit[which].rlim_cur;
	mtx_leave(&pr->ps_mtx);
	return (val);
}