/*	$NetBSD: kern_resource.c,v 1.124 2007/11/06 00:42:42 ad Exp $	*/

/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_resource.c	8.8 (Berkeley) 2/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_resource.c,v 1.124 2007/11/06 00:42:42 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/resourcevar.h>
#include <sys/malloc.h>
#include <sys/namei.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/kauth.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>

/*
 * Maximum process data and stack limits.
 * They are variables so they are patchable.
 */
rlim_t maxdmap = MAXDSIZ;
rlim_t maxsmap = MAXSSIZ;

struct uihashhead *uihashtbl;
u_long	uihash;		/* size of hash table - 1 */
kmutex_t uihashtbl_lock;

/*
 * Resource controls and accounting.
 */
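/*
 * A note on nice values (descriptive only): p_nice is stored biased by
 * NZERO, so the user-visible range PRIO_MIN..PRIO_MAX maps onto
 * NZERO + PRIO_MIN .. NZERO + PRIO_MAX.  With the usual NZERO of 20 and
 * PRIO_MIN/PRIO_MAX of -20/20 that is 0..40, e.g. user nice 0 <-> p_nice 20.
 * The value NZERO + PRIO_MAX + 1 used below is therefore out of range and
 * serves as a "no process matched" sentinel.
 */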
int
sys_getpriority(struct lwp *l, void *v, register_t *retval)
{
	struct sys_getpriority_args /* {
		syscallarg(int) which;
		syscallarg(id_t) who;
	} */ *uap = v;
	struct proc *curp = l->l_proc, *p;
	int low = NZERO + PRIO_MAX + 1;
	int who = SCARG(uap, who);

	mutex_enter(&proclist_lock);
	switch (SCARG(uap, which)) {
	case PRIO_PROCESS:
		if (who == 0)
			p = curp;
		else
			p = p_find(who, PFIND_LOCKED);
		if (p != NULL)
			low = p->p_nice;
		break;

	case PRIO_PGRP: {
		struct pgrp *pg;

		if (who == 0)
			pg = curp->p_pgrp;
		else if ((pg = pg_find(who, PFIND_LOCKED)) == NULL)
			break;
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			if (p->p_nice < low)
				low = p->p_nice;
		}
		break;
	}

	case PRIO_USER:
		if (who == 0)
			who = (int)kauth_cred_geteuid(l->l_cred);
		PROCLIST_FOREACH(p, &allproc) {
			mutex_enter(&p->p_mutex);
			if (kauth_cred_geteuid(p->p_cred) ==
			    (uid_t)who && p->p_nice < low)
				low = p->p_nice;
			mutex_exit(&p->p_mutex);
		}
		break;

	default:
		mutex_exit(&proclist_lock);
		return (EINVAL);
	}
	mutex_exit(&proclist_lock);

	if (low == NZERO + PRIO_MAX + 1)
		return (ESRCH);
	*retval = low - NZERO;
	return (0);
}

/* ARGSUSED */
int
sys_setpriority(struct lwp *l, void *v, register_t *retval)
{
	struct sys_setpriority_args /* {
		syscallarg(int) which;
		syscallarg(id_t) who;
		syscallarg(int) prio;
	} */ *uap = v;
	struct proc *curp = l->l_proc, *p;
	int found = 0, error = 0;
	int who = SCARG(uap, who);

	mutex_enter(&proclist_lock);
	switch (SCARG(uap, which)) {
	case PRIO_PROCESS:
		if (who == 0)
			p = curp;
		else
			p = p_find(who, PFIND_LOCKED);
		if (p != NULL) {
			mutex_enter(&p->p_mutex);
			error = donice(l, p, SCARG(uap, prio));
			mutex_exit(&p->p_mutex);
		}
		found++;
		break;

	case PRIO_PGRP: {
		struct pgrp *pg;

		if (who == 0)
			pg = curp->p_pgrp;
		else if ((pg = pg_find(who, PFIND_LOCKED)) == NULL)
			break;
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			mutex_enter(&p->p_mutex);
			error = donice(l, p, SCARG(uap, prio));
			mutex_exit(&p->p_mutex);
			found++;
		}
		break;
	}

	case PRIO_USER:
		if (who == 0)
			who = (int)kauth_cred_geteuid(l->l_cred);
		PROCLIST_FOREACH(p, &allproc) {
			mutex_enter(&p->p_mutex);
			if (kauth_cred_geteuid(p->p_cred) == (uid_t)who) {
				error = donice(l, p, SCARG(uap, prio));
				found++;
			}
			mutex_exit(&p->p_mutex);
		}
		break;

	default:
		error = EINVAL;
		break;
	}
	mutex_exit(&proclist_lock);
	if (found == 0)
		return (ESRCH);
	return (error);
}
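/*
 * Both syscalls above hold proclist_lock across the lookup/iteration so the
 * target set cannot change underneath them, and each target's p_mutex is
 * held around donice().  A minimal userland sketch of the interface
 * (illustrative only, not part of this file):
 *
 *	errno = 0;
 *	prio = getpriority(PRIO_PROCESS, 0);
 *	if (prio == -1 && errno != 0)	// -1 alone can be a valid nice value
 *		err(1, "getpriority");
 *	if (setpriority(PRIO_PROCESS, 0, prio + 1) == -1)
 *		err(1, "setpriority");
 */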
/*
 * Renice a process.
 *
 * Call with the target process' credentials locked.
 */
int
donice(struct lwp *l, struct proc *chgp, int n)
{
	kauth_cred_t cred = l->l_cred;
	int onice;

	KASSERT(mutex_owned(&chgp->p_mutex));

	if (n > PRIO_MAX)
		n = PRIO_MAX;
	if (n < PRIO_MIN)
		n = PRIO_MIN;
	n += NZERO;
	onice = chgp->p_nice;

 again:
	if (kauth_authorize_process(cred, KAUTH_PROCESS_NICE, chgp,
	    KAUTH_ARG(n), NULL, NULL))
		return (EACCES);
	mutex_spin_enter(&chgp->p_smutex);
	if (onice != chgp->p_nice) {
		/* p_nice changed while unlocked; re-check authorization. */
		onice = chgp->p_nice;
		mutex_spin_exit(&chgp->p_smutex);
		goto again;
	}
	sched_nice(chgp, n);
	mutex_spin_exit(&chgp->p_smutex);
	return (0);
}

/* ARGSUSED */
int
sys_setrlimit(struct lwp *l, void *v, register_t *retval)
{
	struct sys_setrlimit_args /* {
		syscallarg(int) which;
		syscallarg(const struct rlimit *) rlp;
	} */ *uap = v;
	int which = SCARG(uap, which);
	struct rlimit alim;
	int error;

	error = copyin(SCARG(uap, rlp), &alim, sizeof(struct rlimit));
	if (error)
		return (error);
	return (dosetrlimit(l, l->l_proc, which, &alim));
}

int
dosetrlimit(struct lwp *l, struct proc *p, int which, struct rlimit *limp)
{
	struct rlimit *alimp;
	int error;

	if ((u_int)which >= RLIM_NLIMITS)
		return (EINVAL);

	if (limp->rlim_cur < 0 || limp->rlim_max < 0)
		return (EINVAL);

	if (limp->rlim_cur > limp->rlim_max) {
		/*
		 * This is a programming error.  According to SUSv2, we
		 * should return an error in this case.
		 */
		return (EINVAL);
	}

	alimp = &p->p_rlimit[which];
	/* if we don't change the value, no need to limcopy() */
	if (limp->rlim_cur == alimp->rlim_cur &&
	    limp->rlim_max == alimp->rlim_max)
		return 0;

	error = kauth_authorize_process(l->l_cred, KAUTH_PROCESS_RLIMIT,
	    p, limp, KAUTH_ARG(which), NULL);
	if (error)
		return (error);

	lim_privatise(p, false);
	/* p->p_limit is now unchangeable */
	alimp = &p->p_rlimit[which];

	switch (which) {

	case RLIMIT_DATA:
		if (limp->rlim_cur > maxdmap)
			limp->rlim_cur = maxdmap;
		if (limp->rlim_max > maxdmap)
			limp->rlim_max = maxdmap;
		break;

	case RLIMIT_STACK:
		if (limp->rlim_cur > maxsmap)
			limp->rlim_cur = maxsmap;
		if (limp->rlim_max > maxsmap)
			limp->rlim_max = maxsmap;

		/*
		 * Return EINVAL if the new stack size limit is lower than
		 * current usage.  Otherwise, the process would get SIGSEGV
		 * the moment it tried to access anything on its current
		 * stack.  This conforms to SUSv2.
		 */
		if (limp->rlim_cur < p->p_vmspace->vm_ssize * PAGE_SIZE
		    || limp->rlim_max < p->p_vmspace->vm_ssize * PAGE_SIZE) {
			return (EINVAL);
		}

		/*
		 * The stack is allocated to the maximum size at exec time,
		 * with only "rlim_cur" bytes accessible (in other words,
		 * the stack is divided into two contiguous regions at the
		 * "rlim_cur" byte boundary).
		 *
		 * Since allocation is done in terms of pages, round up
		 * "rlim_cur" (otherwise the two regions would overlap).
		 * If the stack limit is going up, make more of the stack
		 * accessible; if it is going down, make it inaccessible.
		 */
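		/*
		 * Worked example (illustrative, assuming the usual
		 * grows-down stack): raising the soft limit from 2MB to
		 * 8MB makes the 6MB range
		 * [vm_minsaddr - 8MB, vm_minsaddr - 2MB) readable and
		 * writable below; lowering it performs the inverse
		 * VM_PROT_NONE change on the corresponding range.
		 */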
		limp->rlim_cur = round_page(limp->rlim_cur);
		if (limp->rlim_cur != alimp->rlim_cur) {
			vaddr_t addr;
			vsize_t size;
			vm_prot_t prot;

			if (limp->rlim_cur > alimp->rlim_cur) {
				prot = VM_PROT_READ | VM_PROT_WRITE;
				size = limp->rlim_cur - alimp->rlim_cur;
				addr = (vaddr_t)p->p_vmspace->vm_minsaddr -
				    limp->rlim_cur;
			} else {
				prot = VM_PROT_NONE;
				size = alimp->rlim_cur - limp->rlim_cur;
				addr = (vaddr_t)p->p_vmspace->vm_minsaddr -
				    alimp->rlim_cur;
			}
			(void) uvm_map_protect(&p->p_vmspace->vm_map,
			    addr, addr + size, prot, false);
		}
		break;

	case RLIMIT_NOFILE:
		if (limp->rlim_cur > maxfiles)
			limp->rlim_cur = maxfiles;
		if (limp->rlim_max > maxfiles)
			limp->rlim_max = maxfiles;
		break;

	case RLIMIT_NPROC:
		if (limp->rlim_cur > maxproc)
			limp->rlim_cur = maxproc;
		if (limp->rlim_max > maxproc)
			limp->rlim_max = maxproc;
		break;
	}

	mutex_enter(&p->p_limit->pl_lock);
	*alimp = *limp;
	mutex_exit(&p->p_limit->pl_lock);
	return (0);
}

/* ARGSUSED */
int
sys_getrlimit(struct lwp *l, void *v, register_t *retval)
{
	struct sys_getrlimit_args /* {
		syscallarg(int) which;
		syscallarg(struct rlimit *) rlp;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	int which = SCARG(uap, which);
	struct rlimit rl;

	if ((u_int)which >= RLIM_NLIMITS)
		return (EINVAL);

	mutex_enter(&p->p_mutex);
	memcpy(&rl, &p->p_rlimit[which], sizeof(rl));
	mutex_exit(&p->p_mutex);

	return copyout(&rl, SCARG(uap, rlp), sizeof(rl));
}

/*
 * Transform the running time and tick information in proc p into user,
 * system, and interrupt time usage.
 *
 * Should be called with p->p_smutex held unless called from exit1().
 */
void
calcru(struct proc *p, struct timeval *up, struct timeval *sp,
    struct timeval *ip, struct timeval *rp)
{
	u_quad_t u, st, ut, it, tot;
	unsigned long sec;
	long usec;
	struct timeval tv;
	struct lwp *l;

	mutex_spin_enter(&p->p_stmutex);
	st = p->p_sticks;
	ut = p->p_uticks;
	it = p->p_iticks;
	mutex_spin_exit(&p->p_stmutex);

	sec = p->p_rtime.tv_sec;
	usec = p->p_rtime.tv_usec;

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		lwp_lock(l);
		sec += l->l_rtime.tv_sec;
		if ((usec += l->l_rtime.tv_usec) >= 1000000) {
			sec++;
			usec -= 1000000;
		}
		if ((l->l_flag & LW_RUNNING) != 0) {
			/*
			 * Adjust for the current time slice.  This is
			 * actually fairly important since the error
			 * here is on the order of a time quantum,
			 * which is much greater than the sampling
			 * error.
			 */
			microtime(&tv);
			sec += tv.tv_sec - l->l_stime.tv_sec;
			usec += tv.tv_usec - l->l_stime.tv_usec;
			if (usec >= 1000000) {
				sec++;
				usec -= 1000000;
			}
		}
		lwp_unlock(l);
	}

	tot = st + ut + it;
	u = sec * 1000000ull + usec;

	if (tot == 0) {
		/* No ticks, so we cannot apportion the time; split 50-50. */
		st = ut = u / 2;
	} else {
		st = (u * st) / tot;
		ut = (u * ut) / tot;
	}
	if (sp != NULL) {
		sp->tv_sec = st / 1000000;
		sp->tv_usec = st % 1000000;
	}
	if (up != NULL) {
		up->tv_sec = ut / 1000000;
		up->tv_usec = ut % 1000000;
	}
	if (ip != NULL) {
		if (it != 0)
			it = (u * it) / tot;
		ip->tv_sec = it / 1000000;
		ip->tv_usec = it % 1000000;
	}
	if (rp != NULL) {
		rp->tv_sec = sec;
		rp->tv_usec = usec;
	}
}
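/*
 * Proration example for calcru() (illustrative): with st = 30, ut = 60 and
 * it = 10 statclock ticks (tot = 100) and u = 2,000,000us of measured run
 * time, the split is st = 600,000us, ut = 1,200,000us and it = 200,000us;
 * only the tick ratios matter, the absolute time comes from l_rtime/p_rtime.
 */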
/* ARGSUSED */
int
sys_getrusage(struct lwp *l, void *v, register_t *retval)
{
	struct sys_getrusage_args /* {
		syscallarg(int) who;
		syscallarg(struct rusage *) rusage;
	} */ *uap = v;
	struct rusage ru;
	struct proc *p = l->l_proc;

	switch (SCARG(uap, who)) {
	case RUSAGE_SELF:
		mutex_enter(&p->p_smutex);
		memcpy(&ru, &p->p_stats->p_ru, sizeof(ru));
		calcru(p, &ru.ru_utime, &ru.ru_stime, NULL, NULL);
		mutex_exit(&p->p_smutex);
		break;

	case RUSAGE_CHILDREN:
		mutex_enter(&p->p_smutex);
		memcpy(&ru, &p->p_stats->p_cru, sizeof(ru));
		mutex_exit(&p->p_smutex);
		break;

	default:
		return EINVAL;
	}

	return copyout(&ru, SCARG(uap, rusage), sizeof(ru));
}

void
ruadd(struct rusage *ru, struct rusage *ru2)
{
	long *ip, *ip2;
	int i;

	timeradd(&ru->ru_utime, &ru2->ru_utime, &ru->ru_utime);
	timeradd(&ru->ru_stime, &ru2->ru_stime, &ru->ru_stime);
	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
	ip = &ru->ru_first; ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
		*ip++ += *ip2++;
}

/*
 * Make a copy of the plimit structure.
 * We share these structures copy-on-write after fork,
 * and copy when a limit is changed.
 *
 * Unfortunately (due to PL_SHAREMOD) it is possible for the structure
 * we are copying to change beneath our feet!
 */
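/*
 * Note on the corename copy loop below: pl_lock is dropped around the
 * malloc(M_WAITOK), which may sleep, and the length is re-checked after
 * re-acquiring the lock because pl_corename may have been replaced (or
 * grown) in the meantime; the loop simply retries with a larger buffer.
 */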
struct plimit *
lim_copy(struct plimit *lim)
{
	struct plimit *newlim;
	char *corename;
	size_t alen, len;

	newlim = pool_get(&plimit_pool, PR_WAITOK);
	mutex_init(&newlim->pl_lock, MUTEX_DEFAULT, IPL_NONE);
	newlim->pl_flags = 0;
	newlim->pl_refcnt = 1;
	newlim->pl_sv_limit = NULL;

	mutex_enter(&lim->pl_lock);
	memcpy(newlim->pl_rlimit, lim->pl_rlimit,
	    sizeof(struct rlimit) * RLIM_NLIMITS);

	alen = 0;
	corename = NULL;
	for (;;) {
		if (lim->pl_corename == defcorename) {
			newlim->pl_corename = defcorename;
			break;
		}
		len = strlen(lim->pl_corename) + 1;
		if (len <= alen) {
			newlim->pl_corename = corename;
			memcpy(corename, lim->pl_corename, len);
			corename = NULL;
			break;
		}
		mutex_exit(&lim->pl_lock);
		if (corename != NULL)
			free(corename, M_TEMP);
		alen = len;
		corename = malloc(alen, M_TEMP, M_WAITOK);
		mutex_enter(&lim->pl_lock);
	}
	mutex_exit(&lim->pl_lock);
	if (corename != NULL)
		free(corename, M_TEMP);
	return newlim;
}

void
lim_addref(struct plimit *lim)
{
	mutex_enter(&lim->pl_lock);
	lim->pl_refcnt++;
	mutex_exit(&lim->pl_lock);
}

/*
 * Give a process its own private plimit structure.
 * This will only be shared (in fork) if modifications are to be shared.
 */
void
lim_privatise(struct proc *p, bool set_shared)
{
	struct plimit *lim, *newlim;

	lim = p->p_limit;
	if (lim->pl_flags & PL_WRITEABLE) {
		if (set_shared)
			lim->pl_flags |= PL_SHAREMOD;
		return;
	}

	if (set_shared && lim->pl_flags & PL_SHAREMOD)
		return;

	newlim = lim_copy(lim);

	mutex_enter(&p->p_mutex);
	if (p->p_limit->pl_flags & PL_WRITEABLE) {
		/* Someone crept in while we were busy */
		mutex_exit(&p->p_mutex);
		limfree(newlim);
		if (set_shared)
			p->p_limit->pl_flags |= PL_SHAREMOD;
		return;
	}

	/*
	 * Since most accesses to p->p_limit aren't locked, we must not
	 * delete the old limit structure yet.
	 */
	newlim->pl_sv_limit = p->p_limit;
	newlim->pl_flags |= PL_WRITEABLE;
	if (set_shared)
		newlim->pl_flags |= PL_SHAREMOD;
	p->p_limit = newlim;
	mutex_exit(&p->p_mutex);
}

void
limfree(struct plimit *lim)
{
	struct plimit *sv_lim;
	int n;

	do {
		mutex_enter(&lim->pl_lock);
		n = --lim->pl_refcnt;
		mutex_exit(&lim->pl_lock);
		if (n > 0)
			return;
#ifdef DIAGNOSTIC
		if (n < 0)
			panic("limfree");
#endif
		if (lim->pl_corename != defcorename)
			free(lim->pl_corename, M_TEMP);
		sv_lim = lim->pl_sv_limit;
		mutex_destroy(&lim->pl_lock);
		pool_put(&plimit_pool, lim);
	} while ((lim = sv_lim) != NULL);
}

struct pstats *
pstatscopy(struct pstats *ps)
{
	struct pstats *newps;

	newps = pool_get(&pstats_pool, PR_WAITOK);

	memset(&newps->pstat_startzero, 0,
	    (unsigned) ((char *)&newps->pstat_endzero -
	    (char *)&newps->pstat_startzero));
	memcpy(&newps->pstat_startcopy, &ps->pstat_startcopy,
	    ((char *)&newps->pstat_endcopy -
	    (char *)&newps->pstat_startcopy));

	return (newps);
}

void
pstatsfree(struct pstats *ps)
{

	pool_put(&pstats_pool, ps);
}

/*
 * sysctl interface in five parts
 */

/*
 * a routine for sysctl proc subtree helpers that need to pick a valid
 * process by pid.
 */
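/*
 * The helpers below index backwards into the sysctl name: name[-1] is the
 * node being looked up (e.g. PROC_PID_CORENAME), and the component before
 * it (name[-2], or name[-4] for the rlimit nodes) is the pid, with
 * PROC_CURPROC meaning "the calling process".
 */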
static int
sysctl_proc_findproc(struct lwp *l, struct proc **p2, pid_t pid)
{
	struct proc *ptmp;
	int error = 0;

	if (pid == PROC_CURPROC)
		ptmp = l->l_proc;
	else if ((ptmp = pfind(pid)) == NULL)
		error = ESRCH;

	*p2 = ptmp;
	return (error);
}

/*
 * sysctl helper routine for setting a process's specific corefile
 * name.  picks the process based on the given pid and checks the
 * correctness of the new value.
 */
static int
sysctl_proc_corename(SYSCTLFN_ARGS)
{
	struct proc *ptmp;
	struct plimit *lim;
	int error = 0, len;
	char *cname;
	char *ocore;
	char *tmp;
	struct sysctlnode node;

	/*
	 * is this all correct?
	 */
	if (namelen != 0)
		return (EINVAL);
	if (name[-1] != PROC_PID_CORENAME)
		return (EINVAL);

	/*
	 * whom are we tweaking?
	 */
	error = sysctl_proc_findproc(l, &ptmp, (pid_t)name[-2]);
	if (error)
		return (error);

	/* XXX this should be in p_find() */
	error = kauth_authorize_process(l->l_cred, KAUTH_PROCESS_CANSEE,
	    ptmp, NULL, NULL, NULL);
	if (error)
		return (error);

	/*
	 * let them modify a temporary copy of the core name
	 */
	cname = PNBUF_GET();
	lim = ptmp->p_limit;
	mutex_enter(&lim->pl_lock);
	strlcpy(cname, lim->pl_corename, MAXPATHLEN);
	mutex_exit(&lim->pl_lock);

	node = *rnode;
	node.sysctl_data = cname;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));

	/*
	 * if that failed, or they have nothing new to say, or we've
	 * heard it before...
	 */
	if (error || newp == NULL)
		goto done;
	lim = ptmp->p_limit;
	mutex_enter(&lim->pl_lock);
	error = strcmp(cname, lim->pl_corename);
	mutex_exit(&lim->pl_lock);
	if (error == 0)
		/* Unchanged */
		goto done;

	error = kauth_authorize_process(l->l_cred, KAUTH_PROCESS_CORENAME,
	    ptmp, cname, NULL, NULL);
	if (error)
		goto done;

	/*
	 * no error yet and cname now has the new core name in it.
	 * let's see if it looks acceptable.  it must be either "core"
	 * or end in ".core" or "/core".
	 */
	len = strlen(cname);
	if (len < 4) {
		error = EINVAL;
	} else if (strcmp(cname + len - 4, "core") != 0) {
		error = EINVAL;
	} else if (len > 4 && cname[len - 5] != '/' && cname[len - 5] != '.') {
		error = EINVAL;
	}
	if (error != 0) {
		goto done;
	}

	/*
	 * hmm...looks good.  now...where do we put it?
	 */
	tmp = malloc(len + 1, M_TEMP, M_WAITOK|M_CANFAIL);
	if (tmp == NULL) {
		error = ENOMEM;
		goto done;
	}
	memcpy(tmp, cname, len + 1);

	lim_privatise(ptmp, false);
	lim = ptmp->p_limit;
	mutex_enter(&lim->pl_lock);
	ocore = lim->pl_corename;
	lim->pl_corename = tmp;
	mutex_exit(&lim->pl_lock);
	if (ocore != defcorename)
		free(ocore, M_TEMP);

 done:
	PNBUF_PUT(cname);
	return error;
}

/*
 * sysctl helper routine for checking/setting a process's stop flags,
 * one each for fork, exec and exit.
 */
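/*
 * These stop flags live in p_sflag and are therefore manipulated under
 * p_smutex.  From userland they are reachable through sysctl(8), e.g.
 * something like "sysctl -w proc.curproc.stopfork=1" (exact node names
 * as created by the SYSCTL_SETUP below).
 */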
static int
sysctl_proc_stop(SYSCTLFN_ARGS)
{
	struct proc *ptmp;
	int i, f, error = 0;
	struct sysctlnode node;

	if (namelen != 0)
		return (EINVAL);

	error = sysctl_proc_findproc(l, &ptmp, (pid_t)name[-2]);
	if (error)
		return (error);

	/* XXX this should be in p_find() */
	error = kauth_authorize_process(l->l_cred, KAUTH_PROCESS_CANSEE,
	    ptmp, NULL, NULL, NULL);
	if (error)
		return (error);

	switch (rnode->sysctl_num) {
	case PROC_PID_STOPFORK:
		f = PS_STOPFORK;
		break;
	case PROC_PID_STOPEXEC:
		f = PS_STOPEXEC;
		break;
	case PROC_PID_STOPEXIT:
		f = PS_STOPEXIT;
		break;
	default:
		return (EINVAL);
	}

	i = (ptmp->p_sflag & f) ? 1 : 0;
	node = *rnode;
	node.sysctl_data = &i;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);

	mutex_enter(&ptmp->p_smutex);
	error = kauth_authorize_process(l->l_cred, KAUTH_PROCESS_STOPFLAG,
	    ptmp, KAUTH_ARG(f), NULL, NULL);
	if (error) {
		mutex_exit(&ptmp->p_smutex);
		return (error);
	}
	if (i)
		ptmp->p_sflag |= f;
	else
		ptmp->p_sflag &= ~f;
	mutex_exit(&ptmp->p_smutex);

	return (0);
}

/*
 * sysctl helper routine for a process's rlimits as exposed by sysctl.
 */
static int
sysctl_proc_plimit(SYSCTLFN_ARGS)
{
	struct proc *ptmp;
	u_int limitno;
	int which, error = 0;
	struct rlimit alim;
	struct sysctlnode node;

	if (namelen != 0)
		return (EINVAL);

	which = name[-1];
	if (which != PROC_PID_LIMIT_TYPE_SOFT &&
	    which != PROC_PID_LIMIT_TYPE_HARD)
		return (EINVAL);

	limitno = name[-2] - 1;
	if (limitno >= RLIM_NLIMITS)
		return (EINVAL);

	if (name[-3] != PROC_PID_LIMIT)
		return (EINVAL);

	error = sysctl_proc_findproc(l, &ptmp, (pid_t)name[-4]);
	if (error)
		return (error);

	/* XXX this should be in p_find() */
	error = kauth_authorize_process(l->l_cred, KAUTH_PROCESS_CANSEE,
	    ptmp, NULL, NULL, NULL);
	if (error)
		return (error);

	node = *rnode;
	memcpy(&alim, &ptmp->p_rlimit[limitno], sizeof(alim));
	if (which == PROC_PID_LIMIT_TYPE_HARD)
		node.sysctl_data = &alim.rlim_max;
	else
		node.sysctl_data = &alim.rlim_cur;

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);

	return (dosetrlimit(l, ptmp, limitno, &alim));
}
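/*
 * The limit index carried in the sysctl name is offset by one from the
 * corresponding RLIMIT_* constant (hence the "name[-2] - 1" above); the
 * create_proc_plimit() invocations below keep the two namespaces in the
 * same order.  Reading a limit from userland then looks like, e.g.,
 * "sysctl proc.curproc.rlimit.descriptors.soft" (illustrative).
 */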
/*
 * and finally, the actual glue that sticks it to the tree
 */
SYSCTL_SETUP(sysctl_proc_setup, "sysctl proc subtree setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "proc", NULL,
	    NULL, 0, NULL, 0,
	    CTL_PROC, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_ANYNUMBER,
	    CTLTYPE_NODE, "curproc",
	    SYSCTL_DESCR("Per-process settings"),
	    NULL, 0, NULL, 0,
	    CTL_PROC, PROC_CURPROC, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE|CTLFLAG_ANYWRITE,
	    CTLTYPE_STRING, "corename",
	    SYSCTL_DESCR("Core file name"),
	    sysctl_proc_corename, 0, NULL, MAXPATHLEN,
	    CTL_PROC, PROC_CURPROC, PROC_PID_CORENAME, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "rlimit",
	    SYSCTL_DESCR("Process limits"),
	    NULL, 0, NULL, 0,
	    CTL_PROC, PROC_CURPROC, PROC_PID_LIMIT, CTL_EOL);

#define create_proc_plimit(s, n) do {					\
	sysctl_createv(clog, 0, NULL, NULL,				\
	    CTLFLAG_PERMANENT,						\
	    CTLTYPE_NODE, s,						\
	    SYSCTL_DESCR("Process " s " limits"),			\
	    NULL, 0, NULL, 0,						\
	    CTL_PROC, PROC_CURPROC, PROC_PID_LIMIT, n,			\
	    CTL_EOL);							\
	sysctl_createv(clog, 0, NULL, NULL,				\
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE|CTLFLAG_ANYWRITE,	\
	    CTLTYPE_QUAD, "soft",					\
	    SYSCTL_DESCR("Process soft " s " limit"),			\
	    sysctl_proc_plimit, 0, NULL, 0,				\
	    CTL_PROC, PROC_CURPROC, PROC_PID_LIMIT, n,			\
	    PROC_PID_LIMIT_TYPE_SOFT, CTL_EOL);				\
	sysctl_createv(clog, 0, NULL, NULL,				\
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE|CTLFLAG_ANYWRITE,	\
	    CTLTYPE_QUAD, "hard",					\
	    SYSCTL_DESCR("Process hard " s " limit"),			\
	    sysctl_proc_plimit, 0, NULL, 0,				\
	    CTL_PROC, PROC_CURPROC, PROC_PID_LIMIT, n,			\
	    PROC_PID_LIMIT_TYPE_HARD, CTL_EOL);				\
	} while (0/*CONSTCOND*/)

	create_proc_plimit("cputime", PROC_PID_LIMIT_CPU);
	create_proc_plimit("filesize", PROC_PID_LIMIT_FSIZE);
	create_proc_plimit("datasize", PROC_PID_LIMIT_DATA);
	create_proc_plimit("stacksize", PROC_PID_LIMIT_STACK);
	create_proc_plimit("coredumpsize", PROC_PID_LIMIT_CORE);
	create_proc_plimit("memoryuse", PROC_PID_LIMIT_RSS);
	create_proc_plimit("memorylocked", PROC_PID_LIMIT_MEMLOCK);
	create_proc_plimit("maxproc", PROC_PID_LIMIT_NPROC);
	create_proc_plimit("descriptors", PROC_PID_LIMIT_NOFILE);
	create_proc_plimit("sbsize", PROC_PID_LIMIT_SBSIZE);

#undef create_proc_plimit

	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE|CTLFLAG_ANYWRITE,
	    CTLTYPE_INT, "stopfork",
	    SYSCTL_DESCR("Stop process at fork(2)"),
	    sysctl_proc_stop, 0, NULL, 0,
	    CTL_PROC, PROC_CURPROC, PROC_PID_STOPFORK, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE|CTLFLAG_ANYWRITE,
	    CTLTYPE_INT, "stopexec",
	    SYSCTL_DESCR("Stop process at execve(2)"),
	    sysctl_proc_stop, 0, NULL, 0,
	    CTL_PROC, PROC_CURPROC, PROC_PID_STOPEXEC, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE|CTLFLAG_ANYWRITE,
	    CTLTYPE_INT, "stopexit",
	    SYSCTL_DESCR("Stop process before completing exit"),
	    sysctl_proc_stop, 0, NULL, 0,
	    CTL_PROC, PROC_CURPROC, PROC_PID_STOPEXIT, CTL_EOL);
}

void
uid_init(void)
{

	/*
	 * XXXSMP This could be at IPL_SOFTNET, but for now we want
	 * it to be deadlock free, so it must be at IPL_VM.
	 */
	mutex_init(&uihashtbl_lock, MUTEX_DRIVER, IPL_VM);

	/*
	 * Ensure that uid 0 is always in the user hash table, as
	 * sbreserve() expects it available from interrupt context.
	 */
	(void)uid_find(0);
}

struct uidinfo *
uid_find(uid_t uid)
{
	struct uidinfo *uip;
	struct uidinfo *newuip = NULL;
	struct uihashhead *uipp;

	uipp = UIHASH(uid);

 again:
	mutex_enter(&uihashtbl_lock);
	LIST_FOREACH(uip, uipp, ui_hash)
		if (uip->ui_uid == uid) {
			mutex_exit(&uihashtbl_lock);
			if (newuip) {
				mutex_destroy(&newuip->ui_lock);
				free(newuip, M_PROC);
			}
			return uip;
		}
	if (newuip == NULL) {
		mutex_exit(&uihashtbl_lock);
		/* Must not be called from interrupt context. */
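		/*
		 * The hash lock is dropped for the (possibly sleeping)
		 * allocation and the lookup is retried from "again", since
		 * another thread may have inserted this uid meanwhile; if
		 * so, the preallocated entry is freed above.
		 */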
		newuip = malloc(sizeof(*uip), M_PROC, M_WAITOK | M_ZERO);
		/* XXX this could be IPL_SOFTNET */
		mutex_init(&newuip->ui_lock, MUTEX_DRIVER, IPL_VM);
		goto again;
	}
	uip = newuip;

	LIST_INSERT_HEAD(uipp, uip, ui_hash);
	uip->ui_uid = uid;
	mutex_exit(&uihashtbl_lock);

	return uip;
}

/*
 * Change the count of processes associated with a given user.
 */
int
chgproccnt(uid_t uid, int diff)
{
	struct uidinfo *uip;

	if (diff == 0)
		return 0;

	uip = uid_find(uid);
	mutex_enter(&uip->ui_lock);
	uip->ui_proccnt += diff;
	KASSERT(uip->ui_proccnt >= 0);
	mutex_exit(&uip->ui_lock);
	return uip->ui_proccnt;
}

int
chgsbsize(struct uidinfo *uip, u_long *hiwat, u_long to, rlim_t xmax)
{
	rlim_t nsb;

	mutex_enter(&uip->ui_lock);
	nsb = uip->ui_sbsize + to - *hiwat;
	if (to > *hiwat && nsb > xmax) {
		mutex_exit(&uip->ui_lock);
		return 0;
	}
	*hiwat = to;
	uip->ui_sbsize = nsb;
	KASSERT(uip->ui_sbsize >= 0);
	mutex_exit(&uip->ui_lock);
	return 1;
}