/*	$NetBSD: kern_resource.c,v 1.119 2007/08/08 14:07:11 ad Exp $	*/

/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_resource.c	8.8 (Berkeley) 2/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_resource.c,v 1.119 2007/08/08 14:07:11 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/resourcevar.h>
#include <sys/malloc.h>
#include <sys/namei.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/kauth.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>

/*
 * Maximum process data and stack limits.
 * They are variables so they are patchable.
 */
rlim_t maxdmap = MAXDSIZ;
rlim_t maxsmap = MAXSSIZ;

struct uihashhead *uihashtbl;
u_long	uihash;		/* size of hash table - 1 */
kmutex_t uihashtbl_lock;

/*
 * Resource controls and accounting.
 */
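
/*
 * Note: nice values are kept biased by NZERO, so the user-visible range
 * PRIO_MIN..PRIO_MAX maps onto a non-negative p_nice; the syscalls below
 * add and subtract NZERO when converting between the two representations.
 */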

int
sys_getpriority(struct lwp *l, void *v, register_t *retval)
{
	struct sys_getpriority_args /* {
		syscallarg(int) which;
		syscallarg(id_t) who;
	} */ *uap = v;
	struct proc *curp = l->l_proc, *p;
	int low = NZERO + PRIO_MAX + 1;
	int who = SCARG(uap, who);

	mutex_enter(&proclist_lock);
	switch (SCARG(uap, which)) {
	case PRIO_PROCESS:
		if (who == 0)
			p = curp;
		else
			p = p_find(who, PFIND_LOCKED);
		if (p != NULL)
			low = p->p_nice;
		break;

	case PRIO_PGRP: {
		struct pgrp *pg;

		if (who == 0)
			pg = curp->p_pgrp;
		else if ((pg = pg_find(who, PFIND_LOCKED)) == NULL)
			break;
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			if (p->p_nice < low)
				low = p->p_nice;
		}
		break;
	}

	case PRIO_USER:
		if (who == 0)
			who = (int)kauth_cred_geteuid(l->l_cred);
		PROCLIST_FOREACH(p, &allproc) {
			mutex_enter(&p->p_mutex);
			if (kauth_cred_geteuid(p->p_cred) ==
			    (uid_t)who && p->p_nice < low)
				low = p->p_nice;
			mutex_exit(&p->p_mutex);
		}
		break;

	default:
		mutex_exit(&proclist_lock);
		return (EINVAL);
	}
	mutex_exit(&proclist_lock);

	if (low == NZERO + PRIO_MAX + 1)
		return (ESRCH);
	*retval = low - NZERO;
	return (0);
}

/* ARGSUSED */
int
sys_setpriority(struct lwp *l, void *v, register_t *retval)
{
	struct sys_setpriority_args /* {
		syscallarg(int) which;
		syscallarg(id_t) who;
		syscallarg(int) prio;
	} */ *uap = v;
	struct proc *curp = l->l_proc, *p;
	int found = 0, error = 0;
	int who = SCARG(uap, who);

	mutex_enter(&proclist_lock);
	switch (SCARG(uap, which)) {
	case PRIO_PROCESS:
		if (who == 0)
			p = curp;
		else
			p = p_find(who, PFIND_LOCKED);
		if (p != NULL) {
			mutex_enter(&p->p_mutex);
			error = donice(l, p, SCARG(uap, prio));
			mutex_exit(&p->p_mutex);
			found++;
		}
		break;

	case PRIO_PGRP: {
		struct pgrp *pg;

		if (who == 0)
			pg = curp->p_pgrp;
		else if ((pg = pg_find(who, PFIND_LOCKED)) == NULL)
			break;
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			mutex_enter(&p->p_mutex);
			error = donice(l, p, SCARG(uap, prio));
			mutex_exit(&p->p_mutex);
			found++;
		}
		break;
	}

	case PRIO_USER:
		if (who == 0)
			who = (int)kauth_cred_geteuid(l->l_cred);
		PROCLIST_FOREACH(p, &allproc) {
			mutex_enter(&p->p_mutex);
			if (kauth_cred_geteuid(p->p_cred) == (uid_t)who) {
				error = donice(l, p, SCARG(uap, prio));
				found++;
			}
			mutex_exit(&p->p_mutex);
		}
		break;

	default:
		error = EINVAL;
		break;
	}
	mutex_exit(&proclist_lock);
	if (found == 0)
		return (ESRCH);
	return (error);
}
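
/*
 * Illustrative userland usage of the two syscalls above (a sketch, not
 * part of this file): since getpriority(2) can legitimately return -1,
 * errno must be cleared before the call to detect errors.
 *
 *	errno = 0;
 *	prio = getpriority(PRIO_PROCESS, 0);
 *	if (prio == -1 && errno != 0)
 *		err(1, "getpriority");
 *	if (setpriority(PRIO_PROCESS, 0, prio + 1) == -1)
 *		err(1, "setpriority");
 */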

/*
 * Renice a process.
 *
 * Call with the target process' credentials locked.
 */
int
donice(struct lwp *l, struct proc *chgp, int n)
{
	kauth_cred_t cred = l->l_cred;
	int onice;

	KASSERT(mutex_owned(&chgp->p_mutex));

	if (n > PRIO_MAX)
		n = PRIO_MAX;
	if (n < PRIO_MIN)
		n = PRIO_MIN;
	n += NZERO;

 again:
	onice = chgp->p_nice;
	if (kauth_authorize_process(cred, KAUTH_PROCESS_NICE, chgp,
	    KAUTH_ARG(n), NULL, NULL))
		return (EACCES);
	mutex_spin_enter(&chgp->p_stmutex);
	if (onice != chgp->p_nice) {
		mutex_spin_exit(&chgp->p_stmutex);
		goto again;
	}
	sched_nice(chgp, n);
	mutex_spin_exit(&chgp->p_stmutex);
	return (0);
}

/* ARGSUSED */
int
sys_setrlimit(struct lwp *l, void *v, register_t *retval)
{
	struct sys_setrlimit_args /* {
		syscallarg(int) which;
		syscallarg(const struct rlimit *) rlp;
	} */ *uap = v;
	int which = SCARG(uap, which);
	struct rlimit alim;
	int error;

	error = copyin(SCARG(uap, rlp), &alim, sizeof(struct rlimit));
	if (error)
		return (error);
	return (dosetrlimit(l, l->l_proc, which, &alim));
}

int
dosetrlimit(struct lwp *l, struct proc *p, int which, struct rlimit *limp)
{
	struct rlimit *alimp;
	struct plimit *oldplim;
	int error;

	if ((u_int)which >= RLIM_NLIMITS)
		return (EINVAL);

	if (limp->rlim_cur < 0 || limp->rlim_max < 0)
		return (EINVAL);

	alimp = &p->p_rlimit[which];
	/* if we don't change the value, no need to limcopy() */
	if (limp->rlim_cur == alimp->rlim_cur &&
	    limp->rlim_max == alimp->rlim_max)
		return 0;

	if (limp->rlim_cur > limp->rlim_max) {
		/*
		 * This is a programming error.  According to SUSv2, we
		 * should return an error in this case.
		 */
		return (EINVAL);
	}
	error = kauth_authorize_process(l->l_cred, KAUTH_PROCESS_RLIMIT,
	    p, limp, KAUTH_ARG(which), NULL);
	if (error)
		return (error);

	mutex_enter(&p->p_mutex);
	if (p->p_limit->p_refcnt > 1 &&
	    (p->p_limit->p_lflags & PL_SHAREMOD) == 0) {
		oldplim = p->p_limit;
		p->p_limit = limcopy(p);
		limfree(oldplim);
		alimp = &p->p_rlimit[which];
	}

	switch (which) {

	case RLIMIT_DATA:
		if (limp->rlim_cur > maxdmap)
			limp->rlim_cur = maxdmap;
		if (limp->rlim_max > maxdmap)
			limp->rlim_max = maxdmap;
		break;

	case RLIMIT_STACK:
		if (limp->rlim_cur > maxsmap)
			limp->rlim_cur = maxsmap;
		if (limp->rlim_max > maxsmap)
			limp->rlim_max = maxsmap;

		/*
		 * Return EINVAL if the new stack size limit is lower than
		 * current usage.  Otherwise, the process would get SIGSEGV
		 * the moment it tried to access anything on its current
		 * stack.  This conforms to SUSv2.
		 */
		if (limp->rlim_cur < p->p_vmspace->vm_ssize * PAGE_SIZE
		    || limp->rlim_max < p->p_vmspace->vm_ssize * PAGE_SIZE) {
			mutex_exit(&p->p_mutex);
			return (EINVAL);
		}

		/*
		 * Stack is allocated to the max at exec time with only
		 * "rlim_cur" bytes accessible (in other words, the stack
		 * is allocated as two contiguous regions divided at the
		 * "rlim_cur" byte boundary).
		 *
		 * Since allocation is done in terms of pages, round up
		 * "rlim_cur" (otherwise the contiguous regions would
		 * overlap).  If the stack limit is going up, make more of
		 * the stack accessible; if it is going down, make part of
		 * it inaccessible.
		 */
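		/*
		 * Worked example (illustrative numbers only): raising the
		 * soft limit from 2MB to 8MB grants read/write to the 6MB
		 * region [vm_minsaddr - 8MB, vm_minsaddr - 2MB); lowering
		 * it does the reverse with VM_PROT_NONE.
		 */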
		limp->rlim_cur = round_page(limp->rlim_cur);
		if (limp->rlim_cur != alimp->rlim_cur) {
			vaddr_t addr;
			vsize_t size;
			vm_prot_t prot;

			if (limp->rlim_cur > alimp->rlim_cur) {
				prot = VM_PROT_READ | VM_PROT_WRITE;
				size = limp->rlim_cur - alimp->rlim_cur;
				addr = (vaddr_t)p->p_vmspace->vm_minsaddr -
				    limp->rlim_cur;
			} else {
				prot = VM_PROT_NONE;
				size = alimp->rlim_cur - limp->rlim_cur;
				addr = (vaddr_t)p->p_vmspace->vm_minsaddr -
				    alimp->rlim_cur;
			}
			(void) uvm_map_protect(&p->p_vmspace->vm_map,
			    addr, addr + size, prot, false);
		}
		break;

	case RLIMIT_NOFILE:
		if (limp->rlim_cur > maxfiles)
			limp->rlim_cur = maxfiles;
		if (limp->rlim_max > maxfiles)
			limp->rlim_max = maxfiles;
		break;

	case RLIMIT_NPROC:
		if (limp->rlim_cur > maxproc)
			limp->rlim_cur = maxproc;
		if (limp->rlim_max > maxproc)
			limp->rlim_max = maxproc;
		break;
	}
	*alimp = *limp;
	mutex_exit(&p->p_mutex);
	return (0);
}

/* ARGSUSED */
int
sys_getrlimit(struct lwp *l, void *v, register_t *retval)
{
	struct sys_getrlimit_args /* {
		syscallarg(int) which;
		syscallarg(struct rlimit *) rlp;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	int which = SCARG(uap, which);
	struct rlimit rl;

	if ((u_int)which >= RLIM_NLIMITS)
		return (EINVAL);

	mutex_enter(&p->p_mutex);
	memcpy(&rl, &p->p_rlimit[which], sizeof(rl));
	mutex_exit(&p->p_mutex);

	return copyout(&rl, SCARG(uap, rlp), sizeof(rl));
}

/*
 * Transform the running time and tick information in proc p into user,
 * system, and interrupt time usage.
 *
 * Should be called with p->p_smutex held unless called from exit1().
 */
void
calcru(struct proc *p, struct timeval *up, struct timeval *sp,
    struct timeval *ip, struct timeval *rp)
{
	u_quad_t u, st, ut, it, tot;
	unsigned long sec;
	long usec;
	struct timeval tv;
	struct lwp *l;

	mutex_spin_enter(&p->p_stmutex);
	st = p->p_sticks;
	ut = p->p_uticks;
	it = p->p_iticks;
	mutex_spin_exit(&p->p_stmutex);

	sec = p->p_rtime.tv_sec;
	usec = p->p_rtime.tv_usec;

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		lwp_lock(l);
		sec += l->l_rtime.tv_sec;
		if ((usec += l->l_rtime.tv_usec) >= 1000000) {
			sec++;
			usec -= 1000000;
		}
		if (l->l_cpu == curcpu()) {
			struct schedstate_percpu *spc;

			KDASSERT(l->l_cpu != NULL);
			spc = &l->l_cpu->ci_schedstate;

			/*
			 * Adjust for the current time slice.  This is
			 * actually fairly important since the error
			 * here is on the order of a time quantum,
			 * which is much greater than the sampling
			 * error.
			 */
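			/*
			 * spc_runtime is the timestamp taken when this
			 * LWP was last switched onto the CPU, so the
			 * difference below is the portion of the current
			 * run not yet folded into l_rtime.
			 */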
			microtime(&tv);
			sec += tv.tv_sec - spc->spc_runtime.tv_sec;
			usec += tv.tv_usec - spc->spc_runtime.tv_usec;
			if (usec >= 1000000) {
				sec++;
				usec -= 1000000;
			}
		}
		lwp_unlock(l);
	}

	tot = st + ut + it;
	u = sec * 1000000ull + usec;

	if (tot == 0) {
		/* No ticks, so we cannot apportion the time; split 50-50. */
		st = ut = u / 2;
	} else {
		st = (u * st) / tot;
		ut = (u * ut) / tot;
	}
	if (sp != NULL) {
		sp->tv_sec = st / 1000000;
		sp->tv_usec = st % 1000000;
	}
	if (up != NULL) {
		up->tv_sec = ut / 1000000;
		up->tv_usec = ut % 1000000;
	}
	if (ip != NULL) {
		if (it != 0)
			it = (u * it) / tot;
		ip->tv_sec = it / 1000000;
		ip->tv_usec = it % 1000000;
	}
	if (rp != NULL) {
		rp->tv_sec = sec;
		rp->tv_usec = usec;
	}
}

/* ARGSUSED */
int
sys_getrusage(struct lwp *l, void *v, register_t *retval)
{
	struct sys_getrusage_args /* {
		syscallarg(int) who;
		syscallarg(struct rusage *) rusage;
	} */ *uap = v;
	struct rusage ru;
	struct proc *p = l->l_proc;

	switch (SCARG(uap, who)) {
	case RUSAGE_SELF:
		mutex_enter(&p->p_smutex);
		memcpy(&ru, &p->p_stats->p_ru, sizeof(ru));
		calcru(p, &ru.ru_utime, &ru.ru_stime, NULL, NULL);
		mutex_exit(&p->p_smutex);
		break;

	case RUSAGE_CHILDREN:
		mutex_enter(&p->p_smutex);
		memcpy(&ru, &p->p_stats->p_cru, sizeof(ru));
		mutex_exit(&p->p_smutex);
		break;

	default:
		return EINVAL;
	}

	return copyout(&ru, SCARG(uap, rusage), sizeof(ru));
}

void
ruadd(struct rusage *ru, struct rusage *ru2)
{
	long *ip, *ip2;
	int i;

	timeradd(&ru->ru_utime, &ru2->ru_utime, &ru->ru_utime);
	timeradd(&ru->ru_stime, &ru2->ru_stime, &ru->ru_stime);
	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
	ip = &ru->ru_first; ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
		*ip++ += *ip2++;
}
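
/*
 * ru_first and ru_last bracket the block of integer counters in struct
 * rusage, which is what lets ruadd() above sum the remaining fields with
 * a single pointer walk instead of naming each one.
 */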

/*
 * Make a copy of the plimit structure.
 * We share these structures copy-on-write after fork,
 * and copy when a limit is changed.
 *
 * XXXSMP This is atrocious, need to simplify.
 */
struct plimit *
limcopy(struct proc *p)
{
	struct plimit *lim, *newlim;
	char *corename;
	size_t l;

	KASSERT(mutex_owned(&p->p_mutex));

	mutex_exit(&p->p_mutex);
	newlim = pool_get(&plimit_pool, PR_WAITOK);
	mutex_init(&newlim->p_lock, MUTEX_DEFAULT, IPL_NONE);
	newlim->p_lflags = 0;
	newlim->p_refcnt = 1;
	mutex_enter(&p->p_mutex);

	for (;;) {
		lim = p->p_limit;
		mutex_enter(&lim->p_lock);
		if (lim->pl_corename != defcorename) {
			l = strlen(lim->pl_corename) + 1;

			mutex_exit(&lim->p_lock);
			mutex_exit(&p->p_mutex);
			corename = malloc(l, M_TEMP, M_WAITOK);
			mutex_enter(&p->p_mutex);
			mutex_enter(&lim->p_lock);

			if (l != strlen(lim->pl_corename) + 1) {
				mutex_exit(&lim->p_lock);
				mutex_exit(&p->p_mutex);
				free(corename, M_TEMP);
				mutex_enter(&p->p_mutex);
				continue;
			}
		} else
			l = 0;

		memcpy(newlim->pl_rlimit, lim->pl_rlimit,
		    sizeof(struct rlimit) * RLIM_NLIMITS);
		if (l != 0) {
			newlim->pl_corename = corename;
			strlcpy(newlim->pl_corename, lim->pl_corename, l);
		} else
			newlim->pl_corename = defcorename;
		mutex_exit(&lim->p_lock);
		break;
	}

	return (newlim);
}

void
limfree(struct plimit *lim)
{
	int n;

	mutex_enter(&lim->p_lock);
	n = --lim->p_refcnt;
	mutex_exit(&lim->p_lock);
	if (n > 0)
		return;
#ifdef DIAGNOSTIC
	if (n < 0)
		panic("limfree");
#endif
	if (lim->pl_corename != defcorename)
		free(lim->pl_corename, M_TEMP);
	mutex_destroy(&lim->p_lock);
	pool_put(&plimit_pool, lim);
}

struct pstats *
pstatscopy(struct pstats *ps)
{
	struct pstats *newps;

	newps = pool_get(&pstats_pool, PR_WAITOK);

	memset(&newps->pstat_startzero, 0,
	    (unsigned) ((char *)&newps->pstat_endzero -
	    (char *)&newps->pstat_startzero));
	memcpy(&newps->pstat_startcopy, &ps->pstat_startcopy,
	    ((char *)&newps->pstat_endcopy -
	    (char *)&newps->pstat_startcopy));

	return (newps);
}

void
pstatsfree(struct pstats *ps)
{

	pool_put(&pstats_pool, ps);
}

/*
 * sysctl interface in five parts
 */

/*
 * a routine for sysctl proc subtree helpers that need to pick a valid
 * process by pid.
 */
static int
sysctl_proc_findproc(struct lwp *l, struct proc **p2, pid_t pid)
{
	struct proc *ptmp;
	int error = 0;

	if (pid == PROC_CURPROC)
		ptmp = l->l_proc;
	else if ((ptmp = pfind(pid)) == NULL)
		error = ESRCH;

	*p2 = ptmp;
	return (error);
}

/*
 * sysctl helper routine for setting a process's specific corefile
 * name.  picks the process based on the given pid and checks the
 * correctness of the new value.
 */
static int
sysctl_proc_corename(SYSCTLFN_ARGS)
{
	struct proc *ptmp;
	struct plimit *lim;
	int error = 0, len;
	char *cname;
	char *tmp;
	struct sysctlnode node;

	/*
	 * is this all correct?
	 */
	if (namelen != 0)
		return (EINVAL);
	if (name[-1] != PROC_PID_CORENAME)
		return (EINVAL);

	/*
	 * whom are we tweaking?
	 */
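	/*
	 * The name vector has already been advanced past this node, so
	 * name[-1] is the PROC_PID_CORENAME component checked above and
	 * name[-2] is the pid component of the proc.<pid>.corename path.
	 */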
	error = sysctl_proc_findproc(l, &ptmp, (pid_t)name[-2]);
	if (error)
		return (error);

	/* XXX this should be in p_find() */
	error = kauth_authorize_process(l->l_cred, KAUTH_PROCESS_CANSEE,
	    ptmp, NULL, NULL, NULL);
	if (error)
		return (error);

	cname = PNBUF_GET();
	/*
	 * let them modify a temporary copy of the core name
	 */
	node = *rnode;
	strlcpy(cname, ptmp->p_limit->pl_corename, MAXPATHLEN);
	node.sysctl_data = cname;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));

	/*
	 * if that failed, or they have nothing new to say, or we've
	 * heard it before...
	 */
	if (error || newp == NULL ||
	    strcmp(cname, ptmp->p_limit->pl_corename) == 0) {
		goto done;
	}

	error = kauth_authorize_process(l->l_cred, KAUTH_PROCESS_CORENAME,
	    ptmp, cname, NULL, NULL);
	if (error)
		goto done;

	/*
	 * no error yet and cname now has the new core name in it.
	 * let's see if it looks acceptable.  it must be either "core"
	 * or end in ".core" or "/core".
	 */
	len = strlen(cname);
	if (len < 4) {
		error = EINVAL;
	} else if (strcmp(cname + len - 4, "core") != 0) {
		error = EINVAL;
	} else if (len > 4 && cname[len - 5] != '/' && cname[len - 5] != '.') {
		error = EINVAL;
	}
	if (error != 0) {
		goto done;
	}

	/*
	 * hmm...looks good.  now...where do we put it?
	 */
	tmp = malloc(len + 1, M_TEMP, M_WAITOK|M_CANFAIL);
	if (tmp == NULL) {
		error = ENOMEM;
		goto done;
	}
	strlcpy(tmp, cname, len + 1);

	mutex_enter(&ptmp->p_mutex);
	lim = ptmp->p_limit;
	if (lim->p_refcnt > 1 && (lim->p_lflags & PL_SHAREMOD) == 0) {
		ptmp->p_limit = limcopy(ptmp);
		limfree(lim);
		lim = ptmp->p_limit;
	}
	if (lim->pl_corename != defcorename)
		free(lim->pl_corename, M_TEMP);
	lim->pl_corename = tmp;
	mutex_exit(&ptmp->p_mutex);
done:
	PNBUF_PUT(cname);
	return error;
}

/*
 * sysctl helper routine for checking/setting a process's stop flags,
 * one each for fork, exec and exit.
 */
static int
sysctl_proc_stop(SYSCTLFN_ARGS)
{
	struct proc *ptmp;
	int i, f, error = 0;
	struct sysctlnode node;

	if (namelen != 0)
		return (EINVAL);

	error = sysctl_proc_findproc(l, &ptmp, (pid_t)name[-2]);
	if (error)
		return (error);

	/* XXX this should be in p_find() */
	error = kauth_authorize_process(l->l_cred, KAUTH_PROCESS_CANSEE,
	    ptmp, NULL, NULL, NULL);
	if (error)
		return (error);

	switch (rnode->sysctl_num) {
	case PROC_PID_STOPFORK:
		f = PS_STOPFORK;
		break;
	case PROC_PID_STOPEXEC:
		f = PS_STOPEXEC;
		break;
	case PROC_PID_STOPEXIT:
		f = PS_STOPEXIT;
		break;
	default:
		return (EINVAL);
	}

	i = (ptmp->p_sflag & f) ? 1 : 0;
	node = *rnode;
	node.sysctl_data = &i;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);

	mutex_enter(&ptmp->p_smutex);
	error = kauth_authorize_process(l->l_cred, KAUTH_PROCESS_STOPFLAG,
	    ptmp, KAUTH_ARG(f), NULL, NULL);
	if (error) {
		mutex_exit(&ptmp->p_smutex);
		return (error);
	}
	if (i)
		ptmp->p_sflag |= f;
	else
		ptmp->p_sflag &= ~f;
	mutex_exit(&ptmp->p_smutex);

	return (0);
}

/*
 * sysctl helper routine for a process's rlimits as exposed by sysctl.
 */
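/*
 * These nodes live at proc.<pid>.rlimit.<resource>.{soft,hard}; by the
 * time the helper runs, name[] has been advanced past the node, so
 * name[-1] selects soft or hard, name[-2] is the resource (offset by one
 * from the corresponding RLIMIT_* index), name[-3] must be
 * PROC_PID_LIMIT and name[-4] is the pid.
 */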
static int
sysctl_proc_plimit(SYSCTLFN_ARGS)
{
	struct proc *ptmp;
	u_int limitno;
	int which, error = 0;
	struct rlimit alim;
	struct sysctlnode node;

	if (namelen != 0)
		return (EINVAL);

	which = name[-1];
	if (which != PROC_PID_LIMIT_TYPE_SOFT &&
	    which != PROC_PID_LIMIT_TYPE_HARD)
		return (EINVAL);

	limitno = name[-2] - 1;
	if (limitno >= RLIM_NLIMITS)
		return (EINVAL);

	if (name[-3] != PROC_PID_LIMIT)
		return (EINVAL);

	error = sysctl_proc_findproc(l, &ptmp, (pid_t)name[-4]);
	if (error)
		return (error);

	/* XXX this should be in p_find() */
	error = kauth_authorize_process(l->l_cred, KAUTH_PROCESS_CANSEE,
	    ptmp, NULL, NULL, NULL);
	if (error)
		return (error);

	node = *rnode;
	memcpy(&alim, &ptmp->p_rlimit[limitno], sizeof(alim));
	if (which == PROC_PID_LIMIT_TYPE_HARD)
		node.sysctl_data = &alim.rlim_max;
	else
		node.sysctl_data = &alim.rlim_cur;

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);

	return (dosetrlimit(l, ptmp, limitno, &alim));
}

/*
 * and finally, the actual glue that sticks it to the tree
 */
SYSCTL_SETUP(sysctl_proc_setup, "sysctl proc subtree setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "proc", NULL,
	    NULL, 0, NULL, 0,
	    CTL_PROC, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_ANYNUMBER,
	    CTLTYPE_NODE, "curproc",
	    SYSCTL_DESCR("Per-process settings"),
	    NULL, 0, NULL, 0,
	    CTL_PROC, PROC_CURPROC, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE|CTLFLAG_ANYWRITE,
	    CTLTYPE_STRING, "corename",
	    SYSCTL_DESCR("Core file name"),
	    sysctl_proc_corename, 0, NULL, MAXPATHLEN,
	    CTL_PROC, PROC_CURPROC, PROC_PID_CORENAME, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "rlimit",
	    SYSCTL_DESCR("Process limits"),
	    NULL, 0, NULL, 0,
	    CTL_PROC, PROC_CURPROC, PROC_PID_LIMIT, CTL_EOL);

#define create_proc_plimit(s, n) do {					\
	sysctl_createv(clog, 0, NULL, NULL,				\
	    CTLFLAG_PERMANENT,						\
	    CTLTYPE_NODE, s,						\
	    SYSCTL_DESCR("Process " s " limits"),			\
	    NULL, 0, NULL, 0,						\
	    CTL_PROC, PROC_CURPROC, PROC_PID_LIMIT, n,			\
	    CTL_EOL);							\
	sysctl_createv(clog, 0, NULL, NULL,				\
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE|CTLFLAG_ANYWRITE,	\
	    CTLTYPE_QUAD, "soft",					\
	    SYSCTL_DESCR("Process soft " s " limit"),			\
	    sysctl_proc_plimit, 0, NULL, 0,				\
	    CTL_PROC, PROC_CURPROC, PROC_PID_LIMIT, n,			\
	    PROC_PID_LIMIT_TYPE_SOFT, CTL_EOL);				\
	sysctl_createv(clog, 0, NULL, NULL,				\
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE|CTLFLAG_ANYWRITE,	\
	    CTLTYPE_QUAD, "hard",					\
	    SYSCTL_DESCR("Process hard " s " limit"),			\
	    sysctl_proc_plimit, 0, NULL, 0,				\
	    CTL_PROC, PROC_CURPROC, PROC_PID_LIMIT, n,			\
	    PROC_PID_LIMIT_TYPE_HARD, CTL_EOL);				\
	} while (0/*CONSTCOND*/)

	create_proc_plimit("cputime", PROC_PID_LIMIT_CPU);
	create_proc_plimit("filesize", PROC_PID_LIMIT_FSIZE);
	create_proc_plimit("datasize", PROC_PID_LIMIT_DATA);
	create_proc_plimit("stacksize", PROC_PID_LIMIT_STACK);
	create_proc_plimit("coredumpsize", PROC_PID_LIMIT_CORE);
	create_proc_plimit("memoryuse", PROC_PID_LIMIT_RSS);
	create_proc_plimit("memorylocked", PROC_PID_LIMIT_MEMLOCK);
	create_proc_plimit("maxproc", PROC_PID_LIMIT_NPROC);
	create_proc_plimit("descriptors", PROC_PID_LIMIT_NOFILE);
	create_proc_plimit("sbsize", PROC_PID_LIMIT_SBSIZE);

#undef create_proc_plimit

	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE|CTLFLAG_ANYWRITE,
	    CTLTYPE_INT, "stopfork",
	    SYSCTL_DESCR("Stop process at fork(2)"),
	    sysctl_proc_stop, 0, NULL, 0,
	    CTL_PROC, PROC_CURPROC, PROC_PID_STOPFORK, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE|CTLFLAG_ANYWRITE,
	    CTLTYPE_INT, "stopexec",
	    SYSCTL_DESCR("Stop process at execve(2)"),
	    sysctl_proc_stop, 0, NULL, 0,
	    CTL_PROC, PROC_CURPROC, PROC_PID_STOPEXEC, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE|CTLFLAG_ANYWRITE,
	    CTLTYPE_INT, "stopexit",
	    SYSCTL_DESCR("Stop process before completing exit"),
	    sysctl_proc_stop, 0, NULL, 0,
	    CTL_PROC, PROC_CURPROC, PROC_PID_STOPEXIT, CTL_EOL);
}

void
uid_init(void)
{

	/*
	 * XXXSMP This could be at IPL_SOFTNET, but for now we want
	 * it to be deadlock free, so it must be at IPL_VM.
	 */
	mutex_init(&uihashtbl_lock, MUTEX_DRIVER, IPL_VM);

	/*
	 * Ensure that uid 0 is always in the user hash table, as
	 * sbreserve() expects it available from interrupt context.
	 */
	(void)uid_find(0);
}

struct uidinfo *
uid_find(uid_t uid)
{
	struct uidinfo *uip;
	struct uidinfo *newuip = NULL;
	struct uihashhead *uipp;

	uipp = UIHASH(uid);

 again:
	mutex_enter(&uihashtbl_lock);
	LIST_FOREACH(uip, uipp, ui_hash)
		if (uip->ui_uid == uid) {
			mutex_exit(&uihashtbl_lock);
			if (newuip) {
				mutex_destroy(&newuip->ui_lock);
				free(newuip, M_PROC);
			}
			return uip;
		}
	if (newuip == NULL) {
		mutex_exit(&uihashtbl_lock);
		/* Must not be called from interrupt context. */
		newuip = malloc(sizeof(*uip), M_PROC, M_WAITOK | M_ZERO);
		mutex_init(&newuip->ui_lock, MUTEX_DRIVER, IPL_SOFTNET);
		goto again;
	}
	uip = newuip;

	LIST_INSERT_HEAD(uipp, uip, ui_hash);
	uip->ui_uid = uid;
	mutex_exit(&uihashtbl_lock);

	return uip;
}

/*
 * Change the count associated with number of processes
 * a given user is using.
 */
int
chgproccnt(uid_t uid, int diff)
{
	struct uidinfo *uip;

	if (diff == 0)
		return 0;

	uip = uid_find(uid);
	mutex_enter(&uip->ui_lock);
	uip->ui_proccnt += diff;
	KASSERT(uip->ui_proccnt >= 0);
	mutex_exit(&uip->ui_lock);
	return uip->ui_proccnt;
}

int
chgsbsize(struct uidinfo *uip, u_long *hiwat, u_long to, rlim_t xmax)
{
	rlim_t nsb;

	mutex_enter(&uip->ui_lock);
	nsb = uip->ui_sbsize + to - *hiwat;
	if (to > *hiwat && nsb > xmax) {
		mutex_exit(&uip->ui_lock);
		return 0;
	}
	*hiwat = to;
	uip->ui_sbsize = nsb;
	KASSERT(uip->ui_sbsize >= 0);
	mutex_exit(&uip->ui_lock);
	return 1;
}
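
/*
 * Illustrative userland access to the proc.<pid> sysctl nodes created
 * above (a sketch, not part of this file): reading the calling process's
 * soft CPU-time limit through sysctl(3).
 *
 *	int mib[5] = { CTL_PROC, PROC_CURPROC, PROC_PID_LIMIT,
 *	    PROC_PID_LIMIT_CPU, PROC_PID_LIMIT_TYPE_SOFT };
 *	rlim_t lim;
 *	size_t len = sizeof(lim);
 *	if (sysctl(mib, 5, &lim, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 */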