/*	$NetBSD: kern_resource.c,v 1.101 2006/05/14 21:15:11 elad Exp $	*/

/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_resource.c	8.8 (Berkeley) 2/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_resource.c,v 1.101 2006/05/14 21:15:11 elad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/resourcevar.h>
#include <sys/malloc.h>
#include <sys/namei.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/kauth.h>

#include <sys/mount.h>
#include <sys/sa.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>

/*
 * Maximum process data and stack limits.
 * They are variables so they are patchable.
 */
rlim_t maxdmap = MAXDSIZ;
rlim_t maxsmap = MAXSSIZ;

struct uihashhead *uihashtbl;
u_long	uihash;		/* size of hash table - 1 */
struct simplelock uihashtbl_slock = SIMPLELOCK_INITIALIZER;


/*
 * Resource controls and accounting.
74 */ 75 76 int 77 sys_getpriority(struct lwp *l, void *v, register_t *retval) 78 { 79 struct sys_getpriority_args /* { 80 syscallarg(int) which; 81 syscallarg(id_t) who; 82 } */ *uap = v; 83 struct proc *curp = l->l_proc, *p; 84 int low = NZERO + PRIO_MAX + 1; 85 86 switch (SCARG(uap, which)) { 87 88 case PRIO_PROCESS: 89 if (SCARG(uap, who) == 0) 90 p = curp; 91 else 92 p = pfind(SCARG(uap, who)); 93 if (p == 0) 94 break; 95 low = p->p_nice; 96 break; 97 98 case PRIO_PGRP: { 99 struct pgrp *pg; 100 101 if (SCARG(uap, who) == 0) 102 pg = curp->p_pgrp; 103 else if ((pg = pgfind(SCARG(uap, who))) == NULL) 104 break; 105 LIST_FOREACH(p, &pg->pg_members, p_pglist) { 106 if (p->p_nice < low) 107 low = p->p_nice; 108 } 109 break; 110 } 111 112 case PRIO_USER: 113 if (SCARG(uap, who) == 0) 114 SCARG(uap, who) = kauth_cred_geteuid(curp->p_cred); 115 proclist_lock_read(); 116 PROCLIST_FOREACH(p, &allproc) { 117 if (kauth_cred_geteuid(p->p_cred) == (uid_t) SCARG(uap, who) && 118 p->p_nice < low) 119 low = p->p_nice; 120 } 121 proclist_unlock_read(); 122 break; 123 124 default: 125 return (EINVAL); 126 } 127 if (low == NZERO + PRIO_MAX + 1) 128 return (ESRCH); 129 *retval = low - NZERO; 130 return (0); 131 } 132 133 /* ARGSUSED */ 134 int 135 sys_setpriority(struct lwp *l, void *v, register_t *retval) 136 { 137 struct sys_setpriority_args /* { 138 syscallarg(int) which; 139 syscallarg(id_t) who; 140 syscallarg(int) prio; 141 } */ *uap = v; 142 struct proc *curp = l->l_proc, *p; 143 int found = 0, error = 0; 144 145 switch (SCARG(uap, which)) { 146 147 case PRIO_PROCESS: 148 if (SCARG(uap, who) == 0) 149 p = curp; 150 else 151 p = pfind(SCARG(uap, who)); 152 if (p == 0) 153 break; 154 error = donice(curp, p, SCARG(uap, prio)); 155 found++; 156 break; 157 158 case PRIO_PGRP: { 159 struct pgrp *pg; 160 161 if (SCARG(uap, who) == 0) 162 pg = curp->p_pgrp; 163 else if ((pg = pgfind(SCARG(uap, who))) == NULL) 164 break; 165 LIST_FOREACH(p, &pg->pg_members, p_pglist) { 166 error = donice(curp, p, SCARG(uap, prio)); 167 found++; 168 } 169 break; 170 } 171 172 case PRIO_USER: 173 if (SCARG(uap, who) == 0) 174 SCARG(uap, who) = kauth_cred_geteuid(curp->p_cred); 175 proclist_lock_read(); 176 PROCLIST_FOREACH(p, &allproc) { 177 if (kauth_cred_geteuid(p->p_cred) == (uid_t) SCARG(uap, who)) { 178 error = donice(curp, p, SCARG(uap, prio)); 179 found++; 180 } 181 } 182 proclist_unlock_read(); 183 break; 184 185 default: 186 return (EINVAL); 187 } 188 if (found == 0) 189 return (ESRCH); 190 return (error); 191 } 192 193 int 194 donice(struct proc *curp, struct proc *chgp, int n) 195 { 196 kauth_cred_t cred = curp->p_cred; 197 int s; 198 199 if (kauth_cred_geteuid(cred) && kauth_cred_getuid(cred) && 200 kauth_cred_geteuid(cred) != kauth_cred_geteuid(chgp->p_cred) && 201 kauth_cred_getuid(cred) != kauth_cred_geteuid(chgp->p_cred)) 202 return (EPERM); 203 if (n > PRIO_MAX) 204 n = PRIO_MAX; 205 if (n < PRIO_MIN) 206 n = PRIO_MIN; 207 n += NZERO; 208 if (n < chgp->p_nice && kauth_authorize_generic(cred, KAUTH_GENERIC_ISSUSER, 209 &curp->p_acflag)) 210 return (EACCES); 211 chgp->p_nice = n; 212 SCHED_LOCK(s); 213 (void)resetprocpriority(chgp); 214 SCHED_UNLOCK(s); 215 return (0); 216 } 217 218 /* ARGSUSED */ 219 int 220 sys_setrlimit(struct lwp *l, void *v, register_t *retval) 221 { 222 struct sys_setrlimit_args /* { 223 syscallarg(int) which; 224 syscallarg(const struct rlimit *) rlp; 225 } */ *uap = v; 226 struct proc *p = l->l_proc; 227 int which = SCARG(uap, which); 228 struct rlimit alim; 229 int error; 230 231 
	error = copyin(SCARG(uap, rlp), &alim, sizeof(struct rlimit));
	if (error)
		return (error);
	return (dosetrlimit(p, p->p_cred, which, &alim));
}

int
dosetrlimit(struct proc *p, kauth_cred_t cred, int which, struct rlimit *limp)
{
	struct rlimit *alimp;
	struct plimit *oldplim;
	int error;

	if ((u_int)which >= RLIM_NLIMITS)
		return (EINVAL);

	if (limp->rlim_cur < 0 || limp->rlim_max < 0)
		return (EINVAL);

	alimp = &p->p_rlimit[which];
	/* if we don't change the value, no need to limcopy() */
	if (limp->rlim_cur == alimp->rlim_cur &&
	    limp->rlim_max == alimp->rlim_max)
		return 0;

	if (limp->rlim_cur > limp->rlim_max) {
		/*
		 * This is a programming error.  According to SUSv2, we
		 * should return an error in this case.
		 */
		return (EINVAL);
	}
	if (limp->rlim_max > alimp->rlim_max &&
	    (error = kauth_authorize_generic(cred, KAUTH_GENERIC_ISSUSER,
	    &p->p_acflag)) != 0)
		return (error);

	if (p->p_limit->p_refcnt > 1 &&
	    (p->p_limit->p_lflags & PL_SHAREMOD) == 0) {
		p->p_limit = limcopy(oldplim = p->p_limit);
		limfree(oldplim);
		alimp = &p->p_rlimit[which];
	}

	switch (which) {

	case RLIMIT_DATA:
		if (limp->rlim_cur > maxdmap)
			limp->rlim_cur = maxdmap;
		if (limp->rlim_max > maxdmap)
			limp->rlim_max = maxdmap;
		break;

	case RLIMIT_STACK:
		if (limp->rlim_cur > maxsmap)
			limp->rlim_cur = maxsmap;
		if (limp->rlim_max > maxsmap)
			limp->rlim_max = maxsmap;

		/*
		 * Return EINVAL if the new stack size limit is lower than
		 * current usage.  Otherwise, the process would get SIGSEGV
		 * the moment it tried to access anything on its current
		 * stack.  This conforms to SUSv2.
		 */
		if (limp->rlim_cur < p->p_vmspace->vm_ssize * PAGE_SIZE ||
		    limp->rlim_max < p->p_vmspace->vm_ssize * PAGE_SIZE)
			return (EINVAL);

		/*
		 * The stack is allocated to the max at exec time with
		 * only "rlim_cur" bytes accessible (in other words, the
		 * stack is allocated as two contiguous regions split at
		 * the "rlim_cur" byte boundary).
		 *
		 * Since allocation is done in terms of pages, round up
		 * "rlim_cur" (otherwise the contiguous regions would
		 * overlap).  If the stack limit is going up, make more of
		 * the stack accessible; if it is going down, make it
		 * inaccessible.
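		 *
		 * The range between the old and new current limits is
		 * re-protected below with uvm_map_protect().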
310 */ 311 limp->rlim_cur = round_page(limp->rlim_cur); 312 if (limp->rlim_cur != alimp->rlim_cur) { 313 vaddr_t addr; 314 vsize_t size; 315 vm_prot_t prot; 316 317 if (limp->rlim_cur > alimp->rlim_cur) { 318 prot = VM_PROT_READ | VM_PROT_WRITE; 319 size = limp->rlim_cur - alimp->rlim_cur; 320 addr = (vaddr_t)p->p_vmspace->vm_minsaddr - 321 limp->rlim_cur; 322 } else { 323 prot = VM_PROT_NONE; 324 size = alimp->rlim_cur - limp->rlim_cur; 325 addr = (vaddr_t)p->p_vmspace->vm_minsaddr - 326 alimp->rlim_cur; 327 } 328 (void) uvm_map_protect(&p->p_vmspace->vm_map, 329 addr, addr+size, prot, FALSE); 330 } 331 break; 332 333 case RLIMIT_NOFILE: 334 if (limp->rlim_cur > maxfiles) 335 limp->rlim_cur = maxfiles; 336 if (limp->rlim_max > maxfiles) 337 limp->rlim_max = maxfiles; 338 break; 339 340 case RLIMIT_NPROC: 341 if (limp->rlim_cur > maxproc) 342 limp->rlim_cur = maxproc; 343 if (limp->rlim_max > maxproc) 344 limp->rlim_max = maxproc; 345 break; 346 } 347 *alimp = *limp; 348 return (0); 349 } 350 351 /* ARGSUSED */ 352 int 353 sys_getrlimit(struct lwp *l, void *v, register_t *retval) 354 { 355 struct sys_getrlimit_args /* { 356 syscallarg(int) which; 357 syscallarg(struct rlimit *) rlp; 358 } */ *uap = v; 359 struct proc *p = l->l_proc; 360 int which = SCARG(uap, which); 361 362 if ((u_int)which >= RLIM_NLIMITS) 363 return (EINVAL); 364 return (copyout(&p->p_rlimit[which], SCARG(uap, rlp), 365 sizeof(struct rlimit))); 366 } 367 368 /* 369 * Transform the running time and tick information in proc p into user, 370 * system, and interrupt time usage. 371 */ 372 void 373 calcru(struct proc *p, struct timeval *up, struct timeval *sp, 374 struct timeval *ip) 375 { 376 u_quad_t u, st, ut, it, tot; 377 unsigned long sec; 378 long usec; 379 int s; 380 struct timeval tv; 381 struct lwp *l; 382 383 s = splstatclock(); 384 st = p->p_sticks; 385 ut = p->p_uticks; 386 it = p->p_iticks; 387 splx(s); 388 389 sec = p->p_rtime.tv_sec; 390 usec = p->p_rtime.tv_usec; 391 LIST_FOREACH(l, &p->p_lwps, l_sibling) { 392 if (l->l_stat == LSONPROC) { 393 struct schedstate_percpu *spc; 394 395 KDASSERT(l->l_cpu != NULL); 396 spc = &l->l_cpu->ci_schedstate; 397 398 /* 399 * Adjust for the current time slice. This is 400 * actually fairly important since the error 401 * here is on the order of a time quantum, 402 * which is much greater than the sampling 403 * error. 
404 */ 405 microtime(&tv); 406 sec += tv.tv_sec - spc->spc_runtime.tv_sec; 407 usec += tv.tv_usec - spc->spc_runtime.tv_usec; 408 } 409 } 410 411 tot = st + ut + it; 412 u = sec * 1000000ull + usec; 413 414 if (tot == 0) { 415 /* No ticks, so can't use to share time out, split 50-50 */ 416 st = ut = u / 2; 417 } else { 418 st = (u * st) / tot; 419 ut = (u * ut) / tot; 420 } 421 sp->tv_sec = st / 1000000; 422 sp->tv_usec = st % 1000000; 423 up->tv_sec = ut / 1000000; 424 up->tv_usec = ut % 1000000; 425 if (ip != NULL) { 426 if (it != 0) 427 it = (u * it) / tot; 428 ip->tv_sec = it / 1000000; 429 ip->tv_usec = it % 1000000; 430 } 431 } 432 433 /* ARGSUSED */ 434 int 435 sys_getrusage(struct lwp *l, void *v, register_t *retval) 436 { 437 struct sys_getrusage_args /* { 438 syscallarg(int) who; 439 syscallarg(struct rusage *) rusage; 440 } */ *uap = v; 441 struct rusage *rup; 442 struct proc *p = l->l_proc; 443 444 switch (SCARG(uap, who)) { 445 446 case RUSAGE_SELF: 447 rup = &p->p_stats->p_ru; 448 calcru(p, &rup->ru_utime, &rup->ru_stime, NULL); 449 break; 450 451 case RUSAGE_CHILDREN: 452 rup = &p->p_stats->p_cru; 453 break; 454 455 default: 456 return (EINVAL); 457 } 458 return (copyout(rup, SCARG(uap, rusage), sizeof(struct rusage))); 459 } 460 461 void 462 ruadd(struct rusage *ru, struct rusage *ru2) 463 { 464 long *ip, *ip2; 465 int i; 466 467 timeradd(&ru->ru_utime, &ru2->ru_utime, &ru->ru_utime); 468 timeradd(&ru->ru_stime, &ru2->ru_stime, &ru->ru_stime); 469 if (ru->ru_maxrss < ru2->ru_maxrss) 470 ru->ru_maxrss = ru2->ru_maxrss; 471 ip = &ru->ru_first; ip2 = &ru2->ru_first; 472 for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--) 473 *ip++ += *ip2++; 474 } 475 476 /* 477 * Make a copy of the plimit structure. 478 * We share these structures copy-on-write after fork, 479 * and copy when a limit is changed. 480 */ 481 struct plimit * 482 limcopy(struct plimit *lim) 483 { 484 struct plimit *newlim; 485 size_t l = 0; 486 487 simple_lock(&lim->p_slock); 488 if (lim->pl_corename != defcorename) 489 l = strlen(lim->pl_corename) + 1; 490 simple_unlock(&lim->p_slock); 491 492 newlim = pool_get(&plimit_pool, PR_WAITOK); 493 simple_lock_init(&newlim->p_slock); 494 newlim->p_lflags = 0; 495 newlim->p_refcnt = 1; 496 newlim->pl_corename = (l != 0) 497 ? 
	    malloc(l, M_TEMP, M_WAITOK) : defcorename;

	simple_lock(&lim->p_slock);
	memcpy(newlim->pl_rlimit, lim->pl_rlimit,
	    sizeof(struct rlimit) * RLIM_NLIMITS);

	if (l != 0)
		strlcpy(newlim->pl_corename, lim->pl_corename, l);
	simple_unlock(&lim->p_slock);

	return (newlim);
}

void
limfree(struct plimit *lim)
{
	int n;

	simple_lock(&lim->p_slock);
	n = --lim->p_refcnt;
	simple_unlock(&lim->p_slock);
	if (n > 0)
		return;
#ifdef DIAGNOSTIC
	if (n < 0)
		panic("limfree");
#endif
	if (lim->pl_corename != defcorename)
		free(lim->pl_corename, M_TEMP);
	pool_put(&plimit_pool, lim);
}

struct pstats *
pstatscopy(struct pstats *ps)
{

	struct pstats *newps;

	newps = pool_get(&pstats_pool, PR_WAITOK);

	memset(&newps->pstat_startzero, 0,
	    (unsigned) ((caddr_t)&newps->pstat_endzero -
	    (caddr_t)&newps->pstat_startzero));
	memcpy(&newps->pstat_startcopy, &ps->pstat_startcopy,
	    ((caddr_t)&newps->pstat_endcopy -
	    (caddr_t)&newps->pstat_startcopy));

	return (newps);

}

void
pstatsfree(struct pstats *ps)
{

	pool_put(&pstats_pool, ps);
}

/*
 * sysctl interface in five parts
 */

/*
 * a routine for sysctl proc subtree helpers that need to pick a valid
 * process by pid.
 */
static int
sysctl_proc_findproc(struct proc *p, struct proc **p2, pid_t pid)
{
	struct proc *ptmp;
	int error = 0;

	if (pid == PROC_CURPROC)
		ptmp = p;
	else if ((ptmp = pfind(pid)) == NULL)
		error = ESRCH;
	else {
		/*
		 * suid proc of ours or proc not ours
		 */
		if (kauth_cred_getuid(p->p_cred) !=
		    kauth_cred_getuid(ptmp->p_cred) ||
		    kauth_cred_getuid(p->p_cred) !=
		    kauth_cred_getsvuid(ptmp->p_cred))
			error = kauth_authorize_generic(p->p_cred,
			    KAUTH_GENERIC_ISSUSER, &p->p_acflag);

		/*
		 * sgid proc has sgid back to us temporarily
		 */
		else if (kauth_cred_getgid(ptmp->p_cred) !=
		    kauth_cred_getsvgid(ptmp->p_cred))
			error = kauth_authorize_generic(p->p_cred,
			    KAUTH_GENERIC_ISSUSER, &p->p_acflag);

		/*
		 * our rgid must be in target's group list (ie,
		 * sub-processes started by a sgid process)
		 */
		else {
			int ismember = 0;

			if (kauth_cred_ismember_gid(p->p_cred,
			    kauth_cred_getgid(ptmp->p_cred), &ismember) != 0 ||
			    !ismember) {
				error = kauth_authorize_generic(p->p_cred,
				    KAUTH_GENERIC_ISSUSER, &p->p_acflag);
			}
		}
	}

	*p2 = ptmp;
	return (error);
}

/*
 * sysctl helper routine for setting a process's specific corefile
 * name.  picks the process based on the given pid and checks the
 * correctness of the new value.
 */
static int
sysctl_proc_corename(SYSCTLFN_ARGS)
{
	struct proc *ptmp, *p;
	struct plimit *lim;
	int error = 0, len;
	char *cname;
	char *tmp;
	struct sysctlnode node;

	/*
	 * is this all correct?
	 */
	if (namelen != 0)
		return (EINVAL);
	if (name[-1] != PROC_PID_CORENAME)
		return (EINVAL);

	/*
	 * whom are we tweaking?
635 */ 636 p = l->l_proc; 637 error = sysctl_proc_findproc(p, &ptmp, (pid_t)name[-2]); 638 if (error) 639 return (error); 640 641 cname = PNBUF_GET(); 642 /* 643 * let them modify a temporary copy of the core name 644 */ 645 node = *rnode; 646 strlcpy(cname, ptmp->p_limit->pl_corename, MAXPATHLEN); 647 node.sysctl_data = cname; 648 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 649 650 /* 651 * if that failed, or they have nothing new to say, or we've 652 * heard it before... 653 */ 654 if (error || newp == NULL || 655 strcmp(cname, ptmp->p_limit->pl_corename) == 0) { 656 goto done; 657 } 658 659 /* 660 * no error yet and cname now has the new core name in it. 661 * let's see if it looks acceptable. it must be either "core" 662 * or end in ".core" or "/core". 663 */ 664 len = strlen(cname); 665 if (len < 4) { 666 error = EINVAL; 667 } else if (strcmp(cname + len - 4, "core") != 0) { 668 error = EINVAL; 669 } else if (len > 4 && cname[len - 5] != '/' && cname[len - 5] != '.') { 670 error = EINVAL; 671 } 672 if (error != 0) { 673 goto done; 674 } 675 676 /* 677 * hmm...looks good. now...where do we put it? 678 */ 679 tmp = malloc(len + 1, M_TEMP, M_WAITOK|M_CANFAIL); 680 if (tmp == NULL) { 681 error = ENOMEM; 682 goto done; 683 } 684 strlcpy(tmp, cname, len + 1); 685 686 lim = ptmp->p_limit; 687 if (lim->p_refcnt > 1 && (lim->p_lflags & PL_SHAREMOD) == 0) { 688 ptmp->p_limit = limcopy(lim); 689 limfree(lim); 690 lim = ptmp->p_limit; 691 } 692 if (lim->pl_corename != defcorename) 693 free(lim->pl_corename, M_TEMP); 694 lim->pl_corename = tmp; 695 done: 696 PNBUF_PUT(cname); 697 return error; 698 } 699 700 /* 701 * sysctl helper routine for checking/setting a process's stop flags, 702 * one for fork and one for exec. 703 */ 704 static int 705 sysctl_proc_stop(SYSCTLFN_ARGS) 706 { 707 struct proc *p, *ptmp; 708 int i, f, error = 0; 709 struct sysctlnode node; 710 711 if (namelen != 0) 712 return (EINVAL); 713 714 p = l->l_proc; 715 error = sysctl_proc_findproc(p, &ptmp, (pid_t)name[-2]); 716 if (error) 717 return (error); 718 719 switch (rnode->sysctl_num) { 720 case PROC_PID_STOPFORK: 721 f = P_STOPFORK; 722 break; 723 case PROC_PID_STOPEXEC: 724 f = P_STOPEXEC; 725 break; 726 case PROC_PID_STOPEXIT: 727 f = P_STOPEXIT; 728 break; 729 default: 730 return (EINVAL); 731 } 732 733 i = (ptmp->p_flag & f) ? 1 : 0; 734 node = *rnode; 735 node.sysctl_data = &i; 736 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 737 if (error || newp == NULL) 738 return (error); 739 740 if (i) 741 ptmp->p_flag |= f; 742 else 743 ptmp->p_flag &= ~f; 744 745 return (0); 746 } 747 748 /* 749 * sysctl helper routine for a process's rlimits as exposed by sysctl. 
750 */ 751 static int 752 sysctl_proc_plimit(SYSCTLFN_ARGS) 753 { 754 struct proc *ptmp, *p; 755 u_int limitno; 756 int which, error = 0; 757 struct rlimit alim; 758 struct sysctlnode node; 759 760 if (namelen != 0) 761 return (EINVAL); 762 763 which = name[-1]; 764 if (which != PROC_PID_LIMIT_TYPE_SOFT && 765 which != PROC_PID_LIMIT_TYPE_HARD) 766 return (EINVAL); 767 768 limitno = name[-2] - 1; 769 if (limitno >= RLIM_NLIMITS) 770 return (EINVAL); 771 772 if (name[-3] != PROC_PID_LIMIT) 773 return (EINVAL); 774 775 p = l->l_proc; 776 error = sysctl_proc_findproc(p, &ptmp, (pid_t)name[-4]); 777 if (error) 778 return (error); 779 780 node = *rnode; 781 memcpy(&alim, &ptmp->p_rlimit[limitno], sizeof(alim)); 782 if (which == PROC_PID_LIMIT_TYPE_HARD) 783 node.sysctl_data = &alim.rlim_max; 784 else 785 node.sysctl_data = &alim.rlim_cur; 786 787 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 788 if (error || newp == NULL) 789 return (error); 790 791 return (dosetrlimit(ptmp, p->p_cred, limitno, &alim)); 792 } 793 794 /* 795 * and finally, the actually glue that sticks it to the tree 796 */ 797 SYSCTL_SETUP(sysctl_proc_setup, "sysctl proc subtree setup") 798 { 799 800 sysctl_createv(clog, 0, NULL, NULL, 801 CTLFLAG_PERMANENT, 802 CTLTYPE_NODE, "proc", NULL, 803 NULL, 0, NULL, 0, 804 CTL_PROC, CTL_EOL); 805 sysctl_createv(clog, 0, NULL, NULL, 806 CTLFLAG_PERMANENT|CTLFLAG_ANYNUMBER, 807 CTLTYPE_NODE, "curproc", 808 SYSCTL_DESCR("Per-process settings"), 809 NULL, 0, NULL, 0, 810 CTL_PROC, PROC_CURPROC, CTL_EOL); 811 812 sysctl_createv(clog, 0, NULL, NULL, 813 CTLFLAG_PERMANENT|CTLFLAG_READONLY2|CTLFLAG_ANYWRITE, 814 CTLTYPE_STRING, "corename", 815 SYSCTL_DESCR("Core file name"), 816 sysctl_proc_corename, 0, NULL, MAXPATHLEN, 817 CTL_PROC, PROC_CURPROC, PROC_PID_CORENAME, CTL_EOL); 818 sysctl_createv(clog, 0, NULL, NULL, 819 CTLFLAG_PERMANENT, 820 CTLTYPE_NODE, "rlimit", 821 SYSCTL_DESCR("Process limits"), 822 NULL, 0, NULL, 0, 823 CTL_PROC, PROC_CURPROC, PROC_PID_LIMIT, CTL_EOL); 824 825 #define create_proc_plimit(s, n) do { \ 826 sysctl_createv(clog, 0, NULL, NULL, \ 827 CTLFLAG_PERMANENT, \ 828 CTLTYPE_NODE, s, \ 829 SYSCTL_DESCR("Process " s " limits"), \ 830 NULL, 0, NULL, 0, \ 831 CTL_PROC, PROC_CURPROC, PROC_PID_LIMIT, n, \ 832 CTL_EOL); \ 833 sysctl_createv(clog, 0, NULL, NULL, \ 834 CTLFLAG_PERMANENT|CTLFLAG_READWRITE|CTLFLAG_ANYWRITE, \ 835 CTLTYPE_QUAD, "soft", \ 836 SYSCTL_DESCR("Process soft " s " limit"), \ 837 sysctl_proc_plimit, 0, NULL, 0, \ 838 CTL_PROC, PROC_CURPROC, PROC_PID_LIMIT, n, \ 839 PROC_PID_LIMIT_TYPE_SOFT, CTL_EOL); \ 840 sysctl_createv(clog, 0, NULL, NULL, \ 841 CTLFLAG_PERMANENT|CTLFLAG_READWRITE|CTLFLAG_ANYWRITE, \ 842 CTLTYPE_QUAD, "hard", \ 843 SYSCTL_DESCR("Process hard " s " limit"), \ 844 sysctl_proc_plimit, 0, NULL, 0, \ 845 CTL_PROC, PROC_CURPROC, PROC_PID_LIMIT, n, \ 846 PROC_PID_LIMIT_TYPE_HARD, CTL_EOL); \ 847 } while (0/*CONSTCOND*/) 848 849 create_proc_plimit("cputime", PROC_PID_LIMIT_CPU); 850 create_proc_plimit("filesize", PROC_PID_LIMIT_FSIZE); 851 create_proc_plimit("datasize", PROC_PID_LIMIT_DATA); 852 create_proc_plimit("stacksize", PROC_PID_LIMIT_STACK); 853 create_proc_plimit("coredumpsize", PROC_PID_LIMIT_CORE); 854 create_proc_plimit("memoryuse", PROC_PID_LIMIT_RSS); 855 create_proc_plimit("memorylocked", PROC_PID_LIMIT_MEMLOCK); 856 create_proc_plimit("maxproc", PROC_PID_LIMIT_NPROC); 857 create_proc_plimit("descriptors", PROC_PID_LIMIT_NOFILE); 858 create_proc_plimit("sbsize", PROC_PID_LIMIT_SBSIZE); 859 860 #undef create_proc_plimit 861 862 
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE|CTLFLAG_ANYWRITE,
	    CTLTYPE_INT, "stopfork",
	    SYSCTL_DESCR("Stop process at fork(2)"),
	    sysctl_proc_stop, 0, NULL, 0,
	    CTL_PROC, PROC_CURPROC, PROC_PID_STOPFORK, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE|CTLFLAG_ANYWRITE,
	    CTLTYPE_INT, "stopexec",
	    SYSCTL_DESCR("Stop process at execve(2)"),
	    sysctl_proc_stop, 0, NULL, 0,
	    CTL_PROC, PROC_CURPROC, PROC_PID_STOPEXEC, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE|CTLFLAG_ANYWRITE,
	    CTLTYPE_INT, "stopexit",
	    SYSCTL_DESCR("Stop process before completing exit"),
	    sysctl_proc_stop, 0, NULL, 0,
	    CTL_PROC, PROC_CURPROC, PROC_PID_STOPEXIT, CTL_EOL);
}

struct uidinfo *
uid_find(uid_t uid)
{
	struct uidinfo *uip;
	struct uidinfo *newuip = NULL;
	struct uihashhead *uipp;

	uipp = UIHASH(uid);

again:
	simple_lock(&uihashtbl_slock);
	LIST_FOREACH(uip, uipp, ui_hash)
		if (uip->ui_uid == uid) {
			simple_unlock(&uihashtbl_slock);
			if (newuip)
				free(newuip, M_PROC);
			return uip;
		}

	if (newuip == NULL) {
		simple_unlock(&uihashtbl_slock);
		newuip = malloc(sizeof(*uip), M_PROC, M_WAITOK | M_ZERO);
		goto again;
	}
	uip = newuip;

	LIST_INSERT_HEAD(uipp, uip, ui_hash);
	uip->ui_uid = uid;
	simple_lock_init(&uip->ui_slock);
	simple_unlock(&uihashtbl_slock);

	return uip;
}

/*
 * Change the count associated with number of processes
 * a given user is using.
 */
int
chgproccnt(uid_t uid, int diff)
{
	struct uidinfo *uip;
	int s;

	if (diff == 0)
		return 0;

	uip = uid_find(uid);
	UILOCK(uip, s);
	uip->ui_proccnt += diff;
	KASSERT(uip->ui_proccnt >= 0);
	UIUNLOCK(uip, s);
	return uip->ui_proccnt;
}

int
chgsbsize(struct uidinfo *uip, u_long *hiwat, u_long to, rlim_t xmax)
{
	rlim_t nsb;
	int s;

	UILOCK(uip, s);
	nsb = uip->ui_sbsize + to - *hiwat;
	if (to > *hiwat && nsb > xmax) {
		UIUNLOCK(uip, s);
		splx(s);
		return 0;
	}
	*hiwat = to;
	uip->ui_sbsize = nsb;
	KASSERT(uip->ui_sbsize >= 0);
	UIUNLOCK(uip, s);
	return 1;
}