/*	$NetBSD: kern_resource.c,v 1.75 2003/12/06 04:25:57 atatat Exp $	*/

/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_resource.c	8.8 (Berkeley) 2/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_resource.c,v 1.75 2003/12/06 04:25:57 atatat Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/resourcevar.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/sysctl.h>

#include <sys/mount.h>
#include <sys/sa.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>

/*
 * Maximum process data and stack limits.
 * They are variables so they are patchable.
 *
 * XXXX Do we really need them to be patchable?
 */
rlim_t maxdmap = MAXDSIZ;
rlim_t maxsmap = MAXSSIZ;

/*
 * Resource controls and accounting.
 */

int
sys_getpriority(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys_getpriority_args /* {
		syscallarg(int) which;
		syscallarg(int) who;
	} */ *uap = v;
	struct proc *curp = l->l_proc, *p;
	int low = NZERO + PRIO_MAX + 1;

	switch (SCARG(uap, which)) {

	case PRIO_PROCESS:
		if (SCARG(uap, who) == 0)
			p = curp;
		else
			p = pfind(SCARG(uap, who));
		if (p == 0)
			break;
		low = p->p_nice;
		break;

	case PRIO_PGRP: {
		struct pgrp *pg;

		if (SCARG(uap, who) == 0)
			pg = curp->p_pgrp;
		else if ((pg = pgfind(SCARG(uap, who))) == NULL)
			break;
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			if (p->p_nice < low)
				low = p->p_nice;
		}
		break;
	}

	case PRIO_USER:
		if (SCARG(uap, who) == 0)
			SCARG(uap, who) = curp->p_ucred->cr_uid;
		proclist_lock_read();
		LIST_FOREACH(p, &allproc, p_list) {
			if (p->p_ucred->cr_uid == (uid_t) SCARG(uap, who) &&
			    p->p_nice < low)
				low = p->p_nice;
		}
		proclist_unlock_read();
		break;

	default:
		return (EINVAL);
	}
	if (low == NZERO + PRIO_MAX + 1)
		return (ESRCH);
	*retval = low - NZERO;
	return (0);
}

/* ARGSUSED */
int
sys_setpriority(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys_setpriority_args /* {
		syscallarg(int) which;
		syscallarg(int) who;
		syscallarg(int) prio;
	} */ *uap = v;
	struct proc *curp = l->l_proc, *p;
	int found = 0, error = 0;

	switch (SCARG(uap, which)) {

	case PRIO_PROCESS:
		if (SCARG(uap, who) == 0)
			p = curp;
		else
			p = pfind(SCARG(uap, who));
		if (p == 0)
			break;
		error = donice(curp, p, SCARG(uap, prio));
		found++;
		break;

	case PRIO_PGRP: {
		struct pgrp *pg;

		if (SCARG(uap, who) == 0)
			pg = curp->p_pgrp;
		else if ((pg = pgfind(SCARG(uap, who))) == NULL)
			break;
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			error = donice(curp, p, SCARG(uap, prio));
			found++;
		}
		break;
	}

	case PRIO_USER:
		if (SCARG(uap, who) == 0)
			SCARG(uap, who) = curp->p_ucred->cr_uid;
		proclist_lock_read();
		LIST_FOREACH(p, &allproc, p_list) {
			if (p->p_ucred->cr_uid == (uid_t) SCARG(uap, who)) {
				error = donice(curp, p, SCARG(uap, prio));
				found++;
			}
		}
		proclist_unlock_read();
		break;

	default:
		return (EINVAL);
	}
	if (found == 0)
		return (ESRCH);
	return (error);
}

/*
 * Change the nice value of chgp on behalf of curp, applying the usual
 * credential checks; lowering the nice value requires superuser privilege.
 */
int
donice(curp, chgp, n)
	struct proc *curp, *chgp;
	int n;
{
	struct pcred *pcred = curp->p_cred;
	int s;

	if (pcred->pc_ucred->cr_uid && pcred->p_ruid &&
	    pcred->pc_ucred->cr_uid != chgp->p_ucred->cr_uid &&
	    pcred->p_ruid != chgp->p_ucred->cr_uid)
		return (EPERM);
	if (n > PRIO_MAX)
		n = PRIO_MAX;
	if (n < PRIO_MIN)
		n = PRIO_MIN;
	n += NZERO;
	if (n < chgp->p_nice && suser(pcred->pc_ucred, &curp->p_acflag))
		return (EACCES);
	chgp->p_nice = n;
	SCHED_LOCK(s);
	(void)resetprocpriority(chgp);
	SCHED_UNLOCK(s);
	return (0);
}

/* ARGSUSED */
int
sys_setrlimit(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys_setrlimit_args /* {
		syscallarg(int) which;
		syscallarg(const struct rlimit *) rlp;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	int which = SCARG(uap, which);
	struct rlimit alim;
	int error;

	error = copyin(SCARG(uap, rlp), &alim, sizeof(struct rlimit));
	if (error)
		return (error);
	return (dosetrlimit(p, p->p_cred, which, &alim));
}

int
dosetrlimit(p, cred, which, limp)
	struct proc *p;
	struct pcred *cred;
	int which;
	struct rlimit *limp;
{
	struct rlimit *alimp;
	struct plimit *newplim;
	int error;

	if ((u_int)which >= RLIM_NLIMITS)
		return (EINVAL);

	if (limp->rlim_cur < 0 || limp->rlim_max < 0)
		return (EINVAL);

	alimp = &p->p_rlimit[which];
	/* if we don't change the value, no need to limcopy() */
	if (limp->rlim_cur == alimp->rlim_cur &&
	    limp->rlim_max == alimp->rlim_max)
		return 0;

	if (limp->rlim_cur > limp->rlim_max) {
		/*
		 * This is a programming error.  According to SUSv2, we
		 * should return an error in this case.
		 */
		return (EINVAL);
	}
	if (limp->rlim_max > alimp->rlim_max
	    && (error = suser(cred->pc_ucred, &p->p_acflag)) != 0)
		return (error);

	if (p->p_limit->p_refcnt > 1 &&
	    (p->p_limit->p_lflags & PL_SHAREMOD) == 0) {
		newplim = limcopy(p->p_limit);
		limfree(p->p_limit);
		p->p_limit = newplim;
		alimp = &p->p_rlimit[which];
	}

	switch (which) {

	case RLIMIT_DATA:
		if (limp->rlim_cur > maxdmap)
			limp->rlim_cur = maxdmap;
		if (limp->rlim_max > maxdmap)
			limp->rlim_max = maxdmap;
		break;

	case RLIMIT_STACK:
		if (limp->rlim_cur > maxsmap)
			limp->rlim_cur = maxsmap;
		if (limp->rlim_max > maxsmap)
			limp->rlim_max = maxsmap;

		/*
		 * Return EINVAL if the new stack size limit is lower than
		 * current usage.  Otherwise, the process would get SIGSEGV
		 * the moment it tried to access anything on its current
		 * stack.  This conforms to SUSv2.
		 */
		if (limp->rlim_cur < p->p_vmspace->vm_ssize * PAGE_SIZE
		    || limp->rlim_max < p->p_vmspace->vm_ssize * PAGE_SIZE)
			return (EINVAL);

		/*
		 * The stack is allocated to the max at exec time with
		 * only "rlim_cur" bytes accessible (in other words, the
		 * stack is allocated as two contiguous regions split at
		 * the "rlim_cur" byte boundary).
		 *
		 * Since allocation is done in terms of pages, round up
		 * "rlim_cur" (otherwise, the contiguous regions would
		 * overlap).  If the stack limit is going up, make more
		 * pages accessible; if it is going down, make them
		 * inaccessible.
		 */
		limp->rlim_cur = round_page(limp->rlim_cur);
		if (limp->rlim_cur != alimp->rlim_cur) {
			vaddr_t addr;
			vsize_t size;
			vm_prot_t prot;

			if (limp->rlim_cur > alimp->rlim_cur) {
				prot = VM_PROT_READ | VM_PROT_WRITE;
				size = limp->rlim_cur - alimp->rlim_cur;
				addr = USRSTACK - limp->rlim_cur;
			} else {
				prot = VM_PROT_NONE;
				size = alimp->rlim_cur - limp->rlim_cur;
				addr = USRSTACK - alimp->rlim_cur;
			}
			(void) uvm_map_protect(&p->p_vmspace->vm_map,
			    addr, addr+size, prot, FALSE);
		}
		break;

	case RLIMIT_NOFILE:
		if (limp->rlim_cur > maxfiles)
			limp->rlim_cur = maxfiles;
		if (limp->rlim_max > maxfiles)
			limp->rlim_max = maxfiles;
		break;

	case RLIMIT_NPROC:
		if (limp->rlim_cur > maxproc)
			limp->rlim_cur = maxproc;
		if (limp->rlim_max > maxproc)
			limp->rlim_max = maxproc;
		break;
	}
	*alimp = *limp;
	return (0);
}

/* ARGSUSED */
int
sys_getrlimit(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys_getrlimit_args /* {
		syscallarg(int) which;
		syscallarg(struct rlimit *) rlp;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	int which = SCARG(uap, which);

	if ((u_int)which >= RLIM_NLIMITS)
		return (EINVAL);
	return (copyout(&p->p_rlimit[which], SCARG(uap, rlp),
	    sizeof(struct rlimit)));
}

/*
 * Transform the running time and tick information in proc p into user,
 * system, and interrupt time usage.
 */
void
calcru(p, up, sp, ip)
	struct proc *p;
	struct timeval *up;
	struct timeval *sp;
	struct timeval *ip;
{
	u_quad_t u, st, ut, it, tot;
	unsigned long sec;
	long usec;
	int s;
	struct timeval tv;
	struct lwp *l;

	s = splstatclock();
	st = p->p_sticks;
	ut = p->p_uticks;
	it = p->p_iticks;
	splx(s);

	sec = p->p_rtime.tv_sec;
	usec = p->p_rtime.tv_usec;
	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		if (l->l_stat == LSONPROC) {
			struct schedstate_percpu *spc;

			KDASSERT(l->l_cpu != NULL);
			spc = &l->l_cpu->ci_schedstate;

			/*
			 * Adjust for the current time slice.  This is
			 * actually fairly important since the error
			 * here is on the order of a time quantum,
			 * which is much greater than the sampling
			 * error.
			 */
			microtime(&tv);
			sec += tv.tv_sec - spc->spc_runtime.tv_sec;
			usec += tv.tv_usec - spc->spc_runtime.tv_usec;
		}
	}

	tot = st + ut + it;
	u = sec * 1000000ull + usec;

	if (tot == 0) {
		/* No ticks, so we can't apportion the time; split it 50-50. */
		st = ut = u / 2;
	} else {
		st = (u * st) / tot;
		ut = (u * ut) / tot;
	}
	sp->tv_sec = st / 1000000;
	sp->tv_usec = st % 1000000;
	up->tv_sec = ut / 1000000;
	up->tv_usec = ut % 1000000;
	if (ip != NULL) {
		if (it != 0)
			it = (u * it) / tot;
		ip->tv_sec = it / 1000000;
		ip->tv_usec = it % 1000000;
	}
}

/* ARGSUSED */
int
sys_getrusage(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys_getrusage_args /* {
		syscallarg(int) who;
		syscallarg(struct rusage *) rusage;
	} */ *uap = v;
	struct rusage *rup;
	struct proc *p = l->l_proc;

	switch (SCARG(uap, who)) {

	case RUSAGE_SELF:
		rup = &p->p_stats->p_ru;
		calcru(p, &rup->ru_utime, &rup->ru_stime, NULL);
		break;

	case RUSAGE_CHILDREN:
		rup = &p->p_stats->p_cru;
		break;

	default:
		return (EINVAL);
	}
	return (copyout(rup, SCARG(uap, rusage), sizeof(struct rusage)));
}

/*
 * Accumulate the resource usage counts of ru2 into ru.
 */
void
ruadd(ru, ru2)
	struct rusage *ru, *ru2;
{
	long *ip, *ip2;
	int i;

	timeradd(&ru->ru_utime, &ru2->ru_utime, &ru->ru_utime);
	timeradd(&ru->ru_stime, &ru2->ru_stime, &ru->ru_stime);
	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
	ip = &ru->ru_first; ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
		*ip++ += *ip2++;
}

/*
 * Make a copy of the plimit structure.
 * We share these structures copy-on-write after fork,
 * and copy when a limit is changed.
 */
struct plimit *
limcopy(lim)
	struct plimit *lim;
{
	struct plimit *newlim;
	size_t l;

	newlim = pool_get(&plimit_pool, PR_WAITOK);
	memcpy(newlim->pl_rlimit, lim->pl_rlimit,
	    sizeof(struct rlimit) * RLIM_NLIMITS);
	if (lim->pl_corename == defcorename) {
		newlim->pl_corename = defcorename;
	} else {
		l = strlen(lim->pl_corename) + 1;
		newlim->pl_corename = malloc(l, M_TEMP, M_WAITOK);
		strlcpy(newlim->pl_corename, lim->pl_corename, l);
	}
	newlim->p_lflags = 0;
	newlim->p_refcnt = 1;
	return (newlim);
}

/*
 * Drop a reference to a plimit structure, freeing it (and any
 * non-default core name) when the last reference goes away.
 */
void
limfree(lim)
	struct plimit *lim;
{

	if (--lim->p_refcnt > 0)
		return;
#ifdef DIAGNOSTIC
	if (lim->p_refcnt < 0)
		panic("limfree");
#endif
	if (lim->pl_corename != defcorename)
		free(lim->pl_corename, M_TEMP);
	pool_put(&plimit_pool, lim);
}

/*
 * Allocate a fresh pstats structure: zero the region between
 * pstat_startzero and pstat_endzero, and copy the region between
 * pstat_startcopy and pstat_endcopy from the given stats.
 */
struct pstats *
pstatscopy(ps)
	struct pstats *ps;
{
	struct pstats *newps;

	newps = pool_get(&pstats_pool, PR_WAITOK);

	memset(&newps->pstat_startzero, 0,
	    (unsigned) ((caddr_t)&newps->pstat_endzero -
	    (caddr_t)&newps->pstat_startzero));
	memcpy(&newps->pstat_startcopy, &ps->pstat_startcopy,
	    ((caddr_t)&newps->pstat_endcopy -
	    (caddr_t)&newps->pstat_startcopy));

	return (newps);
}

void
pstatsfree(ps)
	struct pstats *ps;
{

	pool_put(&pstats_pool, ps);
}

/*
 * sysctl interface in five parts
 */

/*
 * a routine for sysctl proc subtree helpers that need to pick a valid
 * process by pid.
 */
static int
sysctl_proc_findproc(struct proc *p, struct proc **p2, pid_t pid)
{
	struct proc *ptmp;
	int i, error = 0;

	if (pid == PROC_CURPROC)
		ptmp = p;
	else if ((ptmp = pfind(pid)) == NULL)
		error = ESRCH;
	else {
		/*
		 * suid proc of ours or proc not ours
		 */
		if (p->p_cred->p_ruid != ptmp->p_cred->p_ruid ||
		    p->p_cred->p_ruid != ptmp->p_cred->p_svuid)
			error = suser(p->p_ucred, &p->p_acflag);

		/*
		 * sgid proc has sgid back to us temporarily
		 */
		else if (ptmp->p_cred->p_rgid != ptmp->p_cred->p_svgid)
			error = suser(p->p_ucred, &p->p_acflag);

		/*
		 * our rgid must be in target's group list (i.e.,
		 * sub-processes started by an sgid process)
		 */
		else {
			for (i = 0; i < p->p_ucred->cr_ngroups; i++) {
				if (p->p_ucred->cr_groups[i] ==
				    ptmp->p_cred->p_rgid)
					break;
			}
			if (i == p->p_ucred->cr_ngroups)
				error = suser(p->p_ucred, &p->p_acflag);
		}
	}

	*p2 = ptmp;
	return (error);
}

/*
 * sysctl helper routine for setting a process's specific corefile
 * name.  picks the process based on the given pid and checks the
 * correctness of the new value.
 */
static int
sysctl_proc_corename(SYSCTLFN_ARGS)
{
	struct proc *ptmp, *p;
	struct plimit *newplim;
	int error = 0, len;
	char cname[MAXPATHLEN], *tmp;
	struct sysctlnode node;

	/*
	 * is this all correct?
	 */
	if (namelen != 0)
		return (EINVAL);
	if (name[-1] != PROC_PID_CORENAME)
		return (EINVAL);

	/*
	 * whom are we tweaking?
	 */
	p = l->l_proc;
	error = sysctl_proc_findproc(p, &ptmp, (pid_t)name[-2]);
	if (error)
		return (error);

	/*
	 * let them modify a temporary copy of the core name
	 */
	node = *rnode;
	strlcpy(cname, ptmp->p_limit->pl_corename, sizeof(cname));
	node.sysctl_data = cname;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));

	/*
	 * if that failed, or they have nothing new to say, or we've
	 * heard it before...
	 */
	if (error || newp == NULL ||
	    strcmp(cname, ptmp->p_limit->pl_corename) == 0)
		return (error);

	/*
	 * no error yet and cname now has the new core name in it.
	 * let's see if it looks acceptable.  it must be either "core"
	 * or end in ".core" or "/core".
	 */
	len = strlen(cname);
	if (len < 4)
		return (EINVAL);
	if (strcmp(cname + len - 4, "core") != 0)
		return (EINVAL);
	if (len > 4 && cname[len - 5] != '/' && cname[len - 5] != '.')
		return (EINVAL);

	/*
	 * hmm...looks good.  now...where do we put it?
	 */
	tmp = malloc(len + 1, M_TEMP, M_WAITOK|M_CANFAIL);
	if (tmp == NULL)
		return (ENOMEM);
	strlcpy(tmp, cname, len + 1);

	if (ptmp->p_limit->p_refcnt > 1 &&
	    (ptmp->p_limit->p_lflags & PL_SHAREMOD) == 0) {
		newplim = limcopy(ptmp->p_limit);
		limfree(ptmp->p_limit);
		ptmp->p_limit = newplim;
	}
	if (ptmp->p_limit->pl_corename != defcorename)
		FREE(ptmp->p_limit->pl_corename, M_SYSCTLDATA);
	ptmp->p_limit->pl_corename = tmp;

	return (error);
}

/*
 * sysctl helper routine for checking/setting a process's stop flags,
 * one each for fork, exec, and exit.
 */
static int
sysctl_proc_stop(SYSCTLFN_ARGS)
{
	struct proc *p, *ptmp;
	int i, f, error = 0;
	struct sysctlnode node;

	if (namelen != 0)
		return (EINVAL);

	p = l->l_proc;
	error = sysctl_proc_findproc(p, &ptmp, (pid_t)name[-2]);
	if (error)
		return (error);

	switch (rnode->sysctl_num) {
	case PROC_PID_STOPFORK:
		f = P_STOPFORK;
		break;
	case PROC_PID_STOPEXEC:
		f = P_STOPEXEC;
		break;
	case PROC_PID_STOPEXIT:
		f = P_STOPEXIT;
		break;
	default:
		return (EINVAL);
	}

	i = (ptmp->p_flag & f) ? 1 : 0;
	node = *rnode;
	node.sysctl_data = &i;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);

	if (i)
		ptmp->p_flag |= f;
	else
		ptmp->p_flag &= ~f;

	return (0);
}

/*
 * sysctl helper routine for a process's rlimits as exposed by sysctl.
 */
static int
sysctl_proc_plimit(SYSCTLFN_ARGS)
{
	struct proc *ptmp, *p;
	u_int limitno;
	int which, error = 0;
	struct rlimit alim;
	struct sysctlnode node;

	if (namelen != 0)
		return (EINVAL);

	which = name[-1];
	if (which != PROC_PID_LIMIT_TYPE_SOFT &&
	    which != PROC_PID_LIMIT_TYPE_HARD)
		return (EINVAL);

	limitno = name[-2] - 1;
	if (limitno >= RLIM_NLIMITS)
		return (EINVAL);

	if (name[-3] != PROC_PID_LIMIT)
		return (EINVAL);

	p = l->l_proc;
	error = sysctl_proc_findproc(p, &ptmp, (pid_t)name[-4]);
	if (error)
		return (error);

	node = *rnode;
	memcpy(&alim, &ptmp->p_rlimit[limitno], sizeof(alim));
	if (which == PROC_PID_LIMIT_TYPE_HARD)
		node.sysctl_data = &alim.rlim_max;
	else
		node.sysctl_data = &alim.rlim_cur;

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);

	return (dosetrlimit(ptmp, p->p_cred, limitno, &alim));
}

/*
 * and finally, the actual glue that sticks it to the tree
 */
SYSCTL_SETUP(sysctl_proc_setup, "sysctl proc subtree setup")
{

	sysctl_createv(SYSCTL_PERMANENT,
		       CTLTYPE_NODE, "proc", NULL,
		       NULL, 0, NULL, 0,
		       CTL_PROC, CTL_EOL);
	sysctl_createv(SYSCTL_PERMANENT|SYSCTL_ANYNUMBER,
		       CTLTYPE_NODE, "curproc", NULL,
		       NULL, 0, NULL, 0,
		       CTL_PROC, PROC_CURPROC, CTL_EOL);

	sysctl_createv(SYSCTL_PERMANENT|SYSCTL_READONLY2|SYSCTL_ANYWRITE,
		       CTLTYPE_STRING, "corename", NULL,
		       sysctl_proc_corename, 0, NULL, MAXPATHLEN,
		       CTL_PROC, PROC_CURPROC, PROC_PID_CORENAME, CTL_EOL);
	sysctl_createv(SYSCTL_PERMANENT,
		       CTLTYPE_NODE, "rlimit", NULL,
		       NULL, 0, NULL, 0,
		       CTL_PROC, PROC_CURPROC, PROC_PID_LIMIT, CTL_EOL);

#define create_proc_plimit(s, n) do { \
	sysctl_createv(SYSCTL_PERMANENT, \
		       CTLTYPE_NODE, s, NULL, \
		       NULL, 0, NULL, 0, \
		       CTL_PROC, PROC_CURPROC, PROC_PID_LIMIT, n, \
		       CTL_EOL); \
	sysctl_createv(SYSCTL_PERMANENT|SYSCTL_READWRITE|SYSCTL_ANYWRITE, \
		       CTLTYPE_QUAD, "soft", NULL, \
		       sysctl_proc_plimit, 0, NULL, 0, \
		       CTL_PROC, PROC_CURPROC, PROC_PID_LIMIT, n, \
		       PROC_PID_LIMIT_TYPE_SOFT, CTL_EOL); \
	sysctl_createv(SYSCTL_PERMANENT|SYSCTL_READWRITE|SYSCTL_ANYWRITE, \
		       CTLTYPE_QUAD, "hard", NULL, \
		       sysctl_proc_plimit, 0, NULL, 0, \
		       CTL_PROC, PROC_CURPROC, PROC_PID_LIMIT, n, \
		       PROC_PID_LIMIT_TYPE_HARD, CTL_EOL); \
	} while (0/*CONSTCOND*/)

	create_proc_plimit("cputime",		PROC_PID_LIMIT_CPU);
	create_proc_plimit("filesize",		PROC_PID_LIMIT_FSIZE);
	create_proc_plimit("datasize",		PROC_PID_LIMIT_DATA);
	create_proc_plimit("stacksize",		PROC_PID_LIMIT_STACK);
	create_proc_plimit("coredumpsize",	PROC_PID_LIMIT_CORE);
	create_proc_plimit("memoryuse",		PROC_PID_LIMIT_RSS);
	create_proc_plimit("memorylocked",	PROC_PID_LIMIT_MEMLOCK);
	create_proc_plimit("maxproc",		PROC_PID_LIMIT_NPROC);
	create_proc_plimit("descriptors",	PROC_PID_LIMIT_NOFILE);

#undef create_proc_plimit

	sysctl_createv(SYSCTL_PERMANENT|SYSCTL_READWRITE|SYSCTL_ANYWRITE,
		       CTLTYPE_INT, "stopfork", NULL,
		       sysctl_proc_stop, 0, NULL, 0,
		       CTL_PROC, PROC_CURPROC, PROC_PID_STOPFORK, CTL_EOL);
	sysctl_createv(SYSCTL_PERMANENT|SYSCTL_READWRITE|SYSCTL_ANYWRITE,
		       CTLTYPE_INT, "stopexec", NULL,
		       sysctl_proc_stop, 0, NULL, 0,
		       CTL_PROC, PROC_CURPROC, PROC_PID_STOPEXEC, CTL_EOL);
	sysctl_createv(SYSCTL_PERMANENT|SYSCTL_READWRITE|SYSCTL_ANYWRITE,
		       CTLTYPE_INT, "stopexit", NULL,
		       sysctl_proc_stop, 0, NULL, 0,
		       CTL_PROC, PROC_CURPROC, PROC_PID_STOPEXIT, CTL_EOL);
}