/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_resource.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_resource.c,v 1.55.2.5 2001/11/03 01:41:08 ps Exp $
 * $DragonFly: src/sys/kern/kern_resource.c,v 1.35 2008/05/27 05:25:34 dillon Exp $
 */

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/file.h>
#include <sys/kern_syscall.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/time.h>
#include <sys/lockf.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>

static int donice (struct proc *chgp, int n);

static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
static struct spinlock uihash_lock;
static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
static u_long	uihash;		/* size of hash table - 1 */

static struct uidinfo	*uicreate (uid_t uid);
static struct uidinfo	*uilookup (uid_t uid);
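/*
 * Illustrative note (an addition, not from the original source):
 * hashinit() rounds the table size up to a power of two and returns
 * size - 1 in uihash, so UIHASH() reduces to a simple mask.  With a
 * 64-bucket table (uihash == 63), uid 1001 hashes to bucket
 * 1001 & 63 == 41.
 */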
/*
 * Resource controls and accounting.
 */

struct getpriority_info {
	int low;
	int who;
};

static int getpriority_callback(struct proc *p, void *data);

/*
 * MPALMOSTSAFE
 */
int
sys_getpriority(struct getpriority_args *uap)
{
	struct getpriority_info info;
	struct proc *curp = curproc;
	struct proc *p;
	int low = PRIO_MAX + 1;
	int error;

	get_mplock();

	switch (uap->which) {
	case PRIO_PROCESS:
		if (uap->who == 0)
			p = curp;
		else
			p = pfind(uap->who);
		if (p == NULL)
			break;
		if (!PRISON_CHECK(curp->p_ucred, p->p_ucred))
			break;
		low = p->p_nice;
		break;

	case PRIO_PGRP:
	{
		struct pgrp *pg;

		if (uap->who == 0)
			pg = curp->p_pgrp;
		else if ((pg = pgfind(uap->who)) == NULL)
			break;
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			if (PRISON_CHECK(curp->p_ucred, p->p_ucred) &&
			    p->p_nice < low)
				low = p->p_nice;
		}
		break;
	}
	case PRIO_USER:
		if (uap->who == 0)
			uap->who = curp->p_ucred->cr_uid;
		info.low = low;
		info.who = uap->who;
		allproc_scan(getpriority_callback, &info);
		low = info.low;
		break;

	default:
		error = EINVAL;
		goto done;
	}
	if (low == PRIO_MAX + 1) {
		error = ESRCH;
		goto done;
	}
	uap->sysmsg_result = low;
	error = 0;
done:
	rel_mplock();
	return (error);
}

/*
 * Figure out the current lowest nice priority for processes owned
 * by the specified user.
 */
static
int
getpriority_callback(struct proc *p, void *data)
{
	struct getpriority_info *info = data;

	if (PRISON_CHECK(curproc->p_ucred, p->p_ucred) &&
	    p->p_ucred->cr_uid == info->who &&
	    p->p_nice < info->low) {
		info->low = p->p_nice;
	}
	return(0);
}

struct setpriority_info {
	int prio;
	int who;
	int error;
	int found;
};

static int setpriority_callback(struct proc *p, void *data);

/*
 * MPALMOSTSAFE
 */
int
sys_setpriority(struct setpriority_args *uap)
{
	struct setpriority_info info;
	struct proc *curp = curproc;
	struct proc *p;
	int found = 0, error = 0;

	get_mplock();

	switch (uap->which) {
	case PRIO_PROCESS:
		if (uap->who == 0)
			p = curp;
		else
			p = pfind(uap->who);
		if (p == NULL)
			break;
		if (!PRISON_CHECK(curp->p_ucred, p->p_ucred))
			break;
		error = donice(p, uap->prio);
		found++;
		break;

	case PRIO_PGRP:
	{
		struct pgrp *pg;

		if (uap->who == 0)
			pg = curp->p_pgrp;
		else if ((pg = pgfind(uap->who)) == NULL)
			break;
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			if (PRISON_CHECK(curp->p_ucred, p->p_ucred)) {
				error = donice(p, uap->prio);
				found++;
			}
		}
		break;
	}
	case PRIO_USER:
		if (uap->who == 0)
			uap->who = curp->p_ucred->cr_uid;
		info.prio = uap->prio;
		info.who = uap->who;
		info.error = 0;
		info.found = 0;
		allproc_scan(setpriority_callback, &info);
		error = info.error;
		found = info.found;
		break;

	default:
		error = EINVAL;
		found = 1;
		break;
	}

	rel_mplock();
	if (found == 0)
		error = ESRCH;
	return (error);
}

static
int
setpriority_callback(struct proc *p, void *data)
{
	struct setpriority_info *info = data;
	int error;

	if (p->p_ucred->cr_uid == info->who &&
	    PRISON_CHECK(curproc->p_ucred, p->p_ucred)) {
		error = donice(p, info->prio);
		if (error)
			info->error = error;
		++info->found;
	}
	return(0);
}
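/*
 * Illustrative userland view (an addition, not from the original
 * source).  getpriority() can legitimately return -1, so errno must
 * be cleared before the call to distinguish an error:
 *
 *	int low;
 *
 *	errno = 0;
 *	low = getpriority(PRIO_USER, 1001);
 *	if (low == -1 && errno != 0)
 *		err(1, "getpriority");
 *	if (setpriority(PRIO_PROCESS, 0, 10) < 0)
 *		err(1, "setpriority");
 */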
static int
donice(struct proc *chgp, int n)
{
	struct proc *curp = curproc;
	struct ucred *cr = curp->p_ucred;
	struct lwp *lp;

	if (cr->cr_uid && cr->cr_ruid &&
	    cr->cr_uid != chgp->p_ucred->cr_uid &&
	    cr->cr_ruid != chgp->p_ucred->cr_uid)
		return (EPERM);
	if (n > PRIO_MAX)
		n = PRIO_MAX;
	if (n < PRIO_MIN)
		n = PRIO_MIN;
	if (n < chgp->p_nice && priv_check_cred(cr, PRIV_SCHED_SETPRIORITY, 0))
		return (EACCES);
	chgp->p_nice = n;
	FOREACH_LWP_IN_PROC(lp, chgp)
		chgp->p_usched->resetpriority(lp);
	return (0);
}

/*
 * MPALMOSTSAFE
 */
int
sys_lwp_rtprio(struct lwp_rtprio_args *uap)
{
	struct proc *p = curproc;
	struct lwp *lp;
	struct rtprio rtp;
	struct ucred *cr = curthread->td_ucred;
	int error;

	error = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	if (error)
		return error;
	if (uap->pid < 0)
		return EINVAL;

	get_mplock();
	if (uap->pid == 0) {
		/* curproc already loaded on p */
	} else {
		p = pfind(uap->pid);
	}

	if (p == NULL) {
		error = ESRCH;
		goto done;
	}

	if (uap->tid < -1) {
		error = EINVAL;
		goto done;
	}
	if (uap->tid == -1) {
		/*
		 * Sadly, tid can be 0 so we can't use 0 here like
		 * sys_rtprio() does.
		 */
		lp = curthread->td_lwp;
	} else {
		lp = lwp_rb_tree_RB_LOOKUP(&p->p_lwp_tree, uap->tid);
		if (lp == NULL) {
			error = ESRCH;
			goto done;
		}
	}

	switch (uap->function) {
	case RTP_LOOKUP:
		error = copyout(&lp->lwp_rtprio, uap->rtp,
				sizeof(struct rtprio));
		break;
	case RTP_SET:
		if (cr->cr_uid && cr->cr_ruid &&
		    cr->cr_uid != p->p_ucred->cr_uid &&
		    cr->cr_ruid != p->p_ucred->cr_uid) {
			error = EPERM;
			break;
		}
		/* disallow setting rtprio in most cases if not superuser */
		if (priv_check_cred(cr, PRIV_SCHED_RTPRIO, 0)) {
			/* can't set someone else's */
			if (uap->pid) {		/* XXX */
				error = EPERM;
				break;
			}
			/*
			 * Can't set realtime priority.
			 *
			 * Realtime priority has to be restricted for
			 * reasons which should be obvious.  However, for
			 * idle priority, there is a potential for system
			 * deadlock if an idleprio process gains a lock on
			 * a resource that other processes need (and the
			 * idleprio process can't run due to a CPU-bound
			 * normal process).  Fix me!  XXX
			 */
			if (RTP_PRIO_IS_REALTIME(rtp.type)) {
				error = EPERM;
				break;
			}
		}
		switch (rtp.type) {
#ifdef RTP_PRIO_FIFO
		case RTP_PRIO_FIFO:
#endif
		case RTP_PRIO_REALTIME:
		case RTP_PRIO_NORMAL:
		case RTP_PRIO_IDLE:
			if (rtp.prio > RTP_PRIO_MAX) {
				/* don't return here, the mplock is held */
				error = EINVAL;
				break;
			}
			lp->lwp_rtprio = rtp;
			error = 0;
			break;
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}

done:
	rel_mplock();
	return (error);
}
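/*
 * Illustrative userland sketch (an addition, not from the original
 * source) for the rtprio interfaces implemented here and below,
 * assuming the rtprio(2) argument order.  To move the current
 * process into the idle scheduling class:
 *
 *	struct rtprio rtp;
 *
 *	rtp.type = RTP_PRIO_IDLE;
 *	rtp.prio = RTP_PRIO_MAX;
 *	if (rtprio(RTP_SET, 0, &rtp) < 0)
 *		err(1, "rtprio");
 */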
/*
 * Set realtime priority
 *
 * MPALMOSTSAFE
 */
int
sys_rtprio(struct rtprio_args *uap)
{
	struct proc *curp = curproc;
	struct proc *p;
	struct lwp *lp;
	struct ucred *cr = curthread->td_ucred;
	struct rtprio rtp;
	int error;

	error = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	if (error)
		return (error);

	get_mplock();
	if (uap->pid == 0)
		p = curp;
	else
		p = pfind(uap->pid);

	if (p == NULL) {
		error = ESRCH;
		goto done;
	}

	/* XXX lwp */
	lp = FIRST_LWP_IN_PROC(p);
	switch (uap->function) {
	case RTP_LOOKUP:
		error = copyout(&lp->lwp_rtprio, uap->rtp,
				sizeof(struct rtprio));
		break;
	case RTP_SET:
		if (cr->cr_uid && cr->cr_ruid &&
		    cr->cr_uid != p->p_ucred->cr_uid &&
		    cr->cr_ruid != p->p_ucred->cr_uid) {
			error = EPERM;
			break;
		}
		/* disallow setting rtprio in most cases if not superuser */
		if (priv_check_cred(cr, PRIV_SCHED_RTPRIO, 0)) {
			/* can't set someone else's */
			if (uap->pid) {
				error = EPERM;
				break;
			}
			/*
			 * Can't set realtime priority.
			 *
			 * Realtime priority has to be restricted for
			 * reasons which should be obvious.  However, for
			 * idle priority, there is a potential for system
			 * deadlock if an idleprio process gains a lock on
			 * a resource that other processes need (and the
			 * idleprio process can't run due to a CPU-bound
			 * normal process).  Fix me!  XXX
			 */
			if (RTP_PRIO_IS_REALTIME(rtp.type)) {
				error = EPERM;
				break;
			}
		}
		switch (rtp.type) {
#ifdef RTP_PRIO_FIFO
		case RTP_PRIO_FIFO:
#endif
		case RTP_PRIO_REALTIME:
		case RTP_PRIO_NORMAL:
		case RTP_PRIO_IDLE:
			if (rtp.prio > RTP_PRIO_MAX) {
				error = EINVAL;
				break;
			}
			lp->lwp_rtprio = rtp;
			error = 0;
			break;
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
done:
	rel_mplock();
	return (error);
}

/*
 * MPSAFE
 */
int
sys_setrlimit(struct __setrlimit_args *uap)
{
	struct rlimit alim;
	int error;

	error = copyin(uap->rlp, &alim, sizeof(alim));
	if (error)
		return (error);

	error = kern_setrlimit(uap->which, &alim);

	return (error);
}

/*
 * MPSAFE
 */
int
sys_getrlimit(struct __getrlimit_args *uap)
{
	struct rlimit lim;
	int error;

	error = kern_getrlimit(uap->which, &lim);

	if (error == 0)
		error = copyout(&lim, uap->rlp, sizeof(*uap->rlp));
	return error;
}
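/*
 * Illustrative userland sketch (an addition, not from the original
 * source): raising the soft open-file limit to the hard limit via
 * the syscalls above:
 *
 *	struct rlimit rl;
 *
 *	if (getrlimit(RLIMIT_NOFILE, &rl) == 0) {
 *		rl.rlim_cur = rl.rlim_max;
 *		if (setrlimit(RLIMIT_NOFILE, &rl) < 0)
 *			err(1, "setrlimit");
 *	}
 */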
/*
 * Transform the running time and tick information in lwp lp's thread
 * into user, system, and interrupt time usage.
 *
 * Since we are limited to statclock tick granularity this is a
 * statistical calculation which will be correct over the long haul,
 * but should not be expected to measure fine grained deltas.
 *
 * It is possible to catch a lwp in the midst of being created, so
 * check whether lwp_thread is NULL or not.
 */
void
calcru(struct lwp *lp, struct timeval *up, struct timeval *sp)
{
	struct thread *td;

	/*
	 * Calculate at the statclock level.  YYY if the thread is owned by
	 * another cpu we need to forward the request to the other cpu, or
	 * have a token to interlock the information in order to avoid racing
	 * thread destruction.
	 */
	if ((td = lp->lwp_thread) != NULL) {
		crit_enter();
		up->tv_sec = td->td_uticks / 1000000;
		up->tv_usec = td->td_uticks % 1000000;
		sp->tv_sec = td->td_sticks / 1000000;
		sp->tv_usec = td->td_sticks % 1000000;
		crit_exit();
	}
}
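/*
 * Worked example (an addition, not from the original source): the
 * division above implies td_uticks and td_sticks are kept in
 * microseconds, so a thread that has accumulated 2500000 user
 * microseconds reports up->tv_sec == 2 and up->tv_usec == 500000.
 */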
/*
 * Aggregate resource statistics of all lwps of a process.
 *
 * proc.p_ru keeps track of all statistics directly related to a proc.  This
 * consists of RSS usage and nswap information and aggregate numbers for all
 * former lwps of this proc.
 *
 * proc.p_cru is the sum of all stats of reaped children.
 *
 * lwp.lwp_ru contains the stats directly related to one specific lwp, meaning
 * packet, scheduler switch or page fault counts, etc.  This information gets
 * added to lwp.lwp_proc.p_ru when the lwp exits.
 */
void
calcru_proc(struct proc *p, struct rusage *ru)
{
	struct timeval upt, spt;
	long *rip1, *rip2;
	struct lwp *lp;

	*ru = p->p_ru;

	FOREACH_LWP_IN_PROC(lp, p) {
		calcru(lp, &upt, &spt);
		timevaladd(&ru->ru_utime, &upt);
		timevaladd(&ru->ru_stime, &spt);
		for (rip1 = &ru->ru_first, rip2 = &lp->lwp_ru.ru_first;
		     rip1 <= &ru->ru_last;
		     rip1++, rip2++)
			*rip1 += *rip2;
	}
}

/*
 * MPALMOSTSAFE
 */
int
sys_getrusage(struct getrusage_args *uap)
{
	struct rusage ru;
	struct rusage *rup;
	int error;

	get_mplock();

	switch (uap->who) {
	case RUSAGE_SELF:
		rup = &ru;
		calcru_proc(curproc, rup);
		error = 0;
		break;
	case RUSAGE_CHILDREN:
		rup = &curproc->p_cru;
		error = 0;
		break;
	default:
		error = EINVAL;
		break;
	}
	if (error == 0)
		error = copyout(rup, uap->rusage, sizeof(struct rusage));
	rel_mplock();
	return (error);
}

void
ruadd(struct rusage *ru, struct rusage *ru2)
{
	long *ip, *ip2;
	int i;

	timevaladd(&ru->ru_utime, &ru2->ru_utime);
	timevaladd(&ru->ru_stime, &ru2->ru_stime);
	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
	ip = &ru->ru_first;
	ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
		*ip++ += *ip2++;
}

/*
 * Find the uidinfo structure for a uid.  This structure is used to
 * track the total resource consumption (process count, socket buffer
 * size, etc.) for the uid and impose limits.
 */
void
uihashinit(void)
{
	spin_init(&uihash_lock);
	uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
}

/*
 * NOTE: Must be called with uihash_lock held
 *
 * MPSAFE
 */
static struct uidinfo *
uilookup(uid_t uid)
{
	struct uihashhead *uipp;
	struct uidinfo *uip;

	uipp = UIHASH(uid);
	LIST_FOREACH(uip, uipp, ui_hash) {
		if (uip->ui_uid == uid)
			break;
	}
	return (uip);
}

/*
 * MPSAFE
 */
static struct uidinfo *
uicreate(uid_t uid)
{
	struct uidinfo *uip, *tmp;

	/*
	 * Allocate space and check for a race
	 */
	MALLOC(uip, struct uidinfo *, sizeof(*uip), M_UIDINFO, M_WAITOK);

	/*
	 * Initialize structure and enter it into the hash table
	 */
	spin_init(&uip->ui_lock);
	uip->ui_uid = uid;
	uip->ui_proccnt = 0;
	uip->ui_sbsize = 0;
	uip->ui_ref = 1;	/* we're returning a ref */
	uip->ui_posixlocks = 0;
	varsymset_init(&uip->ui_varsymset, NULL);

	/*
	 * Somebody may have already created the uidinfo for this
	 * uid.  If so, return that instead.
	 */
	spin_lock_wr(&uihash_lock);
	tmp = uilookup(uid);
	if (tmp != NULL) {
		varsymset_clean(&uip->ui_varsymset);
		spin_uninit(&uip->ui_lock);
		FREE(uip, M_UIDINFO);
		uip = tmp;
	} else {
		LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash);
	}
	spin_unlock_wr(&uihash_lock);

	return (uip);
}

/*
 * MPSAFE
 */
struct uidinfo *
uifind(uid_t uid)
{
	struct uidinfo *uip;

	spin_lock_rd(&uihash_lock);
	uip = uilookup(uid);
	if (uip == NULL) {
		spin_unlock_rd(&uihash_lock);
		uip = uicreate(uid);
	} else {
		uihold(uip);
		spin_unlock_rd(&uihash_lock);
	}
	return (uip);
}
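/*
 * Illustrative usage (an addition, not from the original source):
 * uifind() returns a referenced uidinfo, so callers pair it with
 * uidrop() once their accounting is done:
 *
 *	struct uidinfo *uip = uifind(cr->cr_uid);
 *
 *	... adjust per-uid accounting via chgproccnt()/chgsbsize() ...
 *	uidrop(uip);
 */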
/*
 * MPSAFE
 */
static __inline void
uifree(struct uidinfo *uip)
{
	spin_lock_wr(&uihash_lock);

	/*
	 * Note that we're taking a read lock even though we
	 * modify the structure because we know nobody can find
	 * it now that we've locked uihash_lock.  If somebody
	 * can get to it through a stored pointer, the reference
	 * count will not be 0 and in that case we don't modify
	 * the struct.
	 */
	spin_lock_rd(&uip->ui_lock);
	if (uip->ui_ref != 0) {
		/*
		 * Someone found the uid and got a ref when we
		 * unlocked.  No need to free any more.  Both locks
		 * must be released on the way out.
		 */
		spin_unlock_rd(&uip->ui_lock);
		spin_unlock_wr(&uihash_lock);
		return;
	}
	if (uip->ui_sbsize != 0) {
		/* XXX no %qd in kernel.  Truncate. */
		kprintf("freeing uidinfo: uid = %d, sbsize = %ld\n",
			uip->ui_uid, (long)uip->ui_sbsize);
	}
	if (uip->ui_proccnt != 0) {
		kprintf("freeing uidinfo: uid = %d, proccnt = %ld\n",
			uip->ui_uid, uip->ui_proccnt);
	}

	LIST_REMOVE(uip, ui_hash);
	spin_unlock_wr(&uihash_lock);
	varsymset_clean(&uip->ui_varsymset);
	lockuninit(&uip->ui_varsymset.vx_lock);
	spin_unlock_rd(&uip->ui_lock);
	spin_uninit(&uip->ui_lock);
	FREE(uip, M_UIDINFO);
}

/*
 * MPSAFE
 */
void
uihold(struct uidinfo *uip)
{
	atomic_add_int(&uip->ui_ref, 1);
	KKASSERT(uip->ui_ref >= 0);
}

/*
 * MPSAFE
 */
void
uidrop(struct uidinfo *uip)
{
	KKASSERT(uip->ui_ref > 0);
	if (atomic_fetchadd_int(&uip->ui_ref, -1) == 1) {
		uifree(uip);
	}
}

void
uireplace(struct uidinfo **puip, struct uidinfo *nuip)
{
	uidrop(*puip);
	*puip = nuip;
}

/*
 * Change the count associated with number of processes
 * a given user is using.  When 'max' is 0, don't enforce a limit.
 */
int
chgproccnt(struct uidinfo *uip, int diff, int max)
{
	int ret;

	spin_lock_wr(&uip->ui_lock);
	/* don't allow them to exceed max, but allow subtraction */
	if (diff > 0 && uip->ui_proccnt + diff > max && max != 0) {
		ret = 0;
	} else {
		uip->ui_proccnt += diff;
		if (uip->ui_proccnt < 0)
			kprintf("negative proccnt for uid = %d\n", uip->ui_uid);
		ret = 1;
	}
	spin_unlock_wr(&uip->ui_lock);
	return ret;
}

/*
 * Change the total socket buffer size a user has used.
 */
int
chgsbsize(struct uidinfo *uip, u_long *hiwat, u_long to, rlim_t max)
{
	rlim_t new;

	spin_lock_wr(&uip->ui_lock);
	new = uip->ui_sbsize + to - *hiwat;
	KKASSERT(new >= 0);

	/*
	 * If we are trying to increase the socket buffer size, scale
	 * down the hi water mark when we exceed the user's allowed
	 * socket buffer space.
	 *
	 * We can't scale down too much or we will blow up atomic packet
	 * operations.
	 */
	if (to > *hiwat && to > MCLBYTES && new > max) {
		to = to * max / new;
		if (to < MCLBYTES)
			to = MCLBYTES;
	}
	uip->ui_sbsize = new;
	*hiwat = to;
	spin_unlock_wr(&uip->ui_lock);
	return (1);
}
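/*
 * Worked example (an addition, not from the original source): with
 * ui_sbsize == 90000, *hiwat == 10000, a request of to == 40000 and
 * max == 100000, new == 120000 exceeds max, so the hi water mark is
 * scaled to 40000 * 100000 / 120000 == 33333, and never below
 * MCLBYTES.
 */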