/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_resource.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_resource.c,v 1.55.2.5 2001/11/03 01:41:08 ps Exp $
 * $DragonFly: src/sys/kern/kern_resource.c,v 1.30 2007/01/01 22:51:17 corecode Exp $
 */

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/file.h>
#include <sys/kern_syscall.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/lockf.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

#include <sys/thread2.h>

static int donice (struct proc *chgp, int n);

static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
static u_long uihash;		/* size of hash table - 1 */

static struct uidinfo	*uicreate (uid_t uid);
static struct uidinfo	*uilookup (uid_t uid);

/*
 * Resource controls and accounting.
 */
struct getpriority_info {
        int low;
        int who;
};

static int getpriority_callback(struct proc *p, void *data);

int
sys_getpriority(struct getpriority_args *uap)
{
        struct getpriority_info info;
        struct proc *curp = curproc;
        struct proc *p;
        int low = PRIO_MAX + 1;

        switch (uap->which) {
        case PRIO_PROCESS:
                if (uap->who == 0)
                        p = curp;
                else
                        p = pfind(uap->who);
                if (p == 0)
                        break;
                if (!PRISON_CHECK(curp->p_ucred, p->p_ucred))
                        break;
                low = p->p_nice;
                break;

        case PRIO_PGRP:
        {
                struct pgrp *pg;

                if (uap->who == 0)
                        pg = curp->p_pgrp;
                else if ((pg = pgfind(uap->who)) == NULL)
                        break;
                LIST_FOREACH(p, &pg->pg_members, p_pglist) {
                        if ((PRISON_CHECK(curp->p_ucred, p->p_ucred) &&
                            p->p_nice < low))
                                low = p->p_nice;
                }
                break;
        }
        case PRIO_USER:
                if (uap->who == 0)
                        uap->who = curp->p_ucred->cr_uid;
                info.low = low;
                info.who = uap->who;
                allproc_scan(getpriority_callback, &info);
                low = info.low;
                break;

        default:
                return (EINVAL);
        }
        if (low == PRIO_MAX + 1)
                return (ESRCH);
        uap->sysmsg_result = low;
        return (0);
}

/*
 * Figure out the current lowest nice priority for processes owned
 * by the specified user.
 */
static
int
getpriority_callback(struct proc *p, void *data)
{
        struct getpriority_info *info = data;

        if (PRISON_CHECK(curproc->p_ucred, p->p_ucred) &&
            p->p_ucred->cr_uid == info->who &&
            p->p_nice < info->low) {
                info->low = p->p_nice;
        }
        return(0);
}

struct setpriority_info {
        int prio;
        int who;
        int error;
        int found;
};

static int setpriority_callback(struct proc *p, void *data);

int
sys_setpriority(struct setpriority_args *uap)
{
        struct setpriority_info info;
        struct proc *curp = curproc;
        struct proc *p;
        int found = 0, error = 0;

        switch (uap->which) {
        case PRIO_PROCESS:
                if (uap->who == 0)
                        p = curp;
                else
                        p = pfind(uap->who);
                if (p == 0)
                        break;
                if (!PRISON_CHECK(curp->p_ucred, p->p_ucred))
                        break;
                error = donice(p, uap->prio);
                found++;
                break;

        case PRIO_PGRP:
        {
                struct pgrp *pg;

                if (uap->who == 0)
                        pg = curp->p_pgrp;
                else if ((pg = pgfind(uap->who)) == NULL)
                        break;
                LIST_FOREACH(p, &pg->pg_members, p_pglist) {
                        if (PRISON_CHECK(curp->p_ucred, p->p_ucred)) {
                                error = donice(p, uap->prio);
                                found++;
                        }
                }
                break;
        }
        case PRIO_USER:
                if (uap->who == 0)
                        uap->who = curp->p_ucred->cr_uid;
                info.prio = uap->prio;
                info.who = uap->who;
                info.error = 0;
                info.found = 0;
                allproc_scan(setpriority_callback, &info);
                error = info.error;
                found = info.found;
                break;

        default:
                return (EINVAL);
        }
        if (found == 0)
                return (ESRCH);
        return (error);
}

static
int
setpriority_callback(struct proc *p, void *data)
{
        struct setpriority_info *info = data;
        int error;

        if (p->p_ucred->cr_uid == info->who &&
            PRISON_CHECK(curproc->p_ucred, p->p_ucred)) {
                error = donice(p, info->prio);
                if (error)
                        info->error = error;
                ++info->found;
        }
        return(0);
}

static int
donice(struct proc *chgp, int n)
{
        struct proc *curp = curproc;
        struct ucred *cr = curp->p_ucred;

        if (cr->cr_uid && cr->cr_ruid &&
            cr->cr_uid != chgp->p_ucred->cr_uid &&
            cr->cr_ruid != chgp->p_ucred->cr_uid)
                return (EPERM);
        if (n > PRIO_MAX)
                n = PRIO_MAX;
        if (n < PRIO_MIN)
                n = PRIO_MIN;
        if (n < chgp->p_nice && suser_cred(cr, 0))
                return (EACCES);
        chgp->p_nice = n;
        chgp->p_usched->resetpriority(&chgp->p_lwp);
        return (0);
}
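
/*
 * Illustrative only, not part of this file's code: how the nice handling
 * above looks from userland.  getpriority(2) can legitimately return -1,
 * so errno must be cleared and checked; setpriority(2) goes through
 * donice() above, which clamps to PRIO_MIN/PRIO_MAX and allows a normal
 * user to raise (but not lower) the nice value.  A minimal sketch,
 * assuming only standard <sys/resource.h> and <err.h>:
 *
 *	#include <sys/resource.h>
 *	#include <err.h>
 *	#include <errno.h>
 *
 *	errno = 0;
 *	int cur = getpriority(PRIO_PROCESS, 0);	.. our own nice value
 *	if (cur == -1 && errno != 0)
 *		err(1, "getpriority");
 *	if (setpriority(PRIO_PROCESS, 0, cur + 1) < 0)	.. raising is allowed
 *		err(1, "setpriority");			.. lowering needs root
 */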
/*
 * Set realtime priority
 */
/* ARGSUSED */
int
sys_rtprio(struct rtprio_args *uap)
{
        struct proc *curp = curproc;
        struct proc *p;
        struct ucred *cr = curp->p_ucred;
        struct rtprio rtp;
        int error;

        error = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
        if (error)
                return (error);

        if (uap->pid == 0)
                p = curp;
        else
                p = pfind(uap->pid);

        if (p == 0)
                return (ESRCH);

        switch (uap->function) {
        case RTP_LOOKUP:
                return (copyout(&p->p_lwp.lwp_rtprio, uap->rtp,
                    sizeof(struct rtprio)));
        case RTP_SET:
                if (cr->cr_uid && cr->cr_ruid &&
                    cr->cr_uid != p->p_ucred->cr_uid &&
                    cr->cr_ruid != p->p_ucred->cr_uid)
                        return (EPERM);
                /* disallow setting rtprio in most cases if not superuser */
                if (suser_cred(cr, 0)) {
                        /* can't set someone else's */
                        if (uap->pid)
                                return (EPERM);
                        /* can't set realtime priority */
                        /*
                         * Realtime priority has to be restricted for reasons
                         * which should be obvious.  However, for idle priority,
                         * there is a potential for system deadlock if an
                         * idleprio process gains a lock on a resource that
                         * other processes need (and the idleprio process can't
                         * run due to a CPU-bound normal process).  Fix me!  XXX
                         */
                        if (RTP_PRIO_IS_REALTIME(rtp.type))
                                return (EPERM);
                }
                switch (rtp.type) {
#ifdef RTP_PRIO_FIFO
                case RTP_PRIO_FIFO:
#endif
                case RTP_PRIO_REALTIME:
                case RTP_PRIO_NORMAL:
                case RTP_PRIO_IDLE:
                        if (rtp.prio > RTP_PRIO_MAX)
                                return (EINVAL);
                        p->p_lwp.lwp_rtprio = rtp;
                        return (0);
                default:
                        return (EINVAL);
                }

        default:
                return (EINVAL);
        }
}

int
sys_setrlimit(struct __setrlimit_args *uap)
{
        struct rlimit alim;
        int error;

        error = copyin(uap->rlp, &alim, sizeof(alim));
        if (error)
                return (error);

        error = kern_setrlimit(uap->which, &alim);

        return (error);
}

int
sys_getrlimit(struct __getrlimit_args *uap)
{
        struct rlimit lim;
        int error;

        error = kern_getrlimit(uap->which, &lim);

        if (error == 0)
                error = copyout(&lim, uap->rlp, sizeof(*uap->rlp));
        return error;
}

/*
 * Transform the running time and tick information in lwp lp's thread into
 * user, system, and interrupt time usage.
 *
 * Since we are limited to statclock tick granularity this is a statistical
 * calculation which will be correct over the long haul, but should not be
 * expected to measure fine grained deltas.
 */
void
calcru(struct lwp *lp, struct timeval *up, struct timeval *sp)
{
        struct thread *td = lp->lwp_thread;

        /*
         * Calculate at the statclock level.  YYY if the thread is owned by
         * another cpu we need to forward the request to the other cpu, or
         * have a token to interlock the information.
         */
        crit_enter();
        up->tv_sec = td->td_uticks / 1000000;
        up->tv_usec = td->td_uticks % 1000000;
        sp->tv_sec = td->td_sticks / 1000000;
        sp->tv_usec = td->td_sticks % 1000000;
        crit_exit();
}
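
/*
 * Worked example for the conversion above (illustrative only).  As the
 * divisions imply, td_uticks and td_sticks are consumed here as microsecond
 * counts, so a thread that has accumulated 2,500,000 user microseconds and
 * 10,000 system microseconds reports:
 *
 *	up = { tv_sec = 2, tv_usec = 500000 }
 *	sp = { tv_sec = 0, tv_usec =  10000 }
 */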
/*
 * Aggregate resource statistics of all lwps of a process.
 *
 * proc.p_ru keeps track of all statistics directly related to a proc.  This
 * consists of RSS usage and nswap information and aggregate numbers for all
 * former lwps of this proc.
 *
 * proc.p_cru is the sum of all stats of reaped children.
 *
 * lwp.lwp_ru contains the stats directly related to one specific lwp, meaning
 * packet, scheduler switch or page fault counts, etc.  This information gets
 * added to lwp.lwp_proc.p_ru when the lwp exits.
 */
void
calcru_proc(struct proc *p, struct rusage *ru)
{
        struct timeval upt, spt;
        long *rip1, *rip2;
        struct lwp *lp;

        *ru = p->p_ru;

        FOREACH_LWP_IN_PROC(lp, p) {
                calcru(lp, &upt, &spt);
                timevaladd(&ru->ru_utime, &upt);
                timevaladd(&ru->ru_stime, &spt);
                for (rip1 = &ru->ru_first, rip2 = &lp->lwp_ru.ru_first;
                     rip1 <= &ru->ru_last;
                     rip1++, rip2++)
                        *rip1 += *rip2;
        }
}


/* ARGSUSED */
int
sys_getrusage(struct getrusage_args *uap)
{
        struct rusage ru;
        struct rusage *rup;

        switch (uap->who) {
        case RUSAGE_SELF:
                rup = &ru;
                calcru_proc(curproc, rup);
                break;

        case RUSAGE_CHILDREN:
                rup = &curproc->p_cru;
                break;

        default:
                return (EINVAL);
        }
        return (copyout((caddr_t)rup, (caddr_t)uap->rusage,
            sizeof (struct rusage)));
}

void
ruadd(struct rusage *ru, struct rusage *ru2)
{
        long *ip, *ip2;
        int i;

        timevaladd(&ru->ru_utime, &ru2->ru_utime);
        timevaladd(&ru->ru_stime, &ru2->ru_stime);
        if (ru->ru_maxrss < ru2->ru_maxrss)
                ru->ru_maxrss = ru2->ru_maxrss;
        ip = &ru->ru_first; ip2 = &ru2->ru_first;
        for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
                *ip++ += *ip2++;
}

/*
 * Find the uidinfo structure for a uid.  This structure is used to
 * track the total resource consumption (process count, socket buffer
 * size, etc.) for the uid and impose limits.
 */
void
uihashinit(void)
{
        uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
}

static struct uidinfo *
uilookup(uid_t uid)
{
        struct uihashhead *uipp;
        struct uidinfo *uip;

        uipp = UIHASH(uid);
        LIST_FOREACH(uip, uipp, ui_hash) {
                if (uip->ui_uid == uid)
                        break;
        }
        return (uip);
}

static struct uidinfo *
uicreate(uid_t uid)
{
        struct uidinfo *uip, *norace;

        /*
         * Allocate space and check for a race
         */
        MALLOC(uip, struct uidinfo *, sizeof(*uip), M_UIDINFO, M_WAITOK);
        norace = uilookup(uid);
        if (norace != NULL) {
                FREE(uip, M_UIDINFO);
                return (norace);
        }

        /*
         * Initialize structure and enter it into the hash table
         */
        LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash);
        uip->ui_uid = uid;
        uip->ui_proccnt = 0;
        uip->ui_sbsize = 0;
        uip->ui_ref = 0;
        uip->ui_posixlocks = 0;
        varsymset_init(&uip->ui_varsymset, NULL);
        return (uip);
}

struct uidinfo *
uifind(uid_t uid)
{
        struct uidinfo *uip;

        uip = uilookup(uid);
        if (uip == NULL)
                uip = uicreate(uid);
        uip->ui_ref++;
        return (uip);
}
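
/*
 * Illustrative sketch, not an actual caller in this file: uifind() takes a
 * reference that must eventually be released with uidrop(), and the per-uid
 * counters are adjusted through chgproccnt()/chgsbsize() below.  Assuming a
 * hypothetical per-uid process limit 'lim', a fork-style caller would
 * account a new process roughly like this:
 *
 *	struct uidinfo *uip;
 *
 *	uip = uifind(uid);			.. takes a reference
 *	if (!chgproccnt(uip, 1, lim)) {
 *		uidrop(uip);			.. over the limit, undo
 *		return (EAGAIN);
 *	}
 *	...
 *	chgproccnt(uip, -1, 0);			.. on process exit
 *	uidrop(uip);
 *
 * Real callers typically keep the uidinfo pointer cached in the ucred rather
 * than looking it up on every operation.
 */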
static __inline void
uifree(struct uidinfo *uip)
{
        if (uip->ui_sbsize != 0)	/* XXX no %qd in kernel.  Truncate. */
                kprintf("freeing uidinfo: uid = %d, sbsize = %ld\n",
                    uip->ui_uid, (long)uip->ui_sbsize);
        if (uip->ui_proccnt != 0)
                kprintf("freeing uidinfo: uid = %d, proccnt = %ld\n",
                    uip->ui_uid, uip->ui_proccnt);
        LIST_REMOVE(uip, ui_hash);
        varsymset_clean(&uip->ui_varsymset);
        FREE(uip, M_UIDINFO);
}

void
uihold(struct uidinfo *uip)
{
        ++uip->ui_ref;
        KKASSERT(uip->ui_ref > 0);
}

void
uidrop(struct uidinfo *uip)
{
        KKASSERT(uip->ui_ref > 0);
        if (--uip->ui_ref == 0)
                uifree(uip);
}

void
uireplace(struct uidinfo **puip, struct uidinfo *nuip)
{
        uidrop(*puip);
        *puip = nuip;
}

/*
 * Change the count associated with number of processes
 * a given user is using.  When 'max' is 0, don't enforce a limit.
 */
int
chgproccnt(struct uidinfo *uip, int diff, int max)
{
        /* don't allow them to exceed max, but allow subtraction */
        if (diff > 0 && uip->ui_proccnt + diff > max && max != 0)
                return (0);
        uip->ui_proccnt += diff;
        if (uip->ui_proccnt < 0)
                kprintf("negative proccnt for uid = %d\n", uip->ui_uid);
        return (1);
}

/*
 * Change the total socket buffer size a user has used.
 */
int
chgsbsize(struct uidinfo *uip, u_long *hiwat, u_long to, rlim_t max)
{
        rlim_t new;

        crit_enter();
        new = uip->ui_sbsize + to - *hiwat;
        /* don't allow them to exceed max, but allow subtraction */
        if (to > *hiwat && new > max) {
                crit_exit();
                return (0);
        }
        uip->ui_sbsize = new;
        *hiwat = to;
        if (uip->ui_sbsize < 0)
                kprintf("negative sbsize for uid = %d\n", uip->ui_uid);
        crit_exit();
        return (1);
}
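
/*
 * Illustrative sketch with assumed names (not defined in this file): a
 * socket-buffer reservation path would use chgsbsize() as its per-uid guard,
 * where 'sb' stands for the buffer being grown and 'rl' for the resource
 * limit in effect:
 *
 *	if (!chgsbsize(uip, &sb->sb_hiwat, newsize, rl))
 *		return (0);		.. reservation denied, over the limit
 *
 * On success the uid's aggregate ui_sbsize and the buffer's high-water mark
 * are updated together inside the critical section above.
 */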