/*	$NetBSD: kern_resource.c,v 1.57 2000/05/31 05:02:32 thorpej Exp $	*/

/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_resource.c	8.8 (Berkeley) 2/14/95
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/resourcevar.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/proc.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <vm/vm.h>

#include <uvm/uvm_extern.h>

/*
 * Resource controls and accounting.
 */

int
sys_getpriority(curp, v, retval)
	struct proc *curp;
	void *v;
	register_t *retval;
{
	struct sys_getpriority_args /* {
		syscallarg(int) which;
		syscallarg(int) who;
	} */ *uap = v;
	struct proc *p;
	int low = NZERO + PRIO_MAX + 1;

	switch (SCARG(uap, which)) {

	case PRIO_PROCESS:
		if (SCARG(uap, who) == 0)
			p = curp;
		else
			p = pfind(SCARG(uap, who));
		if (p == 0)
			break;
		low = p->p_nice;
		break;

	case PRIO_PGRP: {
		struct pgrp *pg;

		if (SCARG(uap, who) == 0)
			pg = curp->p_pgrp;
		else if ((pg = pgfind(SCARG(uap, who))) == NULL)
			break;
		for (p = pg->pg_members.lh_first; p != 0;
		    p = p->p_pglist.le_next) {
			if (p->p_nice < low)
				low = p->p_nice;
		}
		break;
	}

	case PRIO_USER:
		if (SCARG(uap, who) == 0)
			SCARG(uap, who) = curp->p_ucred->cr_uid;
		proclist_lock_read();
		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next)
			if (p->p_ucred->cr_uid == SCARG(uap, who) &&
			    p->p_nice < low)
				low = p->p_nice;
		proclist_unlock_read();
		break;

	default:
		return (EINVAL);
	}
	if (low == NZERO + PRIO_MAX + 1)
		return (ESRCH);
	*retval = low - NZERO;
	return (0);
}

/* ARGSUSED */
int
sys_setpriority(curp, v, retval)
	struct proc *curp;
	void *v;
	register_t *retval;
{
	struct sys_setpriority_args /* {
		syscallarg(int) which;
		syscallarg(int) who;
		syscallarg(int) prio;
	} */ *uap = v;
	struct proc *p;
	int found = 0, error = 0;

	switch (SCARG(uap, which)) {

	case PRIO_PROCESS:
		if (SCARG(uap, who) == 0)
			p = curp;
		else
			p = pfind(SCARG(uap, who));
		if (p == 0)
			break;
		error = donice(curp, p, SCARG(uap, prio));
		found++;
		break;

	case PRIO_PGRP: {
		struct pgrp *pg;

		if (SCARG(uap, who) == 0)
			pg = curp->p_pgrp;
		else if ((pg = pgfind(SCARG(uap, who))) == NULL)
			break;
		for (p = pg->pg_members.lh_first; p != 0;
		    p = p->p_pglist.le_next) {
			error = donice(curp, p, SCARG(uap, prio));
			found++;
		}
		break;
	}

	case PRIO_USER:
		if (SCARG(uap, who) == 0)
			SCARG(uap, who) = curp->p_ucred->cr_uid;
		proclist_lock_read();
		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next)
			if (p->p_ucred->cr_uid == SCARG(uap, who)) {
				error = donice(curp, p, SCARG(uap, prio));
				found++;
			}
		proclist_unlock_read();
		break;

	default:
		return (EINVAL);
	}
	if (found == 0)
		return (ESRCH);
	return (error);
}

int
donice(curp, chgp, n)
	struct proc *curp, *chgp;
	int n;
{
	struct pcred *pcred = curp->p_cred;

	if (pcred->pc_ucred->cr_uid && pcred->p_ruid &&
	    pcred->pc_ucred->cr_uid != chgp->p_ucred->cr_uid &&
	    pcred->p_ruid != chgp->p_ucred->cr_uid)
		return (EPERM);
	if (n > PRIO_MAX)
		n = PRIO_MAX;
	if (n < PRIO_MIN)
		n = PRIO_MIN;
	n += NZERO;
	if (n < chgp->p_nice && suser(pcred->pc_ucred, &curp->p_acflag))
		return (EACCES);
	chgp->p_nice = n;
	(void)resetpriority(chgp);
	return (0);
}
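
/*
 * Illustrative userland sketch (not part of this kernel file): how the
 * getpriority(2)/setpriority(2) syscalls handled above are typically
 * driven from user space.  The function name renice_self() is made up
 * for this example.  Since getpriority() can legitimately return -1,
 * errno must be cleared before the call and checked afterwards.
 *
 *	#include <sys/resource.h>
 *	#include <errno.h>
 *
 *	int
 *	renice_self(int incr)
 *	{
 *		int prio;
 *
 *		errno = 0;
 *		prio = getpriority(PRIO_PROCESS, 0);
 *		if (prio == -1 && errno != 0)
 *			return (-1);
 *		return (setpriority(PRIO_PROCESS, 0, prio + incr));
 *	}
 */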

/* ARGSUSED */
int
sys_setrlimit(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_setrlimit_args /* {
		syscallarg(int) which;
		syscallarg(const struct rlimit *) rlp;
	} */ *uap = v;
	int which = SCARG(uap, which);
	struct rlimit alim;
	int error;

	error = copyin(SCARG(uap, rlp), &alim, sizeof(struct rlimit));
	if (error)
		return (error);
	return (dosetrlimit(p, p->p_cred, which, &alim));
}

int
dosetrlimit(p, cred, which, limp)
	struct proc *p;
	struct pcred *cred;
	int which;
	struct rlimit *limp;
{
	struct rlimit *alimp;
	extern unsigned maxdmap, maxsmap;
	struct plimit *newplim;
	int error;

	if ((u_int)which >= RLIM_NLIMITS)
		return (EINVAL);

	if (limp->rlim_cur < 0 || limp->rlim_max < 0)
		return (EINVAL);

	alimp = &p->p_rlimit[which];
	/* if we don't change the value, no need to limcopy() */
	if (limp->rlim_cur == alimp->rlim_cur &&
	    limp->rlim_max == alimp->rlim_max)
		return 0;

	if (limp->rlim_cur > alimp->rlim_max ||
	    limp->rlim_max > alimp->rlim_max)
		if ((error = suser(cred->pc_ucred, &p->p_acflag)) != 0)
			return (error);
	if (limp->rlim_cur > limp->rlim_max)
		limp->rlim_cur = limp->rlim_max;
	if (p->p_limit->p_refcnt > 1 &&
	    (p->p_limit->p_lflags & PL_SHAREMOD) == 0) {
		newplim = limcopy(p->p_limit);
		limfree(p->p_limit);
		p->p_limit = newplim;
		alimp = &p->p_rlimit[which];
	}

	switch (which) {

	case RLIMIT_DATA:
		if (limp->rlim_cur > maxdmap)
			limp->rlim_cur = maxdmap;
		if (limp->rlim_max > maxdmap)
			limp->rlim_max = maxdmap;
		break;

	case RLIMIT_STACK:
		if (limp->rlim_cur > maxsmap)
			limp->rlim_cur = maxsmap;
		if (limp->rlim_max > maxsmap)
			limp->rlim_max = maxsmap;

		/*
		 * The stack is allocated to its maximum size at exec time,
		 * with only "rlim_cur" bytes of it accessible.  In other
		 * words, the stack is split into two contiguous regions at
		 * the "rlim_cur" byte boundary.
		 *
		 * Since allocation is done in terms of pages, round
		 * "rlim_cur" up to a page boundary (otherwise the two
		 * regions would overlap).  If the stack limit is going up,
		 * make the additional range accessible; if it is going
		 * down, make the difference inaccessible.
		 */
		limp->rlim_cur = round_page(limp->rlim_cur);
		if (limp->rlim_cur != alimp->rlim_cur) {
			vaddr_t addr;
			vsize_t size;
			vm_prot_t prot;

			if (limp->rlim_cur > alimp->rlim_cur) {
				prot = VM_PROT_ALL;
				size = limp->rlim_cur - alimp->rlim_cur;
				addr = USRSTACK - limp->rlim_cur;
			} else {
				prot = VM_PROT_NONE;
				size = alimp->rlim_cur - limp->rlim_cur;
				addr = USRSTACK - alimp->rlim_cur;
			}
			(void) uvm_map_protect(&p->p_vmspace->vm_map,
			    addr, addr+size, prot, FALSE);
		}
		break;

	case RLIMIT_NOFILE:
		if (limp->rlim_cur > maxfiles)
			limp->rlim_cur = maxfiles;
		if (limp->rlim_max > maxfiles)
			limp->rlim_max = maxfiles;
		break;

	case RLIMIT_NPROC:
		if (limp->rlim_cur > maxproc)
			limp->rlim_cur = maxproc;
		if (limp->rlim_max > maxproc)
			limp->rlim_max = maxproc;
		break;
	}
	*alimp = *limp;
	return (0);
}

/* ARGSUSED */
int
sys_getrlimit(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_getrlimit_args /* {
		syscallarg(int) which;
		syscallarg(struct rlimit *) rlp;
	} */ *uap = v;
	int which = SCARG(uap, which);

	if ((u_int)which >= RLIM_NLIMITS)
		return (EINVAL);
	return (copyout(&p->p_rlimit[which], SCARG(uap, rlp),
	    sizeof(struct rlimit)));
}
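
/*
 * Illustrative userland sketch (not part of this kernel file): raising the
 * soft file-descriptor limit to the hard limit via getrlimit(2) and
 * setrlimit(2), which land in sys_getrlimit()/sys_setrlimit() above.  The
 * function name raise_nofile_limit() is made up for this example.  Note
 * that dosetrlimit() will still clamp both values to the system-wide
 * "maxfiles" limit.
 *
 *	#include <sys/resource.h>
 *
 *	int
 *	raise_nofile_limit(void)
 *	{
 *		struct rlimit rl;
 *
 *		if (getrlimit(RLIMIT_NOFILE, &rl) == -1)
 *			return (-1);
 *		rl.rlim_cur = rl.rlim_max;
 *		return (setrlimit(RLIMIT_NOFILE, &rl));
 *	}
 */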

/*
 * Transform the running time and tick information in proc p into user,
 * system, and interrupt time usage.
 */
void
calcru(p, up, sp, ip)
	struct proc *p;
	struct timeval *up;
	struct timeval *sp;
	struct timeval *ip;
{
	u_quad_t u, st, ut, it, tot;
	long sec, usec;
	int s;
	struct timeval tv;

	s = splstatclock();
	st = p->p_sticks;
	ut = p->p_uticks;
	it = p->p_iticks;
	splx(s);

	tot = st + ut + it;
	if (tot == 0) {
		up->tv_sec = up->tv_usec = 0;
		sp->tv_sec = sp->tv_usec = 0;
		if (ip != NULL)
			ip->tv_sec = ip->tv_usec = 0;
		return;
	}

	sec = p->p_rtime.tv_sec;
	usec = p->p_rtime.tv_usec;
	if (p->p_stat == SONPROC) {
		struct schedstate_percpu *spc;

		KDASSERT(p->p_cpu != NULL);
		spc = &p->p_cpu->ci_schedstate;

		/*
		 * Adjust for the current time slice.  This is actually fairly
		 * important since the error here is on the order of a time
		 * quantum, which is much greater than the sampling error.
		 */
		microtime(&tv);
		sec += tv.tv_sec - spc->spc_runtime.tv_sec;
		usec += tv.tv_usec - spc->spc_runtime.tv_usec;
	}
	u = (u_quad_t) sec * 1000000 + usec;
	st = (u * st) / tot;
	sp->tv_sec = st / 1000000;
	sp->tv_usec = st % 1000000;
	ut = (u * ut) / tot;
	up->tv_sec = ut / 1000000;
	up->tv_usec = ut % 1000000;
	if (ip != NULL) {
		it = (u * it) / tot;
		ip->tv_sec = it / 1000000;
		ip->tv_usec = it % 1000000;
	}
}

/* ARGSUSED */
int
sys_getrusage(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_getrusage_args /* {
		syscallarg(int) who;
		syscallarg(struct rusage *) rusage;
	} */ *uap = v;
	struct rusage *rup;

	switch (SCARG(uap, who)) {

	case RUSAGE_SELF:
		rup = &p->p_stats->p_ru;
		calcru(p, &rup->ru_utime, &rup->ru_stime, NULL);
		break;

	case RUSAGE_CHILDREN:
		rup = &p->p_stats->p_cru;
		break;

	default:
		return (EINVAL);
	}
	return (copyout(rup, SCARG(uap, rusage), sizeof(struct rusage)));
}

void
ruadd(ru, ru2)
	struct rusage *ru, *ru2;
{
	long *ip, *ip2;
	int i;

	timeradd(&ru->ru_utime, &ru2->ru_utime, &ru->ru_utime);
	timeradd(&ru->ru_stime, &ru2->ru_stime, &ru->ru_stime);
	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
	ip = &ru->ru_first; ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
		*ip++ += *ip2++;
}

/*
 * Make a copy of the plimit structure.
 * We share these structures copy-on-write after fork,
 * and copy when a limit is changed.
 */
struct plimit *
limcopy(lim)
	struct plimit *lim;
{
	struct plimit *newlim;

	newlim = pool_get(&plimit_pool, PR_WAITOK);
	memcpy(newlim->pl_rlimit, lim->pl_rlimit,
	    sizeof(struct rlimit) * RLIM_NLIMITS);
	if (lim->pl_corename == defcorename) {
		newlim->pl_corename = defcorename;
	} else {
		newlim->pl_corename = malloc(strlen(lim->pl_corename)+1,
		    M_TEMP, M_WAITOK);
		strcpy(newlim->pl_corename, lim->pl_corename);
	}
	newlim->p_lflags = 0;
	newlim->p_refcnt = 1;
	return (newlim);
}

void
limfree(lim)
	struct plimit *lim;
{

	if (--lim->p_refcnt > 0)
		return;
#ifdef DIAGNOSTIC
	if (lim->p_refcnt < 0)
		panic("limfree");
#endif
	if (lim->pl_corename != defcorename)
		free(lim->pl_corename, M_TEMP);
	pool_put(&plimit_pool, lim);
}
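
/*
 * Illustrative userland sketch (not part of this kernel file): reading a
 * process's own CPU usage via getrusage(2), which is served by
 * sys_getrusage() and calcru() above.  The function name
 * print_self_usage() is made up for this example.
 *
 *	#include <sys/resource.h>
 *	#include <stdio.h>
 *
 *	void
 *	print_self_usage(void)
 *	{
 *		struct rusage ru;
 *
 *		if (getrusage(RUSAGE_SELF, &ru) == 0)
 *			printf("user %ld.%06lds sys %ld.%06lds\n",
 *			    (long)ru.ru_utime.tv_sec,
 *			    (long)ru.ru_utime.tv_usec,
 *			    (long)ru.ru_stime.tv_sec,
 *			    (long)ru.ru_stime.tv_usec);
 *	}
 */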