/*	$NetBSD: kern_resource.c,v 1.58 2000/06/27 17:41:25 mrg Exp $	*/

/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_resource.c	8.8 (Berkeley) 2/14/95
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/resourcevar.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/proc.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>

/*
 * Resource controls and accounting.
 */
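/*
 * Note on nice values (derived from the code below): p_nice stores the
 * user-visible nice value biased by NZERO, i.e. a nice of n is kept as
 * n + NZERO.  donice() adds the bias when storing, sys_getpriority()
 * subtracts it before returning, and NZERO + PRIO_MAX + 1 serves as an
 * "out of range" sentinel meaning "no matching process found".
 */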
int
sys_getpriority(curp, v, retval)
	struct proc *curp;
	void *v;
	register_t *retval;
{
	struct sys_getpriority_args /* {
		syscallarg(int) which;
		syscallarg(int) who;
	} */ *uap = v;
	struct proc *p;
	int low = NZERO + PRIO_MAX + 1;

	switch (SCARG(uap, which)) {

	case PRIO_PROCESS:
		if (SCARG(uap, who) == 0)
			p = curp;
		else
			p = pfind(SCARG(uap, who));
		if (p == 0)
			break;
		low = p->p_nice;
		break;

	case PRIO_PGRP: {
		struct pgrp *pg;

		if (SCARG(uap, who) == 0)
			pg = curp->p_pgrp;
		else if ((pg = pgfind(SCARG(uap, who))) == NULL)
			break;
		for (p = pg->pg_members.lh_first; p != 0;
		    p = p->p_pglist.le_next) {
			if (p->p_nice < low)
				low = p->p_nice;
		}
		break;
	}

	case PRIO_USER:
		if (SCARG(uap, who) == 0)
			SCARG(uap, who) = curp->p_ucred->cr_uid;
		proclist_lock_read();
		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next)
			if (p->p_ucred->cr_uid == SCARG(uap, who) &&
			    p->p_nice < low)
				low = p->p_nice;
		proclist_unlock_read();
		break;

	default:
		return (EINVAL);
	}
	if (low == NZERO + PRIO_MAX + 1)
		return (ESRCH);
	*retval = low - NZERO;
	return (0);
}

/* ARGSUSED */
int
sys_setpriority(curp, v, retval)
	struct proc *curp;
	void *v;
	register_t *retval;
{
	struct sys_setpriority_args /* {
		syscallarg(int) which;
		syscallarg(int) who;
		syscallarg(int) prio;
	} */ *uap = v;
	struct proc *p;
	int found = 0, error = 0;

	switch (SCARG(uap, which)) {

	case PRIO_PROCESS:
		if (SCARG(uap, who) == 0)
			p = curp;
		else
			p = pfind(SCARG(uap, who));
		if (p == 0)
			break;
		error = donice(curp, p, SCARG(uap, prio));
		found++;
		break;

	case PRIO_PGRP: {
		struct pgrp *pg;

		if (SCARG(uap, who) == 0)
			pg = curp->p_pgrp;
		else if ((pg = pgfind(SCARG(uap, who))) == NULL)
			break;
		for (p = pg->pg_members.lh_first; p != 0;
		    p = p->p_pglist.le_next) {
			error = donice(curp, p, SCARG(uap, prio));
			found++;
		}
		break;
	}

	case PRIO_USER:
		if (SCARG(uap, who) == 0)
			SCARG(uap, who) = curp->p_ucred->cr_uid;
		proclist_lock_read();
		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next)
			if (p->p_ucred->cr_uid == SCARG(uap, who)) {
				error = donice(curp, p, SCARG(uap, prio));
				found++;
			}
		proclist_unlock_read();
		break;

	default:
		return (EINVAL);
	}
	if (found == 0)
		return (ESRCH);
	return (error);
}

int
donice(curp, chgp, n)
	struct proc *curp, *chgp;
	int n;
{
	struct pcred *pcred = curp->p_cred;

	if (pcred->pc_ucred->cr_uid && pcred->p_ruid &&
	    pcred->pc_ucred->cr_uid != chgp->p_ucred->cr_uid &&
	    pcred->p_ruid != chgp->p_ucred->cr_uid)
		return (EPERM);
	if (n > PRIO_MAX)
		n = PRIO_MAX;
	if (n < PRIO_MIN)
		n = PRIO_MIN;
	n += NZERO;
	if (n < chgp->p_nice && suser(pcred->pc_ucred, &curp->p_acflag))
		return (EACCES);
	chgp->p_nice = n;
	(void)resetpriority(chgp);
	return (0);
}

/* ARGSUSED */
int
sys_setrlimit(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_setrlimit_args /* {
		syscallarg(int) which;
		syscallarg(const struct rlimit *) rlp;
	} */ *uap = v;
	int which = SCARG(uap, which);
	struct rlimit alim;
	int error;

	error = copyin(SCARG(uap, rlp), &alim, sizeof(struct rlimit));
	if (error)
		return (error);
	return (dosetrlimit(p, p->p_cred, which, &alim));
}
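/*
 * Check and install a new resource limit for a process.  Raising either
 * limit above the current hard limit requires superuser credentials.
 * Limits are clamped to the relevant system maxima, a change to
 * RLIMIT_STACK adjusts the protection on the already-mapped user stack,
 * and a shared plimit structure is copied first (see limcopy()) so the
 * change does not leak to other processes.
 */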
int
dosetrlimit(p, cred, which, limp)
	struct proc *p;
	struct pcred *cred;
	int which;
	struct rlimit *limp;
{
	struct rlimit *alimp;
	extern unsigned maxdmap, maxsmap;
	struct plimit *newplim;
	int error;

	if ((u_int)which >= RLIM_NLIMITS)
		return (EINVAL);

	if (limp->rlim_cur < 0 || limp->rlim_max < 0)
		return (EINVAL);

	alimp = &p->p_rlimit[which];
	/* if we don't change the value, no need to limcopy() */
	if (limp->rlim_cur == alimp->rlim_cur &&
	    limp->rlim_max == alimp->rlim_max)
		return 0;

	if (limp->rlim_cur > alimp->rlim_max ||
	    limp->rlim_max > alimp->rlim_max)
		if ((error = suser(cred->pc_ucred, &p->p_acflag)) != 0)
			return (error);
	if (limp->rlim_cur > limp->rlim_max)
		limp->rlim_cur = limp->rlim_max;
	if (p->p_limit->p_refcnt > 1 &&
	    (p->p_limit->p_lflags & PL_SHAREMOD) == 0) {
		newplim = limcopy(p->p_limit);
		limfree(p->p_limit);
		p->p_limit = newplim;
		alimp = &p->p_rlimit[which];
	}

	switch (which) {

	case RLIMIT_DATA:
		if (limp->rlim_cur > maxdmap)
			limp->rlim_cur = maxdmap;
		if (limp->rlim_max > maxdmap)
			limp->rlim_max = maxdmap;
		break;

	case RLIMIT_STACK:
		if (limp->rlim_cur > maxsmap)
			limp->rlim_cur = maxsmap;
		if (limp->rlim_max > maxsmap)
			limp->rlim_max = maxsmap;

		/*
		 * The stack is allocated to the maximum size at exec time,
		 * with only "rlim_cur" bytes accessible (in other words, the
		 * stack is split into two contiguous regions at the
		 * "rlim_cur" byte boundary).
		 *
		 * Since allocation is done in terms of pages, round up
		 * "rlim_cur" (otherwise the two regions would overlap).
		 * If the stack limit is going up, make more of the stack
		 * accessible; if it is going down, make the difference
		 * inaccessible.
		 */
		limp->rlim_cur = round_page(limp->rlim_cur);
		if (limp->rlim_cur != alimp->rlim_cur) {
			vaddr_t addr;
			vsize_t size;
			vm_prot_t prot;

			if (limp->rlim_cur > alimp->rlim_cur) {
				prot = VM_PROT_ALL;
				size = limp->rlim_cur - alimp->rlim_cur;
				addr = USRSTACK - limp->rlim_cur;
			} else {
				prot = VM_PROT_NONE;
				size = alimp->rlim_cur - limp->rlim_cur;
				addr = USRSTACK - alimp->rlim_cur;
			}
			(void) uvm_map_protect(&p->p_vmspace->vm_map,
			    addr, addr+size, prot, FALSE);
		}
		break;

	case RLIMIT_NOFILE:
		if (limp->rlim_cur > maxfiles)
			limp->rlim_cur = maxfiles;
		if (limp->rlim_max > maxfiles)
			limp->rlim_max = maxfiles;
		break;

	case RLIMIT_NPROC:
		if (limp->rlim_cur > maxproc)
			limp->rlim_cur = maxproc;
		if (limp->rlim_max > maxproc)
			limp->rlim_max = maxproc;
		break;
	}
	*alimp = *limp;
	return (0);
}

/* ARGSUSED */
int
sys_getrlimit(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_getrlimit_args /* {
		syscallarg(int) which;
		syscallarg(struct rlimit *) rlp;
	} */ *uap = v;
	int which = SCARG(uap, which);

	if ((u_int)which >= RLIM_NLIMITS)
		return (EINVAL);
	return (copyout(&p->p_rlimit[which], SCARG(uap, rlp),
	    sizeof(struct rlimit)));
}

/*
 * Transform the running time and tick information in proc p into user,
 * system, and interrupt time usage.
 */
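/*
 * Illustrative example of the apportionment below: a process that has
 * accumulated 300 user ticks and 100 system ticks over 4 seconds of real
 * run time is charged 4s * 300/400 = 3s of user time and
 * 4s * 100/400 = 1s of system time.
 */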
void
calcru(p, up, sp, ip)
	struct proc *p;
	struct timeval *up;
	struct timeval *sp;
	struct timeval *ip;
{
	u_quad_t u, st, ut, it, tot;
	long sec, usec;
	int s;
	struct timeval tv;

	s = splstatclock();
	st = p->p_sticks;
	ut = p->p_uticks;
	it = p->p_iticks;
	splx(s);

	tot = st + ut + it;
	if (tot == 0) {
		up->tv_sec = up->tv_usec = 0;
		sp->tv_sec = sp->tv_usec = 0;
		if (ip != NULL)
			ip->tv_sec = ip->tv_usec = 0;
		return;
	}

	sec = p->p_rtime.tv_sec;
	usec = p->p_rtime.tv_usec;
	if (p->p_stat == SONPROC) {
		struct schedstate_percpu *spc;

		KDASSERT(p->p_cpu != NULL);
		spc = &p->p_cpu->ci_schedstate;

		/*
		 * Adjust for the current time slice.  This is actually fairly
		 * important since the error here is on the order of a time
		 * quantum, which is much greater than the sampling error.
		 */
		microtime(&tv);
		sec += tv.tv_sec - spc->spc_runtime.tv_sec;
		usec += tv.tv_usec - spc->spc_runtime.tv_usec;
	}
	u = (u_quad_t) sec * 1000000 + usec;
	st = (u * st) / tot;
	sp->tv_sec = st / 1000000;
	sp->tv_usec = st % 1000000;
	ut = (u * ut) / tot;
	up->tv_sec = ut / 1000000;
	up->tv_usec = ut % 1000000;
	if (ip != NULL) {
		it = (u * it) / tot;
		ip->tv_sec = it / 1000000;
		ip->tv_usec = it % 1000000;
	}
}

/* ARGSUSED */
int
sys_getrusage(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_getrusage_args /* {
		syscallarg(int) who;
		syscallarg(struct rusage *) rusage;
	} */ *uap = v;
	struct rusage *rup;

	switch (SCARG(uap, who)) {

	case RUSAGE_SELF:
		rup = &p->p_stats->p_ru;
		calcru(p, &rup->ru_utime, &rup->ru_stime, NULL);
		break;

	case RUSAGE_CHILDREN:
		rup = &p->p_stats->p_cru;
		break;

	default:
		return (EINVAL);
	}
	return (copyout(rup, SCARG(uap, rusage), sizeof(struct rusage)));
}

void
ruadd(ru, ru2)
	struct rusage *ru, *ru2;
{
	long *ip, *ip2;
	int i;

	timeradd(&ru->ru_utime, &ru2->ru_utime, &ru->ru_utime);
	timeradd(&ru->ru_stime, &ru2->ru_stime, &ru->ru_stime);
	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
	ip = &ru->ru_first; ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
		*ip++ += *ip2++;
}

/*
 * Make a copy of the plimit structure.
 * We share these structures copy-on-write after fork,
 * and copy when a limit is changed.
 */
struct plimit *
limcopy(lim)
	struct plimit *lim;
{
	struct plimit *newlim;

	newlim = pool_get(&plimit_pool, PR_WAITOK);
	memcpy(newlim->pl_rlimit, lim->pl_rlimit,
	    sizeof(struct rlimit) * RLIM_NLIMITS);
	if (lim->pl_corename == defcorename) {
		newlim->pl_corename = defcorename;
	} else {
		newlim->pl_corename = malloc(strlen(lim->pl_corename)+1,
		    M_TEMP, M_WAITOK);
		strcpy(newlim->pl_corename, lim->pl_corename);
	}
	newlim->p_lflags = 0;
	newlim->p_refcnt = 1;
	return (newlim);
}

void
limfree(lim)
	struct plimit *lim;
{

	if (--lim->p_refcnt > 0)
		return;
#ifdef DIAGNOSTIC
	if (lim->p_refcnt < 0)
		panic("limfree");
#endif
	if (lim->pl_corename != defcorename)
		free(lim->pl_corename, M_TEMP);
	pool_put(&plimit_pool, lim);
}