/*	$NetBSD: kern_resource.c,v 1.45 1998/03/01 02:22:29 fvdl Exp $	*/

/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_resource.c	8.8 (Berkeley) 2/14/95
 */

#include "opt_uvm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/resourcevar.h>
#include <sys/malloc.h>
#include <sys/proc.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <vm/vm.h>

#if defined(UVM)
#include <uvm/uvm_extern.h>
#endif

void limfree __P((struct plimit *));
/*
 * Resource controls and accounting.
 */

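/*
 * getpriority system call: report the lowest nice value (i.e. the most
 * favourable scheduling priority) found among the processes selected by
 * the which/who pair.
 */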
int
sys_getpriority(curp, v, retval)
	struct proc *curp;
	void *v;
	register_t *retval;
{
	register struct sys_getpriority_args /* {
		syscallarg(int) which;
		syscallarg(int) who;
	} */ *uap = v;
	register struct proc *p;
	register int low = NZERO + PRIO_MAX + 1;

	switch (SCARG(uap, which)) {

	case PRIO_PROCESS:
		if (SCARG(uap, who) == 0)
			p = curp;
		else
			p = pfind(SCARG(uap, who));
		if (p == 0)
			break;
		low = p->p_nice;
		break;

	case PRIO_PGRP: {
		register struct pgrp *pg;

		if (SCARG(uap, who) == 0)
			pg = curp->p_pgrp;
		else if ((pg = pgfind(SCARG(uap, who))) == NULL)
			break;
		for (p = pg->pg_members.lh_first; p != 0;
		     p = p->p_pglist.le_next) {
			if (p->p_nice < low)
				low = p->p_nice;
		}
		break;
	}

	case PRIO_USER:
		if (SCARG(uap, who) == 0)
			SCARG(uap, who) = curp->p_ucred->cr_uid;
		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next)
			if (p->p_ucred->cr_uid == SCARG(uap, who) &&
			    p->p_nice < low)
				low = p->p_nice;
		break;

	default:
		return (EINVAL);
	}
	if (low == NZERO + PRIO_MAX + 1)
		return (ESRCH);
	*retval = low - NZERO;
	return (0);
}

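/*
 * setpriority system call: apply the given nice value, via donice(),
 * to every process selected by the which/who pair.
 */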
/* ARGSUSED */
int
sys_setpriority(curp, v, retval)
	struct proc *curp;
	void *v;
	register_t *retval;
{
	register struct sys_setpriority_args /* {
		syscallarg(int) which;
		syscallarg(int) who;
		syscallarg(int) prio;
	} */ *uap = v;
	register struct proc *p;
	int found = 0, error = 0;

	switch (SCARG(uap, which)) {

	case PRIO_PROCESS:
		if (SCARG(uap, who) == 0)
			p = curp;
		else
			p = pfind(SCARG(uap, who));
		if (p == 0)
			break;
		error = donice(curp, p, SCARG(uap, prio));
		found++;
		break;

	case PRIO_PGRP: {
		register struct pgrp *pg;

		if (SCARG(uap, who) == 0)
			pg = curp->p_pgrp;
		else if ((pg = pgfind(SCARG(uap, who))) == NULL)
			break;
		for (p = pg->pg_members.lh_first; p != 0;
		    p = p->p_pglist.le_next) {
			error = donice(curp, p, SCARG(uap, prio));
			found++;
		}
		break;
	}

	case PRIO_USER:
		if (SCARG(uap, who) == 0)
			SCARG(uap, who) = curp->p_ucred->cr_uid;
		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next)
			if (p->p_ucred->cr_uid == SCARG(uap, who)) {
				error = donice(curp, p, SCARG(uap, prio));
				found++;
			}
		break;

	default:
		return (EINVAL);
	}
	if (found == 0)
		return (ESRCH);
	return (error);
}

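/*
 * Change the nice value of process chgp on behalf of curp.  An
 * unprivileged caller may only affect processes whose effective uid
 * matches its own uids, and only the super-user may lower the value.
 */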
int
donice(curp, chgp, n)
	register struct proc *curp, *chgp;
	register int n;
{
	register struct pcred *pcred = curp->p_cred;

	if (pcred->pc_ucred->cr_uid && pcred->p_ruid &&
	    pcred->pc_ucred->cr_uid != chgp->p_ucred->cr_uid &&
	    pcred->p_ruid != chgp->p_ucred->cr_uid)
		return (EPERM);
	if (n > PRIO_MAX)
		n = PRIO_MAX;
	if (n < PRIO_MIN)
		n = PRIO_MIN;
	n += NZERO;
	if (n < chgp->p_nice && suser(pcred->pc_ucred, &curp->p_acflag))
		return (EACCES);
	chgp->p_nice = n;
	(void)resetpriority(chgp);
	return (0);
}

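/*
 * setrlimit system call: copy the new limit in from user space and
 * let dosetrlimit() do the real work.
 */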
/* ARGSUSED */
int
sys_setrlimit(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	register struct sys_setrlimit_args /* {
		syscallarg(int) which;
		syscallarg(const struct rlimit *) rlp;
	} */ *uap = v;
	int which = SCARG(uap, which);
	struct rlimit alim;
	int error;

	error = copyin(SCARG(uap, rlp), &alim, sizeof (struct rlimit));
	if (error)
		return (error);
	return (dosetrlimit(p, which, &alim));
}

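/*
 * Validate and install a new resource limit.  Raising a limit above the
 * current hard limit requires super-user privilege, and a shared plimit
 * structure is copied before being modified.  The data, stack, open file
 * and process limits are clamped to the system maxima, and changing the
 * stack limit adjusts the protection on the stack region to match.
 */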
int
dosetrlimit(p, which, limp)
	struct proc *p;
	int which;
	struct rlimit *limp;
{
	register struct rlimit *alimp;
	extern unsigned maxdmap, maxsmap;
	int error;

	if ((u_int)which >= RLIM_NLIMITS)
		return (EINVAL);

	if (limp->rlim_cur < 0 || limp->rlim_max < 0)
		return (EINVAL);

	alimp = &p->p_rlimit[which];
	if (limp->rlim_cur > alimp->rlim_max ||
	    limp->rlim_max > alimp->rlim_max)
		if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
			return (error);
	if (limp->rlim_cur > limp->rlim_max)
		limp->rlim_cur = limp->rlim_max;
	if (p->p_limit->p_refcnt > 1 &&
	    (p->p_limit->p_lflags & PL_SHAREMOD) == 0) {
		p->p_limit->p_refcnt--;
		p->p_limit = limcopy(p->p_limit);
		alimp = &p->p_rlimit[which];
	}

	switch (which) {

	case RLIMIT_DATA:
		if (limp->rlim_cur > maxdmap)
			limp->rlim_cur = maxdmap;
		if (limp->rlim_max > maxdmap)
			limp->rlim_max = maxdmap;
		break;

	case RLIMIT_STACK:
		if (limp->rlim_cur > maxsmap)
			limp->rlim_cur = maxsmap;
		if (limp->rlim_max > maxsmap)
			limp->rlim_max = maxsmap;

		/*
		 * The stack is allocated to its maximum size at exec
		 * time, with only "rlim_cur" bytes of it accessible
		 * (in other words, the stack area is split into two
		 * contiguous regions at the "rlim_cur" byte boundary).
		 *
		 * Since the allocation is done in whole pages, round
		 * "rlim_cur" up to a page boundary (otherwise the two
		 * regions would overlap).  If the stack limit is going
		 * up, make more of the stack accessible; if it is
		 * going down, make the difference inaccessible.
		 */
		limp->rlim_cur = round_page(limp->rlim_cur);
		if (limp->rlim_cur != alimp->rlim_cur) {
			vm_offset_t addr;
			vm_size_t size;
			vm_prot_t prot;

			if (limp->rlim_cur > alimp->rlim_cur) {
				prot = VM_PROT_ALL;
				size = limp->rlim_cur - alimp->rlim_cur;
				addr = USRSTACK - limp->rlim_cur;
			} else {
				prot = VM_PROT_NONE;
				size = alimp->rlim_cur - limp->rlim_cur;
				addr = USRSTACK - alimp->rlim_cur;
			}
#if defined(UVM)
			(void) uvm_map_protect(&p->p_vmspace->vm_map,
					      addr, addr+size, prot, FALSE);
#else
			(void) vm_map_protect(&p->p_vmspace->vm_map,
					      addr, addr+size, prot, FALSE);
#endif
		}
		break;

	case RLIMIT_NOFILE:
		if (limp->rlim_cur > maxfiles)
			limp->rlim_cur = maxfiles;
		if (limp->rlim_max > maxfiles)
			limp->rlim_max = maxfiles;
		break;

	case RLIMIT_NPROC:
		if (limp->rlim_cur > maxproc)
			limp->rlim_cur = maxproc;
		if (limp->rlim_max > maxproc)
			limp->rlim_max = maxproc;
		break;
	}
	*alimp = *limp;
	return (0);
}

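/*
 * getrlimit system call: copy the requested resource limit out to
 * user space.
 */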
/* ARGSUSED */
int
sys_getrlimit(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	register struct sys_getrlimit_args /* {
		syscallarg(int) which;
		syscallarg(struct rlimit *) rlp;
	} */ *uap = v;
	int which = SCARG(uap, which);

	if ((u_int)which >= RLIM_NLIMITS)
		return (EINVAL);
	return (copyout(&p->p_rlimit[which], SCARG(uap, rlp),
	    sizeof (struct rlimit)));
}

/*
 * Transform the running time and tick information in proc p into user,
 * system, and interrupt time usage.
 */
void
calcru(p, up, sp, ip)
	register struct proc *p;
	register struct timeval *up;
	register struct timeval *sp;
	register struct timeval *ip;
{
	register u_quad_t u, st, ut, it, tot;
	register long sec, usec;
	register int s;
	struct timeval tv;

	s = splstatclock();
	st = p->p_sticks;
	ut = p->p_uticks;
	it = p->p_iticks;
	splx(s);

	tot = st + ut + it;
	if (tot == 0) {
		up->tv_sec = up->tv_usec = 0;
		sp->tv_sec = sp->tv_usec = 0;
		if (ip != NULL)
			ip->tv_sec = ip->tv_usec = 0;
		return;
	}

	sec = p->p_rtime.tv_sec;
	usec = p->p_rtime.tv_usec;
	if (p == curproc) {
		/*
		 * Adjust for the current time slice.  This is actually fairly
		 * important since the error here is on the order of a time
		 * quantum, which is much greater than the sampling error.
		 */
		microtime(&tv);
		sec += tv.tv_sec - runtime.tv_sec;
		usec += tv.tv_usec - runtime.tv_usec;
	}
	u = (u_quad_t) sec * 1000000 + usec;
	st = (u * st) / tot;
	sp->tv_sec = st / 1000000;
	sp->tv_usec = st % 1000000;
	ut = (u * ut) / tot;
	up->tv_sec = ut / 1000000;
	up->tv_usec = ut % 1000000;
	if (ip != NULL) {
		it = (u * it) / tot;
		ip->tv_sec = it / 1000000;
		ip->tv_usec = it % 1000000;
	}
}

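/*
 * getrusage system call: copy out resource usage for the calling
 * process (with the time fields brought up to date by calcru()) or
 * for its terminated children.
 */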
/* ARGSUSED */
int
sys_getrusage(p, v, retval)
	register struct proc *p;
	void *v;
	register_t *retval;
{
	register struct sys_getrusage_args /* {
		syscallarg(int) who;
		syscallarg(struct rusage *) rusage;
	} */ *uap = v;
	register struct rusage *rup;

	switch (SCARG(uap, who)) {

	case RUSAGE_SELF:
		rup = &p->p_stats->p_ru;
		calcru(p, &rup->ru_utime, &rup->ru_stime, NULL);
		break;

	case RUSAGE_CHILDREN:
		rup = &p->p_stats->p_cru;
		break;

	default:
		return (EINVAL);
	}
	return (copyout(rup, SCARG(uap, rusage), sizeof (struct rusage)));
}

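/*
 * Accumulate the resource usage in ru2 into ru: the times are summed,
 * the maximum RSS is the larger of the two, and the remaining counters
 * are added field by field.
 */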
void
ruadd(ru, ru2)
	register struct rusage *ru, *ru2;
{
	register long *ip, *ip2;
	register int i;

	timeradd(&ru->ru_utime, &ru2->ru_utime, &ru->ru_utime);
	timeradd(&ru->ru_stime, &ru2->ru_stime, &ru->ru_stime);
	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
	ip = &ru->ru_first; ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
		*ip++ += *ip2++;
}

/*
 * Make a copy of the plimit structure.
 * We share these structures copy-on-write after fork,
 * and copy when a limit is changed.
 */
struct plimit *
limcopy(lim)
	struct plimit *lim;
{
	register struct plimit *newlim;

	MALLOC(newlim, struct plimit *, sizeof(struct plimit),
	    M_SUBPROC, M_WAITOK);
	bcopy(lim->pl_rlimit, newlim->pl_rlimit,
	    sizeof(struct rlimit) * RLIM_NLIMITS);
	newlim->p_lflags = 0;
	newlim->p_refcnt = 1;
	return (newlim);
}

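/*
 * Release a reference to a plimit structure and free it once the last
 * reference is gone.
 */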
void
limfree(lim)
	struct plimit *lim;
{

	if (--lim->p_refcnt > 0)
		return;
	FREE(lim, M_SUBPROC);
}