/*	$OpenBSD: kern_resource.c,v 1.65 2019/06/21 09:39:48 visa Exp $	*/
/*	$NetBSD: kern_resource.c,v 1.38 1996/10/23 07:19:38 matthias Exp $	*/

/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_resource.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/resourcevar.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/ktrace.h>
#include <sys/sched.h>
#include <sys/signalvar.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>

/* Resource usage check interval in msec */
#define RUCHECK_INTERVAL	1000

/* SIGXCPU interval in seconds of process runtime */
#define SIGXCPU_INTERVAL	5
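
/*
 * These two constants drive rucheck() below: once armed, the rucheck
 * timeout refires every RUCHECK_INTERVAL ms, and after the soft
 * RLIMIT_CPU limit has been exceeded, SIGXCPU is resent at most once
 * per SIGXCPU_INTERVAL seconds of accumulated process runtime.
 */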

struct plimit	*lim_copy(struct plimit *);
struct plimit	*lim_write_begin(void);
void		 lim_write_commit(struct plimit *);

void	tuagg_sub(struct tusage *, struct proc *);

/*
 * Patchable maximum data and stack limits.
 */
rlim_t maxdmap = MAXDSIZ;
rlim_t maxsmap = MAXSSIZ;

/*
 * Serializes resource limit updates.
 * This lock has to be held together with ps_mtx when updating
 * the process' ps_limit.
 */
struct rwlock rlimit_lock = RWLOCK_INITIALIZER("rlimitlk");

/*
 * Resource controls and accounting.
 */

int
sys_getpriority(struct proc *curp, void *v, register_t *retval)
{
	struct sys_getpriority_args /* {
		syscallarg(int) which;
		syscallarg(id_t) who;
	} */ *uap = v;
	struct process *pr;
	int low = NZERO + PRIO_MAX + 1;

	switch (SCARG(uap, which)) {

	case PRIO_PROCESS:
		if (SCARG(uap, who) == 0)
			pr = curp->p_p;
		else
			pr = prfind(SCARG(uap, who));
		if (pr == NULL)
			break;
		if (pr->ps_nice < low)
			low = pr->ps_nice;
		break;

	case PRIO_PGRP: {
		struct pgrp *pg;

		if (SCARG(uap, who) == 0)
			pg = curp->p_p->ps_pgrp;
		else if ((pg = pgfind(SCARG(uap, who))) == NULL)
			break;
		LIST_FOREACH(pr, &pg->pg_members, ps_pglist)
			if (pr->ps_nice < low)
				low = pr->ps_nice;
		break;
	}

	case PRIO_USER:
		if (SCARG(uap, who) == 0)
			SCARG(uap, who) = curp->p_ucred->cr_uid;
		LIST_FOREACH(pr, &allprocess, ps_list)
			if (pr->ps_ucred->cr_uid == SCARG(uap, who) &&
			    pr->ps_nice < low)
				low = pr->ps_nice;
		break;

	default:
		return (EINVAL);
	}
	if (low == NZERO + PRIO_MAX + 1)
		return (ESRCH);
	*retval = low - NZERO;
	return (0);
}
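
/*
 * Note on the NZERO bias: the kernel stores nice values biased by
 * NZERO (20 on OpenBSD), so ps_nice holds 0..40 for the user-visible
 * range PRIO_MIN..PRIO_MAX (-20..20).  The initial "low" above,
 * NZERO + PRIO_MAX + 1, is one past the largest biased value and thus
 * serves as a "no process found" sentinel; a surviving result is
 * unbiased with "low - NZERO" before being returned.
 */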

int
sys_setpriority(struct proc *curp, void *v, register_t *retval)
{
	struct sys_setpriority_args /* {
		syscallarg(int) which;
		syscallarg(id_t) who;
		syscallarg(int) prio;
	} */ *uap = v;
	struct process *pr;
	int found = 0, error = 0;

	switch (SCARG(uap, which)) {

	case PRIO_PROCESS:
		if (SCARG(uap, who) == 0)
			pr = curp->p_p;
		else
			pr = prfind(SCARG(uap, who));
		if (pr == NULL)
			break;
		error = donice(curp, pr, SCARG(uap, prio));
		found++;
		break;

	case PRIO_PGRP: {
		struct pgrp *pg;

		if (SCARG(uap, who) == 0)
			pg = curp->p_p->ps_pgrp;
		else if ((pg = pgfind(SCARG(uap, who))) == NULL)
			break;
		LIST_FOREACH(pr, &pg->pg_members, ps_pglist) {
			error = donice(curp, pr, SCARG(uap, prio));
			found++;
		}
		break;
	}

	case PRIO_USER:
		if (SCARG(uap, who) == 0)
			SCARG(uap, who) = curp->p_ucred->cr_uid;
		LIST_FOREACH(pr, &allprocess, ps_list)
			if (pr->ps_ucred->cr_uid == SCARG(uap, who)) {
				error = donice(curp, pr, SCARG(uap, prio));
				found++;
			}
		break;

	default:
		return (EINVAL);
	}
	if (found == 0)
		return (ESRCH);
	return (error);
}

int
donice(struct proc *curp, struct process *chgpr, int n)
{
	struct ucred *ucred = curp->p_ucred;
	struct proc *p;
	int s;

	if (ucred->cr_uid != 0 && ucred->cr_ruid != 0 &&
	    ucred->cr_uid != chgpr->ps_ucred->cr_uid &&
	    ucred->cr_ruid != chgpr->ps_ucred->cr_uid)
		return (EPERM);
	if (n > PRIO_MAX)
		n = PRIO_MAX;
	if (n < PRIO_MIN)
		n = PRIO_MIN;
	n += NZERO;
	if (n < chgpr->ps_nice && suser(curp))
		return (EACCES);
	chgpr->ps_nice = n;
	SCHED_LOCK(s);
	TAILQ_FOREACH(p, &chgpr->ps_threads, p_thr_link)
		(void)resetpriority(p);
	SCHED_UNLOCK(s);
	return (0);
}
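
/*
 * Permission summary for donice(): a caller with effective or real
 * uid 0 may change any process; otherwise the caller's effective or
 * real uid must match the target's effective uid.  The requested
 * value is clamped to [PRIO_MIN, PRIO_MAX] before biasing, and
 * lowering a nice value (raising priority) additionally requires
 * suser().  Afterwards every thread's scheduling priority is
 * recomputed under SCHED_LOCK.
 */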

int
sys_setrlimit(struct proc *p, void *v, register_t *retval)
{
	struct sys_setrlimit_args /* {
		syscallarg(int) which;
		syscallarg(const struct rlimit *) rlp;
	} */ *uap = v;
	struct rlimit alim;
	int error;

	error = copyin((caddr_t)SCARG(uap, rlp), (caddr_t)&alim,
		       sizeof (struct rlimit));
	if (error)
		return (error);
#ifdef KTRACE
	if (KTRPOINT(p, KTR_STRUCT))
		ktrrlimit(p, &alim);
#endif
	return (dosetrlimit(p, SCARG(uap, which), &alim));
}

int
dosetrlimit(struct proc *p, u_int which, struct rlimit *limp)
{
	struct rlimit *alimp;
	struct plimit *limit;
	rlim_t maxlim;
	int error;

	if (which >= RLIM_NLIMITS || limp->rlim_cur > limp->rlim_max)
		return (EINVAL);

	rw_enter_write(&rlimit_lock);

	alimp = &p->p_p->ps_limit->pl_rlimit[which];
	if (limp->rlim_max > alimp->rlim_max) {
		if ((error = suser(p)) != 0) {
			rw_exit_write(&rlimit_lock);
			return (error);
		}
	}

	/* Get exclusive write access to the limit structure. */
	limit = lim_write_begin();
	alimp = &limit->pl_rlimit[which];

	switch (which) {
	case RLIMIT_DATA:
		maxlim = maxdmap;
		break;
	case RLIMIT_STACK:
		maxlim = maxsmap;
		break;
	case RLIMIT_NOFILE:
		maxlim = maxfiles;
		break;
	case RLIMIT_NPROC:
		maxlim = maxprocess;
		break;
	default:
		maxlim = RLIM_INFINITY;
		break;
	}

	if (limp->rlim_max > maxlim)
		limp->rlim_max = maxlim;
	if (limp->rlim_cur > limp->rlim_max)
		limp->rlim_cur = limp->rlim_max;

	if (which == RLIMIT_CPU && limp->rlim_cur != RLIM_INFINITY &&
	    alimp->rlim_cur == RLIM_INFINITY)
		timeout_add_msec(&p->p_p->ps_rucheck_to, RUCHECK_INTERVAL);

	if (which == RLIMIT_STACK) {
		/*
		 * The stack is mapped to its maximum size at exec time,
		 * with only "rlim_cur" bytes of it accessible.  If the
		 * limit is being raised, make more bytes accessible;
		 * if it is being lowered, make the excess inaccessible.
		 */
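		/*
		 * Illustrative example (stack grows down, sizes made
		 * up): raising rlim_cur from 2MB to 8MB computes
		 * addr = vm_minsaddr - 8MB and size = 6MB, so exactly
		 * the newly permitted range [vm_minsaddr - 8MB,
		 * vm_minsaddr - 2MB) becomes PROT_READ|PROT_WRITE
		 * below; lowering the limit mirrors this with
		 * PROT_NONE.
		 */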
		if (limp->rlim_cur != alimp->rlim_cur) {
			vaddr_t addr;
			vsize_t size;
			vm_prot_t prot;
			struct vmspace *vm = p->p_vmspace;

			if (limp->rlim_cur > alimp->rlim_cur) {
				prot = PROT_READ | PROT_WRITE;
				size = limp->rlim_cur - alimp->rlim_cur;
#ifdef MACHINE_STACK_GROWS_UP
				addr = (vaddr_t)vm->vm_maxsaddr +
				    alimp->rlim_cur;
#else
				addr = (vaddr_t)vm->vm_minsaddr -
				    limp->rlim_cur;
#endif
			} else {
				prot = PROT_NONE;
				size = alimp->rlim_cur - limp->rlim_cur;
#ifdef MACHINE_STACK_GROWS_UP
				addr = (vaddr_t)vm->vm_maxsaddr +
				    limp->rlim_cur;
#else
				addr = (vaddr_t)vm->vm_minsaddr -
				    alimp->rlim_cur;
#endif
			}
			addr = trunc_page(addr);
			size = round_page(size);
			(void) uvm_map_protect(&vm->vm_map,
					      addr, addr+size, prot, FALSE);
		}
	}

	*alimp = *limp;

	lim_write_commit(limit);
	rw_exit_write(&rlimit_lock);

	return (0);
}

int
sys_getrlimit(struct proc *p, void *v, register_t *retval)
{
	struct sys_getrlimit_args /* {
		syscallarg(int) which;
		syscallarg(struct rlimit *) rlp;
	} */ *uap = v;
	struct plimit *limit;
	struct rlimit alimp;
	int error;

	if (SCARG(uap, which) < 0 || SCARG(uap, which) >= RLIM_NLIMITS)
		return (EINVAL);
	limit = lim_read_enter();
	alimp = limit->pl_rlimit[SCARG(uap, which)];
	lim_read_leave(limit);
	error = copyout(&alimp, SCARG(uap, rlp), sizeof(struct rlimit));
#ifdef KTRACE
	if (error == 0 && KTRPOINT(p, KTR_STRUCT))
		ktrrlimit(p, &alimp);
#endif
	return (error);
}

void
tuagg_sub(struct tusage *tup, struct proc *p)
{
	timespecadd(&tup->tu_runtime, &p->p_rtime, &tup->tu_runtime);
	tup->tu_uticks += p->p_uticks;
	tup->tu_sticks += p->p_sticks;
	tup->tu_iticks += p->p_iticks;
}

/*
 * Aggregate a single thread's immediate time counts into the running
 * totals for the thread and process.
 */
void
tuagg_unlocked(struct process *pr, struct proc *p)
{
	tuagg_sub(&pr->ps_tu, p);
	tuagg_sub(&p->p_tu, p);
	timespecclear(&p->p_rtime);
	p->p_uticks = 0;
	p->p_sticks = 0;
	p->p_iticks = 0;
}

void
tuagg(struct process *pr, struct proc *p)
{
	int s;

	SCHED_LOCK(s);
	tuagg_unlocked(pr, p);
	SCHED_UNLOCK(s);
}
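
/*
 * tuagg() takes SCHED_LOCK so that the snapshot-and-clear in
 * tuagg_unlocked() cannot race with the clock and scheduler paths
 * that advance p_rtime and the per-thread tick counters.
 */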

/*
 * Transform the running time and tick information in a struct tusage
 * into user, system, and interrupt time usage.
 */
void
calctsru(struct tusage *tup, struct timespec *up, struct timespec *sp,
    struct timespec *ip)
{
	u_quad_t st, ut, it;
	int freq;

	st = tup->tu_sticks;
	ut = tup->tu_uticks;
	it = tup->tu_iticks;

	if (st + ut + it == 0) {
		timespecclear(up);
		timespecclear(sp);
		if (ip != NULL)
			timespecclear(ip);
		return;
	}

	freq = stathz ? stathz : hz;

	st = st * 1000000000 / freq;
	sp->tv_sec = st / 1000000000;
	sp->tv_nsec = st % 1000000000;
	ut = ut * 1000000000 / freq;
	up->tv_sec = ut / 1000000000;
	up->tv_nsec = ut % 1000000000;
	if (ip != NULL) {
		it = it * 1000000000 / freq;
		ip->tv_sec = it / 1000000000;
		ip->tv_nsec = it % 1000000000;
	}
}
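
/*
 * Worked example of the conversion above: with stathz = 128 (hz is
 * used only when stathz is 0) and tu_uticks = 512,
 * ut = 512 * 1000000000 / 128 = 4000000000 ns, so up becomes
 * 4 s 0 ns.  The tick counts are illustrative only.
 */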

void
calcru(struct tusage *tup, struct timeval *up, struct timeval *sp,
    struct timeval *ip)
{
	struct timespec u, s, i;

	calctsru(tup, &u, &s, ip != NULL ? &i : NULL);
	TIMESPEC_TO_TIMEVAL(up, &u);
	TIMESPEC_TO_TIMEVAL(sp, &s);
	if (ip != NULL)
		TIMESPEC_TO_TIMEVAL(ip, &i);
}

int
sys_getrusage(struct proc *p, void *v, register_t *retval)
{
	struct sys_getrusage_args /* {
		syscallarg(int) who;
		syscallarg(struct rusage *) rusage;
	} */ *uap = v;
	struct rusage ru;
	int error;

	error = dogetrusage(p, SCARG(uap, who), &ru);
	if (error == 0) {
		error = copyout(&ru, SCARG(uap, rusage), sizeof(ru));
#ifdef KTRACE
		if (error == 0 && KTRPOINT(p, KTR_STRUCT))
			ktrrusage(p, &ru);
#endif
	}
	return (error);
}

int
dogetrusage(struct proc *p, int who, struct rusage *rup)
{
	struct process *pr = p->p_p;
	struct proc *q;

	switch (who) {

	case RUSAGE_SELF:
		/* start with the sum of dead threads, if any */
		if (pr->ps_ru != NULL)
			*rup = *pr->ps_ru;
		else
			memset(rup, 0, sizeof(*rup));

		/* add on all living threads */
		TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
			ruadd(rup, &q->p_ru);
			tuagg(pr, q);
		}

		calcru(&pr->ps_tu, &rup->ru_utime, &rup->ru_stime, NULL);
		break;

	case RUSAGE_THREAD:
		*rup = p->p_ru;
		calcru(&p->p_tu, &rup->ru_utime, &rup->ru_stime, NULL);
		break;

	case RUSAGE_CHILDREN:
		*rup = pr->ps_cru;
		break;

	default:
		return (EINVAL);
	}
	return (0);
}

void
ruadd(struct rusage *ru, struct rusage *ru2)
{
	long *ip, *ip2;
	int i;

	timeradd(&ru->ru_utime, &ru2->ru_utime, &ru->ru_utime);
	timeradd(&ru->ru_stime, &ru2->ru_stime, &ru->ru_stime);
	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
	ip = &ru->ru_first; ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
		*ip++ += *ip2++;
}
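
/*
 * The loop above depends on the BSD layout of struct rusage: the
 * integral fields from ru_first through ru_last (macros from
 * <sys/resource.h> naming the first and last such members) lie
 * contiguously, so the two structures can be summed as arrays of
 * longs.
 */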

/*
 * Check if the process exceeds its cpu resource allocation.
 * If over max, kill it.
 */
void
rucheck(void *arg)
{
	struct rlimit rlim;
	struct process *pr = arg;
	time_t runtime;
	int s;

	KERNEL_ASSERT_LOCKED();

	SCHED_LOCK(s);
	runtime = pr->ps_tu.tu_runtime.tv_sec;
	SCHED_UNLOCK(s);

	mtx_enter(&pr->ps_mtx);
	rlim = pr->ps_limit->pl_rlimit[RLIMIT_CPU];
	mtx_leave(&pr->ps_mtx);

	if ((rlim_t)runtime >= rlim.rlim_cur) {
		if ((rlim_t)runtime >= rlim.rlim_max) {
			prsignal(pr, SIGKILL);
		} else if (runtime >= pr->ps_nextxcpu) {
			prsignal(pr, SIGXCPU);
			pr->ps_nextxcpu = runtime + SIGXCPU_INTERVAL;
		}
	}

	timeout_add_msec(&pr->ps_rucheck_to, RUCHECK_INTERVAL);
}

struct pool plimit_pool;

void
lim_startup(struct plimit *limit0)
{
	rlim_t lim;
	int i;

	pool_init(&plimit_pool, sizeof(struct plimit), 0, IPL_MPFLOOR,
	    PR_WAITOK, "plimitpl", NULL);

	for (i = 0; i < nitems(limit0->pl_rlimit); i++)
		limit0->pl_rlimit[i].rlim_cur =
		    limit0->pl_rlimit[i].rlim_max = RLIM_INFINITY;
	limit0->pl_rlimit[RLIMIT_NOFILE].rlim_cur = NOFILE;
	limit0->pl_rlimit[RLIMIT_NOFILE].rlim_max = MIN(NOFILE_MAX,
	    (maxfiles - NOFILE > NOFILE) ? maxfiles - NOFILE : NOFILE);
	limit0->pl_rlimit[RLIMIT_NPROC].rlim_cur = MAXUPRC;
	lim = ptoa(uvmexp.free);
	limit0->pl_rlimit[RLIMIT_RSS].rlim_max = lim;
	limit0->pl_rlimit[RLIMIT_MEMLOCK].rlim_max = lim;
	limit0->pl_rlimit[RLIMIT_MEMLOCK].rlim_cur = lim / 3;
	limit0->pl_refcnt = 1;
}
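
/*
 * limit0 is the initial process' limit set, which descendants inherit
 * via lim_fork().  Most limits start infinite; as an example of the
 * derived defaults, the RLIMIT_MEMLOCK soft limit is seeded with one
 * third of the free physical memory observed at boot (lim / 3 above).
 */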

/*
 * Make a copy of the plimit structure.
 * We share these structures copy-on-write after fork,
 * and copy when a limit is changed.
 */
struct plimit *
lim_copy(struct plimit *lim)
{
	struct plimit *newlim;

	newlim = pool_get(&plimit_pool, PR_WAITOK);
	memcpy(newlim->pl_rlimit, lim->pl_rlimit,
	    sizeof(struct rlimit) * RLIM_NLIMITS);
	newlim->pl_refcnt = 1;
	return (newlim);
}

void
lim_free(struct plimit *lim)
{
	if (atomic_dec_int_nv(&lim->pl_refcnt) > 0)
		return;
	pool_put(&plimit_pool, lim);
}

void
lim_fork(struct process *parent, struct process *child)
{
	struct plimit *limit;

	mtx_enter(&parent->ps_mtx);
	limit = parent->ps_limit;
	atomic_inc_int(&limit->pl_refcnt);
	mtx_leave(&parent->ps_mtx);

	child->ps_limit = limit;

	if (limit->pl_rlimit[RLIMIT_CPU].rlim_cur != RLIM_INFINITY)
		timeout_add_msec(&child->ps_rucheck_to, RUCHECK_INTERVAL);
}
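
/*
 * Reference counting ties the life cycle together: lim_copy() returns
 * a structure with pl_refcnt 1, lim_fork() shares the parent's
 * structure by incrementing pl_refcnt, and lim_free() drops a
 * reference, returning the structure to the pool when the count hits
 * zero.  A later write through lim_write_begin() copies the structure
 * while it is still shared, giving copy-on-write behavior.
 */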

/*
 * Return an exclusive write reference to the process' resource limit structure.
 * The caller has to release the structure by calling lim_write_commit().
 *
 * This invalidates any plimit read reference held by the calling thread.
 */
struct plimit *
lim_write_begin(void)
{
	struct plimit *limit;
	struct proc *p = curproc;

	rw_assert_wrlock(&rlimit_lock);

	if (p->p_limit != NULL)
		lim_free(p->p_limit);
	p->p_limit = NULL;

	/*
	 * It is safe to access ps_limit here without holding ps_mtx
	 * because rlimit_lock excludes other writers.
	 */

	limit = p->p_p->ps_limit;
	if (P_HASSIBLING(p) || limit->pl_refcnt > 1)
		limit = lim_copy(limit);

	return (limit);
}
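
/*
 * Sketch of the write protocol as dosetrlimit() above uses it:
 *
 *	rw_enter_write(&rlimit_lock);
 *	limit = lim_write_begin();
 *	limit->pl_rlimit[which] = new_value;	(modify the private copy)
 *	lim_write_commit(limit);
 *	rw_exit_write(&rlimit_lock);
 *
 * lim_write_commit() publishes the copy (if one was made) under
 * ps_mtx and drops the reference to the old structure.
 */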

/*
 * Finish exclusive write access to the plimit structure.
 * This makes the structure visible to other threads in the process.
 */
void
lim_write_commit(struct plimit *limit)
{
	struct plimit *olimit;
	struct proc *p = curproc;

	rw_assert_wrlock(&rlimit_lock);

	if (limit != p->p_p->ps_limit) {
		mtx_enter(&p->p_p->ps_mtx);
		olimit = p->p_p->ps_limit;
		p->p_p->ps_limit = limit;
		mtx_leave(&p->p_p->ps_mtx);

		lim_free(olimit);
	}
}

/*
 * Begin read access to the process' resource limit structure.
 * The access has to be finished by calling lim_read_leave().
 *
 * Sections denoted by lim_read_enter() and lim_read_leave() cannot nest.
 */
struct plimit *
lim_read_enter(void)
{
	struct plimit *limit;
	struct proc *p = curproc;
	struct process *pr = p->p_p;

	/*
	 * This thread might not observe the latest value of ps_limit
	 * if another thread updated the limits very recently on another CPU.
	 * However, the anomaly should disappear quickly, especially if
	 * there is any synchronization activity between the threads (or
	 * the CPUs).
	 */

	limit = p->p_limit;
	if (limit != pr->ps_limit) {
		mtx_enter(&pr->ps_mtx);
		limit = pr->ps_limit;
		atomic_inc_int(&limit->pl_refcnt);
		mtx_leave(&pr->ps_mtx);
		if (p->p_limit != NULL)
			lim_free(p->p_limit);
		p->p_limit = limit;
	}
	KASSERT(limit != NULL);
	return (limit);
}
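
/*
 * The counted reference taken above stays cached in p_limit, so a
 * thread that reads limits repeatedly takes ps_mtx only when another
 * thread has installed a new ps_limit in the meantime.  The matching
 * lim_read_leave() is not defined in this file.
 */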

/*
 * Get the value of the resource limit in the given process.
 */
rlim_t
lim_cur_proc(struct proc *p, int which)
{
	struct process *pr = p->p_p;
	rlim_t val;

	KASSERT(which >= 0 && which < RLIM_NLIMITS);

	mtx_enter(&pr->ps_mtx);
	val = pr->ps_limit->pl_rlimit[which].rlim_cur;
	mtx_leave(&pr->ps_mtx);
	return (val);
}
729