/*	$OpenBSD: kern_resource.c,v 1.74 2022/05/28 03:47:43 deraadt Exp $	*/
/*	$NetBSD: kern_resource.c,v 1.38 1996/10/23 07:19:38 matthias Exp $	*/

/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_resource.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/resourcevar.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/ktrace.h>
#include <sys/sched.h>
#include <sys/signalvar.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>

/* Resource usage check interval in msec */
#define RUCHECK_INTERVAL	1000

/* SIGXCPU interval in seconds of process runtime */
#define SIGXCPU_INTERVAL	5
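/*
 * Once the soft CPU limit is exceeded, rucheck() below resends SIGXCPU
 * every SIGXCPU_INTERVAL seconds of accumulated runtime until the hard
 * limit is reached, at which point the process is sent SIGKILL.
 */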

struct plimit	*lim_copy(struct plimit *);
struct plimit	*lim_write_begin(void);
void		 lim_write_commit(struct plimit *);

void	tuagg_sub(struct tusage *, struct proc *);

/*
 * Patchable maximum data and stack limits.
 */
rlim_t maxdmap = MAXDSIZ;
rlim_t maxsmap = MAXSSIZ;

/*
 * Serializes resource limit updates.
 * This lock has to be held together with ps_mtx when updating
 * the process' ps_limit.
 */
struct rwlock rlimit_lock = RWLOCK_INITIALIZER("rlimitlk");

/*
 * Resource controls and accounting.
 */

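/*
 * getpriority(2): report the lowest nice value (i.e. the highest
 * scheduling priority) among the processes matched by `which' and `who'.
 */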
int
sys_getpriority(struct proc *curp, void *v, register_t *retval)
{
	struct sys_getpriority_args /* {
		syscallarg(int) which;
		syscallarg(id_t) who;
	} */ *uap = v;
	struct process *pr;
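	/* Sentinel: one greater than any valid NZERO-biased nice value. */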
	int low = NZERO + PRIO_MAX + 1;

	switch (SCARG(uap, which)) {

	case PRIO_PROCESS:
		if (SCARG(uap, who) == 0)
			pr = curp->p_p;
		else
			pr = prfind(SCARG(uap, who));
		if (pr == NULL)
			break;
		if (pr->ps_nice < low)
			low = pr->ps_nice;
		break;

	case PRIO_PGRP: {
		struct pgrp *pg;

		if (SCARG(uap, who) == 0)
			pg = curp->p_p->ps_pgrp;
		else if ((pg = pgfind(SCARG(uap, who))) == NULL)
			break;
		LIST_FOREACH(pr, &pg->pg_members, ps_pglist)
			if (pr->ps_nice < low)
				low = pr->ps_nice;
		break;
	}

	case PRIO_USER:
		if (SCARG(uap, who) == 0)
			SCARG(uap, who) = curp->p_ucred->cr_uid;
		LIST_FOREACH(pr, &allprocess, ps_list)
			if (pr->ps_ucred->cr_uid == SCARG(uap, who) &&
			    pr->ps_nice < low)
				low = pr->ps_nice;
		break;

	default:
		return (EINVAL);
	}
	if (low == NZERO + PRIO_MAX + 1)
		return (ESRCH);
	*retval = low - NZERO;
	return (0);
}

int
sys_setpriority(struct proc *curp, void *v, register_t *retval)
{
	struct sys_setpriority_args /* {
		syscallarg(int) which;
		syscallarg(id_t) who;
		syscallarg(int) prio;
	} */ *uap = v;
	struct process *pr;
	int found = 0, error = 0;

	switch (SCARG(uap, which)) {

	case PRIO_PROCESS:
		if (SCARG(uap, who) == 0)
			pr = curp->p_p;
		else
			pr = prfind(SCARG(uap, who));
		if (pr == NULL)
			break;
		error = donice(curp, pr, SCARG(uap, prio));
		found = 1;
		break;

	case PRIO_PGRP: {
		struct pgrp *pg;

		if (SCARG(uap, who) == 0)
			pg = curp->p_p->ps_pgrp;
		else if ((pg = pgfind(SCARG(uap, who))) == NULL)
			break;
		LIST_FOREACH(pr, &pg->pg_members, ps_pglist) {
			error = donice(curp, pr, SCARG(uap, prio));
			found = 1;
		}
		break;
	}

	case PRIO_USER:
		if (SCARG(uap, who) == 0)
			SCARG(uap, who) = curp->p_ucred->cr_uid;
		LIST_FOREACH(pr, &allprocess, ps_list)
			if (pr->ps_ucred->cr_uid == SCARG(uap, who)) {
				error = donice(curp, pr, SCARG(uap, prio));
				found = 1;
			}
		break;

	default:
		return (EINVAL);
	}
	if (!found)
		return (ESRCH);
	return (error);
}

int
donice(struct proc *curp, struct process *chgpr, int n)
{
	struct ucred *ucred = curp->p_ucred;
	struct proc *p;
	int s;

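	/*
	 * The caller must be root (effective or real uid 0) or have an
	 * effective or real uid matching the target's effective uid.
	 */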
	if (ucred->cr_uid != 0 && ucred->cr_ruid != 0 &&
	    ucred->cr_uid != chgpr->ps_ucred->cr_uid &&
	    ucred->cr_ruid != chgpr->ps_ucred->cr_uid)
		return (EPERM);
	if (n > PRIO_MAX)
		n = PRIO_MAX;
	if (n < PRIO_MIN)
		n = PRIO_MIN;
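	/*
	 * Bias into the kernel's internal range: user nice in
	 * [PRIO_MIN, PRIO_MAX] maps to [0, 2 * NZERO] (NZERO is
	 * typically 20, so -20..20 becomes 0..40).
	 */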
	n += NZERO;
	if (n < chgpr->ps_nice && suser(curp))
		return (EACCES);
	chgpr->ps_nice = n;
	SCHED_LOCK(s);
	TAILQ_FOREACH(p, &chgpr->ps_threads, p_thr_link) {
		setpriority(p, p->p_estcpu, n);
	}
	SCHED_UNLOCK(s);
	return (0);
}

int
sys_setrlimit(struct proc *p, void *v, register_t *retval)
{
	struct sys_setrlimit_args /* {
		syscallarg(int) which;
		syscallarg(const struct rlimit *) rlp;
	} */ *uap = v;
	struct rlimit alim;
	int error;

	error = copyin((caddr_t)SCARG(uap, rlp), (caddr_t)&alim,
		       sizeof (struct rlimit));
	if (error)
		return (error);
#ifdef KTRACE
	if (KTRPOINT(p, KTR_STRUCT))
		ktrrlimit(p, &alim);
#endif
	return (dosetrlimit(p, SCARG(uap, which), &alim));
}

int
dosetrlimit(struct proc *p, u_int which, struct rlimit *limp)
{
	struct rlimit *alimp;
	struct plimit *limit;
	rlim_t maxlim;
	int error;

	if (which >= RLIM_NLIMITS || limp->rlim_cur > limp->rlim_max)
		return (EINVAL);

	rw_enter_write(&rlimit_lock);

	alimp = &p->p_p->ps_limit->pl_rlimit[which];
	if (limp->rlim_max > alimp->rlim_max) {
		if ((error = suser(p)) != 0) {
			rw_exit_write(&rlimit_lock);
			return (error);
		}
	}

	/* Get exclusive write access to the limit structure. */
	limit = lim_write_begin();
	alimp = &limit->pl_rlimit[which];

	switch (which) {
	case RLIMIT_DATA:
		maxlim = maxdmap;
		break;
	case RLIMIT_STACK:
		maxlim = maxsmap;
		break;
	case RLIMIT_NOFILE:
		maxlim = maxfiles;
		break;
	case RLIMIT_NPROC:
		maxlim = maxprocess;
		break;
	default:
		maxlim = RLIM_INFINITY;
		break;
	}

	if (limp->rlim_max > maxlim)
		limp->rlim_max = maxlim;
	if (limp->rlim_cur > limp->rlim_max)
		limp->rlim_cur = limp->rlim_max;

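	/*
	 * Arm the periodic rucheck() timeout when a finite CPU limit
	 * is first imposed on the process.
	 */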
	if (which == RLIMIT_CPU && limp->rlim_cur != RLIM_INFINITY &&
	    alimp->rlim_cur == RLIM_INFINITY)
		timeout_add_msec(&p->p_p->ps_rucheck_to, RUCHECK_INTERVAL);

	if (which == RLIMIT_STACK) {
		/*
		 * Stack is allocated to the max at exec time with only
		 * "rlim_cur" bytes accessible.  If the limit is raised,
		 * make more of the stack accessible; if it is lowered,
		 * make the excess inaccessible.
		 */
		if (limp->rlim_cur != alimp->rlim_cur) {
			vaddr_t addr;
			vsize_t size;
			vm_prot_t prot;
			struct vmspace *vm = p->p_vmspace;

			if (limp->rlim_cur > alimp->rlim_cur) {
				prot = PROT_READ | PROT_WRITE;
				size = limp->rlim_cur - alimp->rlim_cur;
#ifdef MACHINE_STACK_GROWS_UP
				addr = (vaddr_t)vm->vm_maxsaddr +
				    alimp->rlim_cur;
#else
				addr = (vaddr_t)vm->vm_minsaddr -
				    limp->rlim_cur;
#endif
			} else {
				prot = PROT_NONE;
				size = alimp->rlim_cur - limp->rlim_cur;
#ifdef MACHINE_STACK_GROWS_UP
				addr = (vaddr_t)vm->vm_maxsaddr +
				    limp->rlim_cur;
#else
				addr = (vaddr_t)vm->vm_minsaddr -
				    alimp->rlim_cur;
#endif
			}
			addr = trunc_page(addr);
			size = round_page(size);
			KERNEL_LOCK();
			(void) uvm_map_protect(&vm->vm_map,
					      addr, addr+size, prot, FALSE);
			KERNEL_UNLOCK();
		}
	}

	*alimp = *limp;

	lim_write_commit(limit);
	rw_exit_write(&rlimit_lock);

	return (0);
}

int
sys_getrlimit(struct proc *p, void *v, register_t *retval)
{
	struct sys_getrlimit_args /* {
		syscallarg(int) which;
		syscallarg(struct rlimit *) rlp;
	} */ *uap = v;
	struct plimit *limit;
	struct rlimit alimp;
	int error;

	if (SCARG(uap, which) < 0 || SCARG(uap, which) >= RLIM_NLIMITS)
		return (EINVAL);
	limit = lim_read_enter();
	alimp = limit->pl_rlimit[SCARG(uap, which)];
	lim_read_leave(limit);
	error = copyout(&alimp, SCARG(uap, rlp), sizeof(struct rlimit));
#ifdef KTRACE
	if (error == 0 && KTRPOINT(p, KTR_STRUCT))
		ktrrlimit(p, &alimp);
#endif
	return (error);
}

void
tuagg_sub(struct tusage *tup, struct proc *p)
{
	timespecadd(&tup->tu_runtime, &p->p_rtime, &tup->tu_runtime);
	tup->tu_uticks += p->p_uticks;
	tup->tu_sticks += p->p_sticks;
	tup->tu_iticks += p->p_iticks;
}

/*
 * Aggregate a single thread's immediate time counts into the running
 * totals for the thread and its process.
 */
void
tuagg_unlocked(struct process *pr, struct proc *p)
{
	tuagg_sub(&pr->ps_tu, p);
	tuagg_sub(&p->p_tu, p);
	timespecclear(&p->p_rtime);
	p->p_uticks = 0;
	p->p_sticks = 0;
	p->p_iticks = 0;
}

void
tuagg(struct process *pr, struct proc *p)
{
	int s;

	SCHED_LOCK(s);
	tuagg_unlocked(pr, p);
	SCHED_UNLOCK(s);
}

/*
 * Transform the running time and tick information in a struct tusage
 * into user, system, and interrupt time usage.
 */
void
calctsru(struct tusage *tup, struct timespec *up, struct timespec *sp,
    struct timespec *ip)
{
	u_quad_t st, ut, it;
	int freq;

	st = tup->tu_sticks;
	ut = tup->tu_uticks;
	it = tup->tu_iticks;

	if (st + ut + it == 0) {
		timespecclear(up);
		timespecclear(sp);
		if (ip != NULL)
			timespecclear(ip);
		return;
	}

	freq = stathz ? stathz : hz;

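	/*
	 * Each statistics-clock tick accounts for 1/freq seconds; scale
	 * to nanoseconds before splitting into tv_sec/tv_nsec.  For
	 * example, with freq = 100, 250 ticks -> 2500000000 ns = 2.5 s.
	 */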
	st = st * 1000000000 / freq;
	sp->tv_sec = st / 1000000000;
	sp->tv_nsec = st % 1000000000;
	ut = ut * 1000000000 / freq;
	up->tv_sec = ut / 1000000000;
	up->tv_nsec = ut % 1000000000;
	if (ip != NULL) {
		it = it * 1000000000 / freq;
		ip->tv_sec = it / 1000000000;
		ip->tv_nsec = it % 1000000000;
	}
}

void
calcru(struct tusage *tup, struct timeval *up, struct timeval *sp,
    struct timeval *ip)
{
	struct timespec u, s, i;

	calctsru(tup, &u, &s, ip != NULL ? &i : NULL);
	TIMESPEC_TO_TIMEVAL(up, &u);
	TIMESPEC_TO_TIMEVAL(sp, &s);
	if (ip != NULL)
		TIMESPEC_TO_TIMEVAL(ip, &i);
}

int
sys_getrusage(struct proc *p, void *v, register_t *retval)
{
	struct sys_getrusage_args /* {
		syscallarg(int) who;
		syscallarg(struct rusage *) rusage;
	} */ *uap = v;
	struct rusage ru;
	int error;

	error = dogetrusage(p, SCARG(uap, who), &ru);
	if (error == 0) {
		error = copyout(&ru, SCARG(uap, rusage), sizeof(ru));
#ifdef KTRACE
		if (error == 0 && KTRPOINT(p, KTR_STRUCT))
			ktrrusage(p, &ru);
#endif
	}
	return (error);
}

int
dogetrusage(struct proc *p, int who, struct rusage *rup)
{
	struct process *pr = p->p_p;
	struct proc *q;

	switch (who) {

	case RUSAGE_SELF:
		/* start with the sum of dead threads, if any */
		if (pr->ps_ru != NULL)
			*rup = *pr->ps_ru;
		else
			memset(rup, 0, sizeof(*rup));

		/* add on all living threads */
		TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
			ruadd(rup, &q->p_ru);
			tuagg(pr, q);
		}

		calcru(&pr->ps_tu, &rup->ru_utime, &rup->ru_stime, NULL);
		break;

	case RUSAGE_THREAD:
		*rup = p->p_ru;
		calcru(&p->p_tu, &rup->ru_utime, &rup->ru_stime, NULL);
		break;

	case RUSAGE_CHILDREN:
		*rup = pr->ps_cru;
		break;

	default:
		return (EINVAL);
	}
	return (0);
}

void
ruadd(struct rusage *ru, struct rusage *ru2)
{
	long *ip, *ip2;
	int i;

	timeradd(&ru->ru_utime, &ru2->ru_utime, &ru->ru_utime);
	timeradd(&ru->ru_stime, &ru2->ru_stime, &ru->ru_stime);
	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
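	/*
	 * ru_first and ru_last (sys/resource.h) bracket the remaining
	 * long-sized counters of struct rusage; sum them pairwise.
	 */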
	ip = &ru->ru_first; ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
		*ip++ += *ip2++;
}

/*
 * Check if the process exceeds its CPU resource allocation.
 * If it is over the hard limit, kill it.
 */
void
rucheck(void *arg)
{
	struct rlimit rlim;
	struct process *pr = arg;
	time_t runtime;
	int s;

	KERNEL_ASSERT_LOCKED();

	SCHED_LOCK(s);
	runtime = pr->ps_tu.tu_runtime.tv_sec;
	SCHED_UNLOCK(s);

	mtx_enter(&pr->ps_mtx);
	rlim = pr->ps_limit->pl_rlimit[RLIMIT_CPU];
	mtx_leave(&pr->ps_mtx);

	if ((rlim_t)runtime >= rlim.rlim_cur) {
		if ((rlim_t)runtime >= rlim.rlim_max) {
			prsignal(pr, SIGKILL);
		} else if (runtime >= pr->ps_nextxcpu) {
			prsignal(pr, SIGXCPU);
			pr->ps_nextxcpu = runtime + SIGXCPU_INTERVAL;
		}
	}

	timeout_add_msec(&pr->ps_rucheck_to, RUCHECK_INTERVAL);
}

struct pool plimit_pool;

void
lim_startup(struct plimit *limit0)
{
	rlim_t lim;
	int i;

	pool_init(&plimit_pool, sizeof(struct plimit), 0, IPL_MPFLOOR,
	    PR_WAITOK, "plimitpl", NULL);

	for (i = 0; i < nitems(limit0->pl_rlimit); i++)
		limit0->pl_rlimit[i].rlim_cur =
		    limit0->pl_rlimit[i].rlim_max = RLIM_INFINITY;
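	/*
	 * Cap the descriptor limit at NOFILE_MAX while keeping NOFILE
	 * descriptors of headroom below maxfiles when possible, and
	 * never dropping below NOFILE.
	 */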
	limit0->pl_rlimit[RLIMIT_NOFILE].rlim_cur = NOFILE;
	limit0->pl_rlimit[RLIMIT_NOFILE].rlim_max = MIN(NOFILE_MAX,
	    (maxfiles - NOFILE > NOFILE) ? maxfiles - NOFILE : NOFILE);
	limit0->pl_rlimit[RLIMIT_NPROC].rlim_cur = MAXUPRC;
	lim = ptoa(uvmexp.free);
	limit0->pl_rlimit[RLIMIT_RSS].rlim_max = lim;
	lim = ptoa(64*1024);		/* Default to very low */
	limit0->pl_rlimit[RLIMIT_MEMLOCK].rlim_max = lim;
	limit0->pl_rlimit[RLIMIT_MEMLOCK].rlim_cur = lim / 3;
	refcnt_init(&limit0->pl_refcnt);
}

/*
 * Make a copy of the plimit structure.
 * We share these structures copy-on-write after fork,
 * and copy when a limit is changed.
 */
struct plimit *
lim_copy(struct plimit *lim)
{
	struct plimit *newlim;

	newlim = pool_get(&plimit_pool, PR_WAITOK);
	memcpy(newlim->pl_rlimit, lim->pl_rlimit,
	    sizeof(struct rlimit) * RLIM_NLIMITS);
	refcnt_init(&newlim->pl_refcnt);
	return (newlim);
}

void
lim_free(struct plimit *lim)
{
	if (refcnt_rele(&lim->pl_refcnt) == 0)
		return;
	pool_put(&plimit_pool, lim);
}

void
lim_fork(struct process *parent, struct process *child)
{
	struct plimit *limit;

	mtx_enter(&parent->ps_mtx);
	limit = parent->ps_limit;
	refcnt_take(&limit->pl_refcnt);
	mtx_leave(&parent->ps_mtx);

	child->ps_limit = limit;

	if (limit->pl_rlimit[RLIMIT_CPU].rlim_cur != RLIM_INFINITY)
		timeout_add_msec(&child->ps_rucheck_to, RUCHECK_INTERVAL);
}

/*
 * Return an exclusive write reference to the process' resource limit structure.
 * The caller has to release the structure by calling lim_write_commit().
 *
 * This invalidates any plimit read reference held by the calling thread.
 */
struct plimit *
lim_write_begin(void)
{
	struct plimit *limit;
	struct proc *p = curproc;

	rw_assert_wrlock(&rlimit_lock);

	if (p->p_limit != NULL)
		lim_free(p->p_limit);
	p->p_limit = NULL;

	/*
	 * It is safe to access ps_limit here without holding ps_mtx
	 * because rlimit_lock excludes other writers.
	 */

	limit = p->p_p->ps_limit;
	if (P_HASSIBLING(p) || refcnt_shared(&limit->pl_refcnt))
		limit = lim_copy(limit);

	return (limit);
}

/*
 * Finish exclusive write access to the plimit structure.
 * This makes the structure visible to other threads in the process.
 */
void
lim_write_commit(struct plimit *limit)
{
	struct plimit *olimit;
	struct proc *p = curproc;

	rw_assert_wrlock(&rlimit_lock);

	if (limit != p->p_p->ps_limit) {
		mtx_enter(&p->p_p->ps_mtx);
		olimit = p->p_p->ps_limit;
		p->p_p->ps_limit = limit;
		mtx_leave(&p->p_p->ps_mtx);

		lim_free(olimit);
	}
}
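
/*
 * A sketch of the write-side update pattern, as used by dosetrlimit()
 * above:
 *
 *	rw_enter_write(&rlimit_lock);
 *	limit = lim_write_begin();
 *	limit->pl_rlimit[which] = newlim;
 *	lim_write_commit(limit);
 *	rw_exit_write(&rlimit_lock);
 */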

/*
 * Begin read access to the process' resource limit structure.
 * The access has to be finished by calling lim_read_leave().
 *
 * Sections denoted by lim_read_enter() and lim_read_leave() cannot nest.
 */
struct plimit *
lim_read_enter(void)
{
	struct plimit *limit;
	struct proc *p = curproc;
	struct process *pr = p->p_p;

	/*
	 * This thread might not observe the latest value of ps_limit
	 * if another thread updated the limits very recently on another CPU.
	 * However, the anomaly should disappear quickly, especially if
	 * there is any synchronization activity between the threads (or
	 * the CPUs).
	 */

	limit = p->p_limit;
	if (limit != pr->ps_limit) {
		mtx_enter(&pr->ps_mtx);
		limit = pr->ps_limit;
		refcnt_take(&limit->pl_refcnt);
		mtx_leave(&pr->ps_mtx);
		if (p->p_limit != NULL)
			lim_free(p->p_limit);
		p->p_limit = limit;
	}
	KASSERT(limit != NULL);
	return (limit);
}
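
/*
 * The matching read-side pattern, as in sys_getrlimit() above:
 *
 *	limit = lim_read_enter();
 *	alimp = limit->pl_rlimit[which];
 *	lim_read_leave(limit);
 */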

/*
 * Get the value of the resource limit in the given process.
 */
rlim_t
lim_cur_proc(struct proc *p, int which)
{
	struct process *pr = p->p_p;
	rlim_t val;

	KASSERT(which >= 0 && which < RLIM_NLIMITS);

	mtx_enter(&pr->ps_mtx);
	val = pr->ps_limit->pl_rlimit[which].rlim_cur;
	mtx_leave(&pr->ps_mtx);
	return (val);
}
733