xref: /openbsd-src/sys/kern/kern_resource.c (revision 5699f997603f91289e6ed86fd4bdbb59e4513ec3)
1 /*	$OpenBSD: kern_resource.c,v 1.66 2019/06/24 12:49:03 visa Exp $	*/
2 /*	$NetBSD: kern_resource.c,v 1.38 1996/10/23 07:19:38 matthias Exp $	*/
3 
4 /*-
5  * Copyright (c) 1982, 1986, 1991, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  * (c) UNIX System Laboratories, Inc.
8  * All or some portions of this file are derived from material licensed
9  * to the University of California by American Telephone and Telegraph
10  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
11  * the permission of UNIX System Laboratories, Inc.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  * 3. Neither the name of the University nor the names of its contributors
22  *    may be used to endorse or promote products derived from this software
23  *    without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  *
37  *	@(#)kern_resource.c	8.5 (Berkeley) 1/21/94
38  */
39 
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/kernel.h>
43 #include <sys/file.h>
44 #include <sys/resourcevar.h>
45 #include <sys/pool.h>
46 #include <sys/proc.h>
47 #include <sys/ktrace.h>
48 #include <sys/sched.h>
49 #include <sys/signalvar.h>
50 
51 #include <sys/mount.h>
52 #include <sys/syscallargs.h>
53 
54 #include <uvm/uvm_extern.h>
55 
/* Resource usage check interval in msec */
#define RUCHECK_INTERVAL	1000

/* SIGXCPU interval in seconds of process runtime */
#define SIGXCPU_INTERVAL	5

/* Local helpers for copy-on-write management of struct plimit. */
struct plimit	*lim_copy(struct plimit *);
struct plimit	*lim_write_begin(void);
void		 lim_write_commit(struct plimit *);

/* Fold one thread's immediate time counters into a struct tusage. */
void	tuagg_sub(struct tusage *, struct proc *);

/*
 * Patchable maximum data and stack limits.
 */
rlim_t maxdmap = MAXDSIZ;
rlim_t maxsmap = MAXSSIZ;

/*
 * Serializes resource limit updates.
 * This lock has to be held together with ps_mtx when updating
 * the process' ps_limit.
 */
struct rwlock rlimit_lock = RWLOCK_INITIALIZER("rlimitlk");
80 
81 /*
82  * Resource controls and accounting.
83  */
84 
85 int
86 sys_getpriority(struct proc *curp, void *v, register_t *retval)
87 {
88 	struct sys_getpriority_args /* {
89 		syscallarg(int) which;
90 		syscallarg(id_t) who;
91 	} */ *uap = v;
92 	struct process *pr;
93 	int low = NZERO + PRIO_MAX + 1;
94 
95 	switch (SCARG(uap, which)) {
96 
97 	case PRIO_PROCESS:
98 		if (SCARG(uap, who) == 0)
99 			pr = curp->p_p;
100 		else
101 			pr = prfind(SCARG(uap, who));
102 		if (pr == NULL)
103 			break;
104 		if (pr->ps_nice < low)
105 			low = pr->ps_nice;
106 		break;
107 
108 	case PRIO_PGRP: {
109 		struct pgrp *pg;
110 
111 		if (SCARG(uap, who) == 0)
112 			pg = curp->p_p->ps_pgrp;
113 		else if ((pg = pgfind(SCARG(uap, who))) == NULL)
114 			break;
115 		LIST_FOREACH(pr, &pg->pg_members, ps_pglist)
116 			if (pr->ps_nice < low)
117 				low = pr->ps_nice;
118 		break;
119 	}
120 
121 	case PRIO_USER:
122 		if (SCARG(uap, who) == 0)
123 			SCARG(uap, who) = curp->p_ucred->cr_uid;
124 		LIST_FOREACH(pr, &allprocess, ps_list)
125 			if (pr->ps_ucred->cr_uid == SCARG(uap, who) &&
126 			    pr->ps_nice < low)
127 				low = pr->ps_nice;
128 		break;
129 
130 	default:
131 		return (EINVAL);
132 	}
133 	if (low == NZERO + PRIO_MAX + 1)
134 		return (ESRCH);
135 	*retval = low - NZERO;
136 	return (0);
137 }
138 
139 int
140 sys_setpriority(struct proc *curp, void *v, register_t *retval)
141 {
142 	struct sys_setpriority_args /* {
143 		syscallarg(int) which;
144 		syscallarg(id_t) who;
145 		syscallarg(int) prio;
146 	} */ *uap = v;
147 	struct process *pr;
148 	int found = 0, error = 0;
149 
150 	switch (SCARG(uap, which)) {
151 
152 	case PRIO_PROCESS:
153 		if (SCARG(uap, who) == 0)
154 			pr = curp->p_p;
155 		else
156 			pr = prfind(SCARG(uap, who));
157 		if (pr == NULL)
158 			break;
159 		error = donice(curp, pr, SCARG(uap, prio));
160 		found++;
161 		break;
162 
163 	case PRIO_PGRP: {
164 		struct pgrp *pg;
165 
166 		if (SCARG(uap, who) == 0)
167 			pg = curp->p_p->ps_pgrp;
168 		else if ((pg = pgfind(SCARG(uap, who))) == NULL)
169 			break;
170 		LIST_FOREACH(pr, &pg->pg_members, ps_pglist) {
171 			error = donice(curp, pr, SCARG(uap, prio));
172 			found++;
173 		}
174 		break;
175 	}
176 
177 	case PRIO_USER:
178 		if (SCARG(uap, who) == 0)
179 			SCARG(uap, who) = curp->p_ucred->cr_uid;
180 		LIST_FOREACH(pr, &allprocess, ps_list)
181 			if (pr->ps_ucred->cr_uid == SCARG(uap, who)) {
182 				error = donice(curp, pr, SCARG(uap, prio));
183 				found++;
184 			}
185 		break;
186 
187 	default:
188 		return (EINVAL);
189 	}
190 	if (found == 0)
191 		return (ESRCH);
192 	return (error);
193 }
194 
195 int
196 donice(struct proc *curp, struct process *chgpr, int n)
197 {
198 	struct ucred *ucred = curp->p_ucred;
199 	struct proc *p;
200 	int s;
201 
202 	if (ucred->cr_uid != 0 && ucred->cr_ruid != 0 &&
203 	    ucred->cr_uid != chgpr->ps_ucred->cr_uid &&
204 	    ucred->cr_ruid != chgpr->ps_ucred->cr_uid)
205 		return (EPERM);
206 	if (n > PRIO_MAX)
207 		n = PRIO_MAX;
208 	if (n < PRIO_MIN)
209 		n = PRIO_MIN;
210 	n += NZERO;
211 	if (n < chgpr->ps_nice && suser(curp))
212 		return (EACCES);
213 	chgpr->ps_nice = n;
214 	SCHED_LOCK(s);
215 	TAILQ_FOREACH(p, &chgpr->ps_threads, p_thr_link)
216 		(void)resetpriority(p);
217 	SCHED_UNLOCK(s);
218 	return (0);
219 }
220 
221 int
222 sys_setrlimit(struct proc *p, void *v, register_t *retval)
223 {
224 	struct sys_setrlimit_args /* {
225 		syscallarg(int) which;
226 		syscallarg(const struct rlimit *) rlp;
227 	} */ *uap = v;
228 	struct rlimit alim;
229 	int error;
230 
231 	error = copyin((caddr_t)SCARG(uap, rlp), (caddr_t)&alim,
232 		       sizeof (struct rlimit));
233 	if (error)
234 		return (error);
235 #ifdef KTRACE
236 	if (KTRPOINT(p, KTR_STRUCT))
237 		ktrrlimit(p, &alim);
238 #endif
239 	return (dosetrlimit(p, SCARG(uap, which), &alim));
240 }
241 
/*
 * Validate and install a new resource limit `which' for the process of
 * thread p.  Raising a hard limit above its current value requires
 * root.  RLIMIT_CPU gains start the periodic usage check; RLIMIT_STACK
 * changes adjust the protection of the already-mapped stack region.
 * Returns 0 on success or an errno value.
 */
int
dosetrlimit(struct proc *p, u_int which, struct rlimit *limp)
{
	struct rlimit *alimp;
	struct plimit *limit;
	rlim_t maxlim;
	int error;

	if (which >= RLIM_NLIMITS || limp->rlim_cur > limp->rlim_max)
		return (EINVAL);

	rw_enter_write(&rlimit_lock);

	/* Raising the hard limit beyond its current value needs root. */
	alimp = &p->p_p->ps_limit->pl_rlimit[which];
	if (limp->rlim_max > alimp->rlim_max) {
		if ((error = suser(p)) != 0) {
			rw_exit_write(&rlimit_lock);
			return (error);
		}
	}

	/* Get exclusive write access to the limit structure. */
	limit = lim_write_begin();
	alimp = &limit->pl_rlimit[which];

	/* Determine the system-imposed ceiling for this resource. */
	switch (which) {
	case RLIMIT_DATA:
		maxlim = maxdmap;
		break;
	case RLIMIT_STACK:
		maxlim = maxsmap;
		break;
	case RLIMIT_NOFILE:
		maxlim = maxfiles;
		break;
	case RLIMIT_NPROC:
		maxlim = maxprocess;
		break;
	default:
		maxlim = RLIM_INFINITY;
		break;
	}

	/* Silently cap the request at the ceiling; keep cur <= max. */
	if (limp->rlim_max > maxlim)
		limp->rlim_max = maxlim;
	if (limp->rlim_cur > limp->rlim_max)
		limp->rlim_cur = limp->rlim_max;

	/* A finite CPU limit appearing arms the periodic rucheck(). */
	if (which == RLIMIT_CPU && limp->rlim_cur != RLIM_INFINITY &&
	    alimp->rlim_cur == RLIM_INFINITY)
		timeout_add_msec(&p->p_p->ps_rucheck_to, RUCHECK_INTERVAL);

	if (which == RLIMIT_STACK) {
		/*
		 * Stack is allocated to the max at exec time with only
		 * "rlim_cur" bytes accessible.  If stack limit is going
		 * up make more accessible, if going down make inaccessible.
		 */
		if (limp->rlim_cur != alimp->rlim_cur) {
			vaddr_t addr;
			vsize_t size;
			vm_prot_t prot;
			struct vmspace *vm = p->p_vmspace;

			if (limp->rlim_cur > alimp->rlim_cur) {
				/* Growing: unlock the delta region. */
				prot = PROT_READ | PROT_WRITE;
				size = limp->rlim_cur - alimp->rlim_cur;
#ifdef MACHINE_STACK_GROWS_UP
				addr = (vaddr_t)vm->vm_maxsaddr +
				    alimp->rlim_cur;
#else
				addr = (vaddr_t)vm->vm_minsaddr -
				    limp->rlim_cur;
#endif
			} else {
				/* Shrinking: revoke access to the delta. */
				prot = PROT_NONE;
				size = alimp->rlim_cur - limp->rlim_cur;
#ifdef MACHINE_STACK_GROWS_UP
				addr = (vaddr_t)vm->vm_maxsaddr +
				    limp->rlim_cur;
#else
				addr = (vaddr_t)vm->vm_minsaddr -
				    alimp->rlim_cur;
#endif
			}
			addr = trunc_page(addr);
			size = round_page(size);
			KERNEL_LOCK();
			(void) uvm_map_protect(&vm->vm_map,
					      addr, addr+size, prot, FALSE);
			KERNEL_UNLOCK();
		}
	}

	*alimp = *limp;

	lim_write_commit(limit);
	rw_exit_write(&rlimit_lock);

	return (0);
}
343 
344 int
345 sys_getrlimit(struct proc *p, void *v, register_t *retval)
346 {
347 	struct sys_getrlimit_args /* {
348 		syscallarg(int) which;
349 		syscallarg(struct rlimit *) rlp;
350 	} */ *uap = v;
351 	struct plimit *limit;
352 	struct rlimit alimp;
353 	int error;
354 
355 	if (SCARG(uap, which) < 0 || SCARG(uap, which) >= RLIM_NLIMITS)
356 		return (EINVAL);
357 	limit = lim_read_enter();
358 	alimp = limit->pl_rlimit[SCARG(uap, which)];
359 	lim_read_leave(limit);
360 	error = copyout(&alimp, SCARG(uap, rlp), sizeof(struct rlimit));
361 #ifdef KTRACE
362 	if (error == 0 && KTRPOINT(p, KTR_STRUCT))
363 		ktrrlimit(p, &alimp);
364 #endif
365 	return (error);
366 }
367 
/*
 * Add thread p's immediate run time and tick counts to *tup.
 * The counters in p itself are left untouched.
 */
void
tuagg_sub(struct tusage *tup, struct proc *p)
{
	timespecadd(&tup->tu_runtime, &p->p_rtime, &tup->tu_runtime);
	tup->tu_uticks += p->p_uticks;
	tup->tu_sticks += p->p_sticks;
	tup->tu_iticks += p->p_iticks;
}
376 
/*
 * Aggregate a single thread's immediate time counts into the running
 * totals for the thread and process, then reset the thread's counters.
 * Caller is expected to hold the scheduler lock (see tuagg()).
 */
void
tuagg_unlocked(struct process *pr, struct proc *p)
{
	tuagg_sub(&pr->ps_tu, p);
	tuagg_sub(&p->p_tu, p);
	/* Counters have been folded into the totals; start fresh. */
	timespecclear(&p->p_rtime);
	p->p_uticks = 0;
	p->p_sticks = 0;
	p->p_iticks = 0;
}
391 
/*
 * Locked wrapper around tuagg_unlocked(): aggregate thread p's time
 * counts into process pr under the scheduler lock.
 */
void
tuagg(struct process *pr, struct proc *p)
{
	int s;

	SCHED_LOCK(s);
	tuagg_unlocked(pr, p);
	SCHED_UNLOCK(s);
}
401 
402 /*
403  * Transform the running time and tick information in a struct tusage
404  * into user, system, and interrupt time usage.
405  */
406 void
407 calctsru(struct tusage *tup, struct timespec *up, struct timespec *sp,
408     struct timespec *ip)
409 {
410 	u_quad_t st, ut, it;
411 	int freq;
412 
413 	st = tup->tu_sticks;
414 	ut = tup->tu_uticks;
415 	it = tup->tu_iticks;
416 
417 	if (st + ut + it == 0) {
418 		timespecclear(up);
419 		timespecclear(sp);
420 		if (ip != NULL)
421 			timespecclear(ip);
422 		return;
423 	}
424 
425 	freq = stathz ? stathz : hz;
426 
427 	st = st * 1000000000 / freq;
428 	sp->tv_sec = st / 1000000000;
429 	sp->tv_nsec = st % 1000000000;
430 	ut = ut * 1000000000 / freq;
431 	up->tv_sec = ut / 1000000000;
432 	up->tv_nsec = ut % 1000000000;
433 	if (ip != NULL) {
434 		it = it * 1000000000 / freq;
435 		ip->tv_sec = it / 1000000000;
436 		ip->tv_nsec = it % 1000000000;
437 	}
438 }
439 
440 void
441 calcru(struct tusage *tup, struct timeval *up, struct timeval *sp,
442     struct timeval *ip)
443 {
444 	struct timespec u, s, i;
445 
446 	calctsru(tup, &u, &s, ip != NULL ? &i : NULL);
447 	TIMESPEC_TO_TIMEVAL(up, &u);
448 	TIMESPEC_TO_TIMEVAL(sp, &s);
449 	if (ip != NULL)
450 		TIMESPEC_TO_TIMEVAL(ip, &i);
451 }
452 
453 int
454 sys_getrusage(struct proc *p, void *v, register_t *retval)
455 {
456 	struct sys_getrusage_args /* {
457 		syscallarg(int) who;
458 		syscallarg(struct rusage *) rusage;
459 	} */ *uap = v;
460 	struct rusage ru;
461 	int error;
462 
463 	error = dogetrusage(p, SCARG(uap, who), &ru);
464 	if (error == 0) {
465 		error = copyout(&ru, SCARG(uap, rusage), sizeof(ru));
466 #ifdef KTRACE
467 		if (error == 0 && KTRPOINT(p, KTR_STRUCT))
468 			ktrrusage(p, &ru);
469 #endif
470 	}
471 	return (error);
472 }
473 
474 int
475 dogetrusage(struct proc *p, int who, struct rusage *rup)
476 {
477 	struct process *pr = p->p_p;
478 	struct proc *q;
479 
480 	switch (who) {
481 
482 	case RUSAGE_SELF:
483 		/* start with the sum of dead threads, if any */
484 		if (pr->ps_ru != NULL)
485 			*rup = *pr->ps_ru;
486 		else
487 			memset(rup, 0, sizeof(*rup));
488 
489 		/* add on all living threads */
490 		TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
491 			ruadd(rup, &q->p_ru);
492 			tuagg(pr, q);
493 		}
494 
495 		calcru(&pr->ps_tu, &rup->ru_utime, &rup->ru_stime, NULL);
496 		break;
497 
498 	case RUSAGE_THREAD:
499 		*rup = p->p_ru;
500 		calcru(&p->p_tu, &rup->ru_utime, &rup->ru_stime, NULL);
501 		break;
502 
503 	case RUSAGE_CHILDREN:
504 		*rup = pr->ps_cru;
505 		break;
506 
507 	default:
508 		return (EINVAL);
509 	}
510 	return (0);
511 }
512 
513 void
514 ruadd(struct rusage *ru, struct rusage *ru2)
515 {
516 	long *ip, *ip2;
517 	int i;
518 
519 	timeradd(&ru->ru_utime, &ru2->ru_utime, &ru->ru_utime);
520 	timeradd(&ru->ru_stime, &ru2->ru_stime, &ru->ru_stime);
521 	if (ru->ru_maxrss < ru2->ru_maxrss)
522 		ru->ru_maxrss = ru2->ru_maxrss;
523 	ip = &ru->ru_first; ip2 = &ru2->ru_first;
524 	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
525 		*ip++ += *ip2++;
526 }
527 
/*
 * Check if the process exceeds its cpu resource allocation.
 * If over max, kill it.
 *
 * Timeout callback armed by dosetrlimit()/lim_fork() whenever the
 * process has a finite RLIMIT_CPU; re-arms itself every
 * RUCHECK_INTERVAL msec.  Over the soft limit a SIGXCPU is delivered
 * every SIGXCPU_INTERVAL seconds of runtime; over the hard limit the
 * process is killed with SIGKILL.
 */
void
rucheck(void *arg)
{
	struct rlimit rlim;
	struct process *pr = arg;
	time_t runtime;
	int s;

	KERNEL_ASSERT_LOCKED();

	/* Snapshot accumulated runtime under the scheduler lock. */
	SCHED_LOCK(s);
	runtime = pr->ps_tu.tu_runtime.tv_sec;
	SCHED_UNLOCK(s);

	/* Snapshot the CPU limit; ps_mtx guards ps_limit. */
	mtx_enter(&pr->ps_mtx);
	rlim = pr->ps_limit->pl_rlimit[RLIMIT_CPU];
	mtx_leave(&pr->ps_mtx);

	if ((rlim_t)runtime >= rlim.rlim_cur) {
		if ((rlim_t)runtime >= rlim.rlim_max) {
			prsignal(pr, SIGKILL);
		} else if (runtime >= pr->ps_nextxcpu) {
			prsignal(pr, SIGXCPU);
			pr->ps_nextxcpu = runtime + SIGXCPU_INTERVAL;
		}
	}

	/* Re-arm for the next check. */
	timeout_add_msec(&pr->ps_rucheck_to, RUCHECK_INTERVAL);
}
561 
/* Backing pool for struct plimit allocations. */
struct pool plimit_pool;

/*
 * One-time initialization of the plimit pool and of limit0, the
 * initial resource limit structure inherited by all processes.
 */
void
lim_startup(struct plimit *limit0)
{
	rlim_t lim;
	int i;

	pool_init(&plimit_pool, sizeof(struct plimit), 0, IPL_MPFLOOR,
	    PR_WAITOK, "plimitpl", NULL);

	/* Default every limit to infinity, then tighten selected ones. */
	for (i = 0; i < nitems(limit0->pl_rlimit); i++)
		limit0->pl_rlimit[i].rlim_cur =
		    limit0->pl_rlimit[i].rlim_max = RLIM_INFINITY;
	limit0->pl_rlimit[RLIMIT_NOFILE].rlim_cur = NOFILE;
	limit0->pl_rlimit[RLIMIT_NOFILE].rlim_max = MIN(NOFILE_MAX,
	    (maxfiles - NOFILE > NOFILE) ? maxfiles - NOFILE : NOFILE);
	limit0->pl_rlimit[RLIMIT_NPROC].rlim_cur = MAXUPRC;
	/* Memory limits scale with the free physical memory at boot. */
	lim = ptoa(uvmexp.free);
	limit0->pl_rlimit[RLIMIT_RSS].rlim_max = lim;
	limit0->pl_rlimit[RLIMIT_MEMLOCK].rlim_max = lim;
	limit0->pl_rlimit[RLIMIT_MEMLOCK].rlim_cur = lim / 3;
	limit0->pl_refcnt = 1;
}
586 
587 /*
588  * Make a copy of the plimit structure.
589  * We share these structures copy-on-write after fork,
590  * and copy when a limit is changed.
591  */
592 struct plimit *
593 lim_copy(struct plimit *lim)
594 {
595 	struct plimit *newlim;
596 
597 	newlim = pool_get(&plimit_pool, PR_WAITOK);
598 	memcpy(newlim->pl_rlimit, lim->pl_rlimit,
599 	    sizeof(struct rlimit) * RLIM_NLIMITS);
600 	newlim->pl_refcnt = 1;
601 	return (newlim);
602 }
603 
604 void
605 lim_free(struct plimit *lim)
606 {
607 	if (atomic_dec_int_nv(&lim->pl_refcnt) > 0)
608 		return;
609 	pool_put(&plimit_pool, lim);
610 }
611 
/*
 * Let the child share the parent's limit structure copy-on-write
 * and start CPU-limit accounting if a finite RLIMIT_CPU is inherited.
 */
void
lim_fork(struct process *parent, struct process *child)
{
	struct plimit *limit;

	/* Take a shared reference under the parent's ps_mtx. */
	mtx_enter(&parent->ps_mtx);
	limit = parent->ps_limit;
	atomic_inc_int(&limit->pl_refcnt);
	mtx_leave(&parent->ps_mtx);

	child->ps_limit = limit;

	/* Arm rucheck() if the inherited CPU limit is finite. */
	if (limit->pl_rlimit[RLIMIT_CPU].rlim_cur != RLIM_INFINITY)
		timeout_add_msec(&child->ps_rucheck_to, RUCHECK_INTERVAL);
}
627 
/*
 * Return an exclusive write reference to the process' resource limit structure.
 * The caller has to release the structure by calling lim_write_commit().
 *
 * This invalidates any plimit read reference held by the calling thread.
 */
struct plimit *
lim_write_begin(void)
{
	struct plimit *limit;
	struct proc *p = curproc;

	rw_assert_wrlock(&rlimit_lock);

	/* Drop this thread's cached read reference, if any. */
	if (p->p_limit != NULL)
		lim_free(p->p_limit);
	p->p_limit = NULL;

	/*
	 * It is safe to access ps_limit here without holding ps_mtx
	 * because rlimit_lock excludes other writers.
	 */

	/* Copy-on-write: other threads or processes may still see it. */
	limit = p->p_p->ps_limit;
	if (P_HASSIBLING(p) || limit->pl_refcnt > 1)
		limit = lim_copy(limit);

	return (limit);
}
657 
658 /*
659  * Finish exclusive write access to the plimit structure.
660  * This makes the structure visible to other threads in the process.
661  */
662 void
663 lim_write_commit(struct plimit *limit)
664 {
665 	struct plimit *olimit;
666 	struct proc *p = curproc;
667 
668 	rw_assert_wrlock(&rlimit_lock);
669 
670 	if (limit != p->p_p->ps_limit) {
671 		mtx_enter(&p->p_p->ps_mtx);
672 		olimit = p->p_p->ps_limit;
673 		p->p_p->ps_limit = limit;
674 		mtx_leave(&p->p_p->ps_mtx);
675 
676 		lim_free(olimit);
677 	}
678 }
679 
680 /*
681  * Begin read access to the process' resource limit structure.
682  * The access has to be finished by calling lim_read_leave().
683  *
684  * Sections denoted by lim_read_enter() and lim_read_leave() cannot nest.
685  */
686 struct plimit *
687 lim_read_enter(void)
688 {
689 	struct plimit *limit;
690 	struct proc *p = curproc;
691 	struct process *pr = p->p_p;
692 
693 	/*
694 	 * This thread might not observe the latest value of ps_limit
695 	 * if another thread updated the limits very recently on another CPU.
696 	 * However, the anomaly should disappear quickly, especially if
697 	 * there is any synchronization activity between the threads (or
698 	 * the CPUs).
699 	 */
700 
701 	limit = p->p_limit;
702 	if (limit != pr->ps_limit) {
703 		mtx_enter(&pr->ps_mtx);
704 		limit = pr->ps_limit;
705 		atomic_inc_int(&limit->pl_refcnt);
706 		mtx_leave(&pr->ps_mtx);
707 		if (p->p_limit != NULL)
708 			lim_free(p->p_limit);
709 		p->p_limit = limit;
710 	}
711 	KASSERT(limit != NULL);
712 	return (limit);
713 }
714 
715 /*
716  * Get the value of the resource limit in given process.
717  */
718 rlim_t
719 lim_cur_proc(struct proc *p, int which)
720 {
721 	struct process *pr = p->p_p;
722 	rlim_t val;
723 
724 	KASSERT(which >= 0 && which < RLIM_NLIMITS);
725 
726 	mtx_enter(&pr->ps_mtx);
727 	val = pr->ps_limit->pl_rlimit[which].rlim_cur;
728 	mtx_leave(&pr->ps_mtx);
729 	return (val);
730 }