/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_resource.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_resource.c,v 1.55.2.5 2001/11/03 01:41:08 ps Exp $
 * $DragonFly: src/sys/kern/kern_resource.c,v 1.35 2008/05/27 05:25:34 dillon Exp $
 */

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/file.h>
#include <sys/kern_syscall.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/time.h>
#include <sys/lockf.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>

static int donice (struct proc *chgp, int n);

static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
static struct spinlock uihash_lock;
static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
static u_long uihash;		/* size of hash table - 1 */

static struct uidinfo	*uicreate (uid_t uid);
static struct uidinfo	*uilookup (uid_t uid);

/*
 * Resource controls and accounting.
 */

struct getpriority_info {
	int low;
	int who;
};

static int getpriority_callback(struct proc *p, void *data);

/*
 * MPALMOSTSAFE
 */
int
sys_getpriority(struct getpriority_args *uap)
{
	struct getpriority_info info;
	struct proc *curp = curproc;
	struct proc *p;
	int low = PRIO_MAX + 1;
	int error;

	get_mplock();

	switch (uap->which) {
	case PRIO_PROCESS:
		if (uap->who == 0)
			p = curp;
		else
			p = pfind(uap->who);
		if (p == NULL)
			break;
		if (!PRISON_CHECK(curp->p_ucred, p->p_ucred))
			break;
		low = p->p_nice;
		break;

	case PRIO_PGRP:
	{
		struct pgrp *pg;

		if (uap->who == 0)
			pg = curp->p_pgrp;
		else if ((pg = pgfind(uap->who)) == NULL)
			break;
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			if (PRISON_CHECK(curp->p_ucred, p->p_ucred) &&
			    p->p_nice < low)
				low = p->p_nice;
		}
		break;
	}
	case PRIO_USER:
		if (uap->who == 0)
			uap->who = curp->p_ucred->cr_uid;
		info.low = low;
		info.who = uap->who;
		allproc_scan(getpriority_callback, &info);
		low = info.low;
		break;

	default:
		error = EINVAL;
		goto done;
	}
	if (low == PRIO_MAX + 1) {
		error = ESRCH;
		goto done;
	}
	uap->sysmsg_result = low;
	error = 0;
done:
	rel_mplock();
	return (error);
}

/*
 * Figure out the current lowest nice priority for processes owned
 * by the specified user.
 */
static
int
getpriority_callback(struct proc *p, void *data)
{
	struct getpriority_info *info = data;

	if (PRISON_CHECK(curproc->p_ucred, p->p_ucred) &&
	    p->p_ucred->cr_uid == info->who &&
	    p->p_nice < info->low) {
		info->low = p->p_nice;
	}
	return (0);
}

struct setpriority_info {
	int prio;
	int who;
	int error;
	int found;
};

static int setpriority_callback(struct proc *p, void *data);

/*
 * MPALMOSTSAFE
 */
int
sys_setpriority(struct setpriority_args *uap)
{
	struct setpriority_info info;
	struct proc *curp = curproc;
	struct proc *p;
	int found = 0, error = 0;

	get_mplock();

	switch (uap->which) {
	case PRIO_PROCESS:
		if (uap->who == 0)
			p = curp;
		else
			p = pfind(uap->who);
		if (p == NULL)
			break;
		if (!PRISON_CHECK(curp->p_ucred, p->p_ucred))
			break;
		error = donice(p, uap->prio);
		found++;
		break;

	case PRIO_PGRP:
	{
		struct pgrp *pg;

		if (uap->who == 0)
			pg = curp->p_pgrp;
		else if ((pg = pgfind(uap->who)) == NULL)
			break;
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			if (PRISON_CHECK(curp->p_ucred, p->p_ucred)) {
				error = donice(p, uap->prio);
				found++;
			}
		}
		break;
	}
	case PRIO_USER:
		if (uap->who == 0)
			uap->who = curp->p_ucred->cr_uid;
		info.prio = uap->prio;
		info.who = uap->who;
		info.error = 0;
		info.found = 0;
		allproc_scan(setpriority_callback, &info);
		error = info.error;
		found = info.found;
		break;

	default:
		error = EINVAL;
		found = 1;
		break;
	}

	rel_mplock();
	if (found == 0)
		error = ESRCH;
	return (error);
}
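
/*
 * Illustrative userland sketch (not part of this file): exercising the
 * two syscalls above.  'which' selects the PRIO_PROCESS/PRIO_PGRP/
 * PRIO_USER namespace and who == 0 means "the caller", matching the
 * dispatch in sys_getpriority() and sys_setpriority().
 */
#if 0
#include <sys/resource.h>
#include <errno.h>
#include <stdio.h>

static int
renice_self(int prio)
{
	if (setpriority(PRIO_PROCESS, 0, prio) < 0)
		return (errno);
	errno = 0;			/* -1 is a legal return value */
	prio = getpriority(PRIO_PROCESS, 0);
	if (prio == -1 && errno != 0)
		return (errno);
	printf("nice is now %d\n", prio);
	return (0);
}
#endif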

static
int
setpriority_callback(struct proc *p, void *data)
{
	struct setpriority_info *info = data;
	int error;

	if (p->p_ucred->cr_uid == info->who &&
	    PRISON_CHECK(curproc->p_ucred, p->p_ucred)) {
		error = donice(p, info->prio);
		if (error)
			info->error = error;
		++info->found;
	}
	return (0);
}

static int
donice(struct proc *chgp, int n)
{
	struct proc *curp = curproc;
	struct ucred *cr = curp->p_ucred;
	struct lwp *lp;

	if (cr->cr_uid && cr->cr_ruid &&
	    cr->cr_uid != chgp->p_ucred->cr_uid &&
	    cr->cr_ruid != chgp->p_ucred->cr_uid)
		return (EPERM);
	if (n > PRIO_MAX)
		n = PRIO_MAX;
	if (n < PRIO_MIN)
		n = PRIO_MIN;
	if (n < chgp->p_nice && priv_check_cred(cr, PRIV_SCHED_SETPRIORITY, 0))
		return (EACCES);
	chgp->p_nice = n;
	FOREACH_LWP_IN_PROC(lp, chgp)
		chgp->p_usched->resetpriority(lp);
	return (0);
}

/*
 * MPALMOSTSAFE
 */
int
sys_lwp_rtprio(struct lwp_rtprio_args *uap)
{
	struct proc *p = curproc;
	struct lwp *lp;
	struct rtprio rtp;
	struct ucred *cr = curthread->td_ucred;
	int error;

	error = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	if (error)
		return error;
	if (uap->pid < 0)
		return EINVAL;

	get_mplock();
	if (uap->pid == 0) {
		/* curproc already loaded on p */
	} else {
		p = pfind(uap->pid);
	}

	if (p == NULL) {
		error = ESRCH;
		goto done;
	}

	if (uap->tid < -1) {
		error = EINVAL;
		goto done;
	}
	if (uap->tid == -1) {
		/*
		 * Sadly, tid can be 0, so we can't use 0 here the way
		 * sys_rtprio() uses pid 0.
		 */
		lp = curthread->td_lwp;
	} else {
		lp = lwp_rb_tree_RB_LOOKUP(&p->p_lwp_tree, uap->tid);
		if (lp == NULL) {
			error = ESRCH;
			goto done;
		}
	}

	switch (uap->function) {
	case RTP_LOOKUP:
		error = copyout(&lp->lwp_rtprio, uap->rtp,
				sizeof(struct rtprio));
		break;
	case RTP_SET:
		if (cr->cr_uid && cr->cr_ruid &&
		    cr->cr_uid != p->p_ucred->cr_uid &&
		    cr->cr_ruid != p->p_ucred->cr_uid) {
			error = EPERM;
			break;
		}
		/* disallow setting rtprio in most cases if not superuser */
		if (priv_check_cred(cr, PRIV_SCHED_RTPRIO, 0)) {
			/* can't set someone else's */
			if (uap->pid) { /* XXX */
				error = EPERM;
				break;
			}
			/*
			 * Can't set realtime priority.
			 *
			 * Realtime priority has to be restricted for
			 * reasons which should be obvious.  However, for
			 * idle priority, there is a potential for system
			 * deadlock if an idleprio process gains a lock on
			 * a resource that other processes need (and the
			 * idleprio process can't run due to a CPU-bound
			 * normal process).  Fix me!  XXX
			 */
			if (RTP_PRIO_IS_REALTIME(rtp.type)) {
				error = EPERM;
				break;
			}
		}
		switch (rtp.type) {
#ifdef RTP_PRIO_FIFO
		case RTP_PRIO_FIFO:
#endif
		case RTP_PRIO_REALTIME:
		case RTP_PRIO_NORMAL:
		case RTP_PRIO_IDLE:
			if (rtp.prio > RTP_PRIO_MAX) {
				error = EINVAL;
			} else {
				lp->lwp_rtprio = rtp;
				error = 0;
			}
			break;
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}

done:
	rel_mplock();
	return (error);
}

/*
 * Set realtime priority
 *
 * MPALMOSTSAFE
 */
int
sys_rtprio(struct rtprio_args *uap)
{
	struct proc *curp = curproc;
	struct proc *p;
	struct lwp *lp;
	struct ucred *cr = curthread->td_ucred;
	struct rtprio rtp;
	int error;

	error = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	if (error)
		return (error);

	get_mplock();
	if (uap->pid == 0)
		p = curp;
	else
		p = pfind(uap->pid);

	if (p == NULL) {
		error = ESRCH;
		goto done;
	}

	/* XXX lwp */
	lp = FIRST_LWP_IN_PROC(p);
	switch (uap->function) {
	case RTP_LOOKUP:
		error = copyout(&lp->lwp_rtprio, uap->rtp,
				sizeof(struct rtprio));
		break;
	case RTP_SET:
		if (cr->cr_uid && cr->cr_ruid &&
		    cr->cr_uid != p->p_ucred->cr_uid &&
		    cr->cr_ruid != p->p_ucred->cr_uid) {
			error = EPERM;
			break;
		}
		/* disallow setting rtprio in most cases if not superuser */
		if (priv_check_cred(cr, PRIV_SCHED_RTPRIO, 0)) {
			/* can't set someone else's */
			if (uap->pid) {
				error = EPERM;
				break;
			}
			/*
			 * Can't set realtime priority.
			 *
			 * Realtime priority has to be restricted for
			 * reasons which should be obvious.  However, for
			 * idle priority, there is a potential for system
			 * deadlock if an idleprio process gains a lock on
			 * a resource that other processes need (and the
			 * idleprio process can't run due to a CPU-bound
			 * normal process).  Fix me!  XXX
			 */
			if (RTP_PRIO_IS_REALTIME(rtp.type)) {
				error = EPERM;
				break;
			}
		}
		switch (rtp.type) {
#ifdef RTP_PRIO_FIFO
		case RTP_PRIO_FIFO:
#endif
		case RTP_PRIO_REALTIME:
		case RTP_PRIO_NORMAL:
		case RTP_PRIO_IDLE:
			if (rtp.prio > RTP_PRIO_MAX) {
				error = EINVAL;
				break;
			}
			lp->lwp_rtprio = rtp;
			error = 0;
			break;
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
done:
	rel_mplock();
	return (error);
}
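
/*
 * Illustrative userland sketch (not part of this file): moving the
 * calling process to the idle class with rtprio(2).  pid 0 targets the
 * caller, the only case allowed above without PRIV_SCHED_RTPRIO, and
 * RTP_PRIO_REALTIME is rejected for unprivileged callers as the block
 * comment in sys_rtprio() explains.
 */
#if 0
#include <sys/types.h>
#include <sys/rtprio.h>
#include <errno.h>

static int
drop_to_idleprio(void)
{
	struct rtprio rtp;

	rtp.type = RTP_PRIO_IDLE;
	rtp.prio = RTP_PRIO_MAX;	/* least favorable idle priority */
	if (rtprio(RTP_SET, 0, &rtp) < 0)
		return (errno);
	return (0);
}
#endif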

/*
 * MPSAFE
 */
int
sys_setrlimit(struct __setrlimit_args *uap)
{
	struct rlimit alim;
	int error;

	error = copyin(uap->rlp, &alim, sizeof(alim));
	if (error)
		return (error);

	error = kern_setrlimit(uap->which, &alim);

	return (error);
}

/*
 * MPSAFE
 */
int
sys_getrlimit(struct __getrlimit_args *uap)
{
	struct rlimit lim;
	int error;

	error = kern_getrlimit(uap->which, &lim);

	if (error == 0)
		error = copyout(&lim, uap->rlp, sizeof(*uap->rlp));
	return error;
}

/*
 * Transform the running time and tick information in lwp lp's thread
 * into user and system time usage.
 *
 * Since we are limited to statclock tick granularity this is a
 * statistical calculation which will be correct over the long haul,
 * but should not be expected to measure fine-grained deltas.
 *
 * It is possible to catch an lwp in the midst of being created, so
 * check whether lwp_thread is NULL or not.
 */
void
calcru(struct lwp *lp, struct timeval *up, struct timeval *sp)
{
	struct thread *td;

	/*
	 * Calculate at the statclock level.  YYY if the thread is owned by
	 * another cpu we need to forward the request to the other cpu, or
	 * have a token to interlock the information in order to avoid racing
	 * thread destruction.
	 */
	if ((td = lp->lwp_thread) != NULL) {
		crit_enter();
		up->tv_sec = td->td_uticks / 1000000;
		up->tv_usec = td->td_uticks % 1000000;
		sp->tv_sec = td->td_sticks / 1000000;
		sp->tv_usec = td->td_sticks % 1000000;
		crit_exit();
	}
}
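
/*
 * Worked example for the conversion above, assuming (as the division
 * by 1000000 implies) that td_uticks/td_sticks accumulate microseconds:
 *
 *	td_uticks == 2500000  =>  up->tv_sec  = 2500000 / 1000000 = 2
 *	                          up->tv_usec = 2500000 % 1000000 = 500000
 *
 * i.e. 2.5 seconds of user time.
 */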

/*
 * Aggregate resource statistics of all lwps of a process.
 *
 * proc.p_ru keeps track of all statistics directly related to a proc.  This
 * consists of RSS usage and nswap information and aggregate numbers for all
 * former lwps of this proc.
 *
 * proc.p_cru is the sum of all stats of reaped children.
 *
 * lwp.lwp_ru contains the stats directly related to one specific lwp, meaning
 * packet, scheduler switch, or page fault counts, etc.  This information gets
 * added to lwp.lwp_proc.p_ru when the lwp exits.
 */
void
calcru_proc(struct proc *p, struct rusage *ru)
{
	struct timeval upt, spt;
	long *rip1, *rip2;
	struct lwp *lp;

	*ru = p->p_ru;

	FOREACH_LWP_IN_PROC(lp, p) {
		calcru(lp, &upt, &spt);
		timevaladd(&ru->ru_utime, &upt);
		timevaladd(&ru->ru_stime, &spt);
		for (rip1 = &ru->ru_first, rip2 = &lp->lwp_ru.ru_first;
		     rip1 <= &ru->ru_last;
		     rip1++, rip2++)
			*rip1 += *rip2;
	}
}
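
/*
 * Hedged sketch (not part of the original file) of how these stats are
 * typically consumed: a hypothetical reaper folds a child's aggregate
 * rusage into the parent's p_cru using ruadd(), defined below.
 */
#if 0
static void
example_fold_child_rusage(struct proc *parent, struct proc *child)
{
	struct rusage ru;

	calcru_proc(child, &ru);	/* child's p_ru plus per-lwp stats */
	ruadd(&parent->p_cru, &ru);	/* accumulate into "children" stats */
}
#endif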

/*
 * MPALMOSTSAFE
 */
int
sys_getrusage(struct getrusage_args *uap)
{
	struct rusage ru;
	struct rusage *rup;
	int error;

	get_mplock();

	switch (uap->who) {
	case RUSAGE_SELF:
		rup = &ru;
		calcru_proc(curproc, rup);
		error = 0;
		break;
	case RUSAGE_CHILDREN:
		rup = &curproc->p_cru;
		error = 0;
		break;
	default:
		error = EINVAL;
		break;
	}
	if (error == 0)
		error = copyout(rup, uap->rusage, sizeof(struct rusage));
	rel_mplock();
	return (error);
}

void
ruadd(struct rusage *ru, struct rusage *ru2)
{
	long *ip, *ip2;
	int i;

	timevaladd(&ru->ru_utime, &ru2->ru_utime);
	timevaladd(&ru->ru_stime, &ru2->ru_stime);
	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
	ip = &ru->ru_first;
	ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
		*ip++ += *ip2++;
}

/*
 * Find the uidinfo structure for a uid.  This structure is used to
 * track the total resource consumption (process count, socket buffer
 * size, etc.) for the uid and impose limits.
 */
void
uihashinit(void)
{
	spin_init(&uihash_lock);
	uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
}

/*
 * NOTE: Must be called with uihash_lock held
 *
 * MPSAFE
 */
static struct uidinfo *
uilookup(uid_t uid)
{
	struct	uihashhead *uipp;
	struct	uidinfo *uip;

	uipp = UIHASH(uid);
	LIST_FOREACH(uip, uipp, ui_hash) {
		if (uip->ui_uid == uid)
			break;
	}
	return (uip);
}

/*
 * MPSAFE
 */
static struct uidinfo *
uicreate(uid_t uid)
{
	struct	uidinfo *uip, *tmp;

	/*
	 * Allocate space and check for a race
	 */
	MALLOC(uip, struct uidinfo *, sizeof(*uip), M_UIDINFO, M_WAITOK);

	/*
	 * Initialize structure and enter it into the hash table
	 */
	spin_init(&uip->ui_lock);
	uip->ui_uid = uid;
	uip->ui_proccnt = 0;
	uip->ui_sbsize = 0;
	uip->ui_ref = 1;	/* we're returning a ref */
	uip->ui_posixlocks = 0;
	varsymset_init(&uip->ui_varsymset, NULL);

	/*
	 * Somebody may have already created the uidinfo for this
	 * uid. If so, return that instead.
	 */
	spin_lock_wr(&uihash_lock);
	tmp = uilookup(uid);
	if (tmp != NULL) {
		varsymset_clean(&uip->ui_varsymset);
		spin_uninit(&uip->ui_lock);
		FREE(uip, M_UIDINFO);
		uip = tmp;
	} else {
		LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash);
	}
	spin_unlock_wr(&uihash_lock);

	return (uip);
}

/*
 * MPSAFE
 */
struct uidinfo *
uifind(uid_t uid)
{
	struct	uidinfo *uip;

	spin_lock_rd(&uihash_lock);
	uip = uilookup(uid);
	if (uip == NULL) {
		spin_unlock_rd(&uihash_lock);
		uip = uicreate(uid);
	} else {
		uihold(uip);
		spin_unlock_rd(&uihash_lock);
	}
	return (uip);
}
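
/*
 * Typical reference-counting pattern for the lookup functions above
 * (an illustrative, hypothetical consumer; real callers live in the
 * credential and resource-limit code): uifind() returns a held
 * reference which must be balanced by uidrop().
 */
#if 0
static void
example_uidinfo_use(uid_t uid)
{
	struct uidinfo *uip;

	uip = uifind(uid);	/* lookup or create, returns a held ref */
	/* ... inspect or charge uip->ui_proccnt, uip->ui_sbsize ... */
	uidrop(uip);		/* release; frees on the last drop */
}
#endif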

/*
 * MPSAFE
 */
static __inline void
uifree(struct uidinfo *uip)
{
	spin_lock_wr(&uihash_lock);

	/*
	 * Note that we're taking a read lock even though we
	 * modify the structure, because we know nobody can find
	 * it now that we've locked uihash_lock. If somebody
	 * can get to it through a stored pointer, the reference
	 * count will not be 0 and in that case we don't modify
	 * the struct.
	 */
	spin_lock_rd(&uip->ui_lock);
	if (uip->ui_ref != 0) {
		/*
		 * Someone found the uid and got a ref when we
		 * unlocked. No need to free any more.
		 */
		spin_unlock_rd(&uip->ui_lock);
		spin_unlock_wr(&uihash_lock);
		return;
	}
	if (uip->ui_sbsize != 0) {
		/* XXX no %qd in kernel.  Truncate. */
		kprintf("freeing uidinfo: uid = %d, sbsize = %ld\n",
		    uip->ui_uid, (long)uip->ui_sbsize);
	}
	if (uip->ui_proccnt != 0) {
		kprintf("freeing uidinfo: uid = %d, proccnt = %ld\n",
		    uip->ui_uid, uip->ui_proccnt);
	}

	LIST_REMOVE(uip, ui_hash);
	spin_unlock_wr(&uihash_lock);
	varsymset_clean(&uip->ui_varsymset);
	lockuninit(&uip->ui_varsymset.vx_lock);
	spin_unlock_rd(&uip->ui_lock);
	spin_uninit(&uip->ui_lock);
	FREE(uip, M_UIDINFO);
}

/*
 * MPSAFE
 */
void
uihold(struct uidinfo *uip)
{
	atomic_add_int(&uip->ui_ref, 1);
	KKASSERT(uip->ui_ref >= 0);
}

/*
 * MPSAFE
 */
void
uidrop(struct uidinfo *uip)
{
	KKASSERT(uip->ui_ref > 0);
	if (atomic_fetchadd_int(&uip->ui_ref, -1) == 1) {
		uifree(uip);
	}
}

void
uireplace(struct uidinfo **puip, struct uidinfo *nuip)
{
	uidrop(*puip);
	*puip = nuip;
}

/*
 * Change the count associated with number of processes
 * a given user is using.  When 'max' is 0, don't enforce a limit.
 */
int
chgproccnt(struct uidinfo *uip, int diff, int max)
{
	int ret;

	spin_lock_wr(&uip->ui_lock);
	/* don't allow them to exceed max, but allow subtraction */
	if (diff > 0 && uip->ui_proccnt + diff > max && max != 0) {
		ret = 0;
	} else {
		uip->ui_proccnt += diff;
		if (uip->ui_proccnt < 0)
			kprintf("negative proccnt for uid = %d\n", uip->ui_uid);
		ret = 1;
	}
	spin_unlock_wr(&uip->ui_lock);
	return ret;
}
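
/*
 * Illustrative sketch (hypothetical caller, names assumed): the fork
 * path is the natural consumer of chgproccnt(), charging the new
 * process against a per-uid limit and backing the charge out again
 * when the process exits.
 */
#if 0
static int
example_charge_proc(struct uidinfo *uip, int peruid_limit)
{
	if (!chgproccnt(uip, 1, peruid_limit))
		return (EAGAIN);	/* over the per-uid process limit */
	/* ... later, at process exit ... */
	chgproccnt(uip, -1, 0);		/* max == 0: never blocks subtraction */
	return (0);
}
#endif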

/*
 * Change the total socket buffer size a user has used.
 */
int
chgsbsize(struct uidinfo *uip, u_long *hiwat, u_long to, rlim_t max)
{
	rlim_t new;

	spin_lock_wr(&uip->ui_lock);
	new = uip->ui_sbsize + to - *hiwat;
	KKASSERT(new >= 0);

	/*
	 * If we are trying to increase the socket buffer size, scale
	 * down the hi water mark when we exceed the user's allowed
	 * socket buffer space.
	 *
	 * We can't scale down too much or we will blow up atomic packet
	 * operations.
	 */
	if (to > *hiwat && to > MCLBYTES && new > max) {
		to = to * max / new;
		if (to < MCLBYTES)
			to = MCLBYTES;
	}
	uip->ui_sbsize = new;
	*hiwat = to;
	spin_unlock_wr(&uip->ui_lock);
	return (1);
}
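
/*
 * Illustrative sketch (hypothetical caller, names assumed): a socket
 * buffer reservation would pass its current high water mark and the
 * requested size; chgsbsize() updates *hiwat in place, possibly
 * scaling the request down as described above.
 */
#if 0
static void
example_reserve_sbspace(struct uidinfo *uip, u_long *sb_hiwat,
			u_long newsize, rlim_t sbsize_limit)
{
	/* always "succeeds" here, but may grant less than newsize */
	chgsbsize(uip, sb_hiwat, newsize, sbsize_limit);
}
#endif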