/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_resource.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_resource.c,v 1.55.2.5 2001/11/03 01:41:08 ps Exp $
 * $DragonFly: src/sys/kern/kern_resource.c,v 1.35 2008/05/27 05:25:34 dillon Exp $
 */

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/file.h>
#include <sys/kern_syscall.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/time.h>
#include <sys/lockf.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>

static int donice (struct proc *chgp, int n);

static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
static struct spinlock uihash_lock;
static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
static u_long uihash;		/* size of hash table - 1 */

static struct uidinfo	*uicreate (uid_t uid);
static struct uidinfo	*uilookup (uid_t uid);

/*
 * Resource controls and accounting.
 */

struct getpriority_info {
	int low;
	int who;
};

static int getpriority_callback(struct proc *p, void *data);

/*
 * MPALMOSTSAFE
 */
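/*
 * getpriority() returns the lowest (most favorable) nice value among
 * the processes selected by which/who: one process (PRIO_PROCESS), a
 * process group (PRIO_PGRP), or all processes owned by a user
 * (PRIO_USER).  A who of 0 selects the calling process, its process
 * group, or its effective uid, respectively.
 */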
int
sys_getpriority(struct getpriority_args *uap)
{
	struct getpriority_info info;
	struct proc *curp = curproc;
	struct proc *p;
	int low = PRIO_MAX + 1;
	int error;

	get_mplock();

	switch (uap->which) {
	case PRIO_PROCESS:
		if (uap->who == 0)
			p = curp;
		else
			p = pfind(uap->who);
		if (p == NULL)
			break;
		if (!PRISON_CHECK(curp->p_ucred, p->p_ucred))
			break;
		low = p->p_nice;
		break;

	case PRIO_PGRP:
	{
		struct pgrp *pg;

		if (uap->who == 0)
			pg = curp->p_pgrp;
		else if ((pg = pgfind(uap->who)) == NULL)
			break;
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			if (PRISON_CHECK(curp->p_ucred, p->p_ucred) &&
			    p->p_nice < low)
				low = p->p_nice;
		}
		break;
	}
	case PRIO_USER:
		if (uap->who == 0)
			uap->who = curp->p_ucred->cr_uid;
		info.low = low;
		info.who = uap->who;
		allproc_scan(getpriority_callback, &info);
		low = info.low;
		break;

	default:
		error = EINVAL;
		goto done;
	}
	if (low == PRIO_MAX + 1) {
		error = ESRCH;
		goto done;
	}
	uap->sysmsg_result = low;
	error = 0;
done:
	rel_mplock();
	return (error);
}

/*
 * Figure out the current lowest nice priority for processes owned
 * by the specified user.
 */
static
int
getpriority_callback(struct proc *p, void *data)
{
	struct getpriority_info *info = data;

	if (PRISON_CHECK(curproc->p_ucred, p->p_ucred) &&
	    p->p_ucred->cr_uid == info->who &&
	    p->p_nice < info->low) {
		info->low = p->p_nice;
	}
	return(0);
}

struct setpriority_info {
	int prio;
	int who;
	int error;
	int found;
};

static int setpriority_callback(struct proc *p, void *data);

/*
 * MPALMOSTSAFE
 */
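/*
 * setpriority() applies donice() to every process selected by which/who,
 * using the same selection rules as getpriority() above.  Returns ESRCH
 * if no matching process was found; otherwise an error from donice()
 * may be returned.
 */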
int
sys_setpriority(struct setpriority_args *uap)
{
	struct setpriority_info info;
	struct proc *curp = curproc;
	struct proc *p;
	int found = 0, error = 0;

	get_mplock();

	switch (uap->which) {
	case PRIO_PROCESS:
		if (uap->who == 0)
			p = curp;
		else
			p = pfind(uap->who);
		if (p == NULL)
			break;
		if (!PRISON_CHECK(curp->p_ucred, p->p_ucred))
			break;
		error = donice(p, uap->prio);
		found++;
		break;

	case PRIO_PGRP:
	{
		struct pgrp *pg;

		if (uap->who == 0)
			pg = curp->p_pgrp;
		else if ((pg = pgfind(uap->who)) == NULL)
			break;
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			if (PRISON_CHECK(curp->p_ucred, p->p_ucred)) {
				error = donice(p, uap->prio);
				found++;
			}
		}
		break;
	}
	case PRIO_USER:
		if (uap->who == 0)
			uap->who = curp->p_ucred->cr_uid;
		info.prio = uap->prio;
		info.who = uap->who;
		info.error = 0;
		info.found = 0;
		allproc_scan(setpriority_callback, &info);
		error = info.error;
		found = info.found;
		break;

	default:
		error = EINVAL;
		found = 1;
		break;
	}

	rel_mplock();
	if (found == 0)
		error = ESRCH;
	return (error);
}

static
int
setpriority_callback(struct proc *p, void *data)
{
	struct setpriority_info *info = data;
	int error;

	if (p->p_ucred->cr_uid == info->who &&
	    PRISON_CHECK(curproc->p_ucred, p->p_ucred)) {
		error = donice(p, info->prio);
		if (error)
			info->error = error;
		++info->found;
	}
	return(0);
}

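/*
 * Adjust the nice value of a process.  The requested value is clamped
 * to [PRIO_MIN, PRIO_MAX].  Unprivileged callers may only renice
 * processes whose effective uid matches their own effective or real
 * uid, and lowering the nice value requires PRIV_SCHED_SETPRIORITY
 * (EACCES otherwise).  On success the userland scheduler priority is
 * recalculated for every lwp of the process.
 */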
static int
donice(struct proc *chgp, int n)
{
	struct proc *curp = curproc;
	struct ucred *cr = curp->p_ucred;
	struct lwp *lp;

	if (cr->cr_uid && cr->cr_ruid &&
	    cr->cr_uid != chgp->p_ucred->cr_uid &&
	    cr->cr_ruid != chgp->p_ucred->cr_uid)
		return (EPERM);
	if (n > PRIO_MAX)
		n = PRIO_MAX;
	if (n < PRIO_MIN)
		n = PRIO_MIN;
	if (n < chgp->p_nice && priv_check_cred(cr, PRIV_SCHED_SETPRIORITY, 0))
		return (EACCES);
	chgp->p_nice = n;
	FOREACH_LWP_IN_PROC(lp, chgp)
		chgp->p_usched->resetpriority(lp);
	return (0);
}

/*
 * MPALMOSTSAFE
 */
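/*
 * lwp_rtprio() gets or sets the realtime priority of one specific lwp.
 * pid 0 selects the current process and tid -1 selects the calling lwp
 * (a tid of 0 is valid here, so unlike the pid it cannot double as the
 * "current" selector).  RTP_SET is subject to the same ownership and
 * privilege checks as rtprio() below.
 */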
int
sys_lwp_rtprio(struct lwp_rtprio_args *uap)
{
	struct proc *p = curproc;
	struct lwp *lp;
	struct rtprio rtp;
	struct ucred *cr = curthread->td_ucred;
	int error;

	error = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	if (error)
		return error;
	if (uap->pid < 0)
		return EINVAL;

	get_mplock();
	if (uap->pid == 0) {
		/* curproc already loaded on p */
	} else {
		p = pfind(uap->pid);
	}

	if (p == NULL) {
		error = ESRCH;
		goto done;
	}

	if (uap->tid < -1) {
		error = EINVAL;
		goto done;
	}
	if (uap->tid == -1) {
		/*
		 * sadly, tid can be 0 so we can't use 0 here
		 * like sys_rtprio()
		 */
		lp = curthread->td_lwp;
	} else {
		lp = lwp_rb_tree_RB_LOOKUP(&p->p_lwp_tree, uap->tid);
		if (lp == NULL) {
			error = ESRCH;
			goto done;
		}
	}

	switch (uap->function) {
	case RTP_LOOKUP:
		error = copyout(&lp->lwp_rtprio, uap->rtp,
				sizeof(struct rtprio));
		break;
	case RTP_SET:
		if (cr->cr_uid && cr->cr_ruid &&
		    cr->cr_uid != p->p_ucred->cr_uid &&
		    cr->cr_ruid != p->p_ucred->cr_uid) {
			error = EPERM;
			break;
		}
		/* disallow setting rtprio in most cases if not superuser */
		if (priv_check_cred(cr, PRIV_SCHED_RTPRIO, 0)) {
			/* can't set someone else's */
			if (uap->pid) { /* XXX */
				error = EPERM;
				break;
			}
			/*
			 * Can't set realtime priority.
			 *
			 * Realtime priority has to be restricted for
			 * reasons which should be obvious.  However, for
			 * idle priority, there is a potential for system
			 * deadlock if an idleprio process gains a lock on
			 * a resource that other processes need (and the
			 * idleprio process can't run due to a CPU-bound
			 * normal process).  Fix me!  XXX
			 */
			if (RTP_PRIO_IS_REALTIME(rtp.type)) {
				error = EPERM;
				break;
			}
		}
		switch (rtp.type) {
#ifdef RTP_PRIO_FIFO
		case RTP_PRIO_FIFO:
#endif
		case RTP_PRIO_REALTIME:
		case RTP_PRIO_NORMAL:
		case RTP_PRIO_IDLE:
			if (rtp.prio > RTP_PRIO_MAX) {
				error = EINVAL;
				break;
			}
			lp->lwp_rtprio = rtp;
			error = 0;
			break;
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}

done:
	rel_mplock();
	return (error);
}

/*
 * Set realtime priority
 *
 * MPALMOSTSAFE
 */
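/*
 * rtprio() gets or sets the realtime priority of a process; pid 0
 * selects the current process.  Note that only the first lwp of the
 * target process is examined or modified (see the XXX below).
 */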
int
sys_rtprio(struct rtprio_args *uap)
{
	struct proc *curp = curproc;
	struct proc *p;
	struct lwp *lp;
	struct ucred *cr = curthread->td_ucred;
	struct rtprio rtp;
	int error;

	error = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	if (error)
		return (error);

	get_mplock();
	if (uap->pid == 0)
		p = curp;
	else
		p = pfind(uap->pid);

	if (p == NULL) {
		error = ESRCH;
		goto done;
	}

	/* XXX lwp */
	lp = FIRST_LWP_IN_PROC(p);
	switch (uap->function) {
	case RTP_LOOKUP:
		error = copyout(&lp->lwp_rtprio, uap->rtp,
				sizeof(struct rtprio));
		break;
	case RTP_SET:
		if (cr->cr_uid && cr->cr_ruid &&
		    cr->cr_uid != p->p_ucred->cr_uid &&
		    cr->cr_ruid != p->p_ucred->cr_uid) {
			error = EPERM;
			break;
		}
		/* disallow setting rtprio in most cases if not superuser */
		if (priv_check_cred(cr, PRIV_SCHED_RTPRIO, 0)) {
			/* can't set someone else's */
			if (uap->pid) {
				error = EPERM;
				break;
			}
			/*
			 * Can't set realtime priority.
			 *
			 * Realtime priority has to be restricted for
			 * reasons which should be obvious.  However, for
			 * idle priority, there is a potential for system
			 * deadlock if an idleprio process gains a lock on
			 * a resource that other processes need (and the
			 * idleprio process can't run due to a CPU-bound
			 * normal process).  Fix me!  XXX
			 */
			if (RTP_PRIO_IS_REALTIME(rtp.type)) {
				error = EPERM;
				break;
			}
		}
		switch (rtp.type) {
#ifdef RTP_PRIO_FIFO
		case RTP_PRIO_FIFO:
#endif
		case RTP_PRIO_REALTIME:
		case RTP_PRIO_NORMAL:
		case RTP_PRIO_IDLE:
			if (rtp.prio > RTP_PRIO_MAX) {
				error = EINVAL;
				break;
			}
			lp->lwp_rtprio = rtp;
			error = 0;
			break;
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
done:
	rel_mplock();
	return (error);
}

/*
 * MPSAFE
 */
int
sys_setrlimit(struct __setrlimit_args *uap)
{
	struct rlimit alim;
	int error;

	error = copyin(uap->rlp, &alim, sizeof(alim));
	if (error)
		return (error);

	error = kern_setrlimit(uap->which, &alim);

	return (error);
}

/*
 * MPSAFE
 */
int
sys_getrlimit(struct __getrlimit_args *uap)
{
	struct rlimit lim;
	int error;

	error = kern_getrlimit(uap->which, &lim);

	if (error == 0)
		error = copyout(&lim, uap->rlp, sizeof(*uap->rlp));
	return (error);
}

/*
 * Transform the running time and tick information of the specified lwp's
 * thread into user and system time usage.
 *
 * Since we are limited to statclock tick granularity this is a statistical
 * calculation which will be correct over the long haul, but should not be
 * expected to measure fine-grained deltas.
 *
 * It is possible to catch an lwp in the midst of being created, so
 * check whether lwp_thread is NULL or not.
 */
void
calcru(struct lwp *lp, struct timeval *up, struct timeval *sp)
{
	struct thread *td;

	/*
	 * Calculate at the statclock level.  YYY if the thread is owned by
	 * another cpu we need to forward the request to the other cpu, or
	 * have a token to interlock the information in order to avoid racing
	 * thread destruction.
	 */
	if ((td = lp->lwp_thread) != NULL) {
		crit_enter();
		up->tv_sec = td->td_uticks / 1000000;
		up->tv_usec = td->td_uticks % 1000000;
		sp->tv_sec = td->td_sticks / 1000000;
		sp->tv_usec = td->td_sticks % 1000000;
		crit_exit();
	}
}

/*
 * Aggregate resource statistics of all lwps of a process.
 *
 * proc.p_ru keeps track of all statistics directly related to a proc.  This
 * consists of RSS usage and nswap information and aggregate numbers for all
 * former lwps of this proc.
 *
 * proc.p_cru is the sum of all stats of reaped children.
 *
 * lwp.lwp_ru contains the stats directly related to one specific lwp, meaning
 * packet, scheduler switch or page fault counts, etc.  This information gets
 * added to lwp.lwp_proc.p_ru when the lwp exits.
 */
void
calcru_proc(struct proc *p, struct rusage *ru)
{
	struct timeval upt, spt;
	long *rip1, *rip2;
	struct lwp *lp;

	*ru = p->p_ru;

	FOREACH_LWP_IN_PROC(lp, p) {
		calcru(lp, &upt, &spt);
		timevaladd(&ru->ru_utime, &upt);
		timevaladd(&ru->ru_stime, &spt);
		for (rip1 = &ru->ru_first, rip2 = &lp->lwp_ru.ru_first;
		     rip1 <= &ru->ru_last;
		     rip1++, rip2++)
			*rip1 += *rip2;
	}
}


/*
 * MPALMOSTSAFE
 */
int
sys_getrusage(struct getrusage_args *uap)
{
	struct rusage ru;
	struct rusage *rup;
	int error;

	get_mplock();

	switch (uap->who) {
	case RUSAGE_SELF:
		rup = &ru;
		calcru_proc(curproc, rup);
		error = 0;
		break;
	case RUSAGE_CHILDREN:
		rup = &curproc->p_cru;
		error = 0;
		break;
	default:
		error = EINVAL;
		break;
	}
	if (error == 0)
		error = copyout(rup, uap->rusage, sizeof(struct rusage));
	rel_mplock();
	return (error);
}

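/*
 * Fold the resource usage counters of ru2 into ru: the user and system
 * times and the long-word counters are summed, while ru_maxrss keeps
 * the larger of the two values.
 */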
void
ruadd(struct rusage *ru, struct rusage *ru2)
{
	long *ip, *ip2;
	int i;

	timevaladd(&ru->ru_utime, &ru2->ru_utime);
	timevaladd(&ru->ru_stime, &ru2->ru_stime);
	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
	ip = &ru->ru_first; ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
		*ip++ += *ip2++;
}

/*
 * Find the uidinfo structure for a uid.  This structure is used to
 * track the total resource consumption (process count, socket buffer
 * size, etc.) for the uid and impose limits.
 */
void
uihashinit(void)
{
	spin_init(&uihash_lock);
	uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
}

/*
 * NOTE: Must be called with uihash_lock held
 *
 * MPSAFE
 */
static struct uidinfo *
uilookup(uid_t uid)
{
	struct	uihashhead *uipp;
	struct	uidinfo *uip;

	uipp = UIHASH(uid);
	LIST_FOREACH(uip, uipp, ui_hash) {
		if (uip->ui_uid == uid)
			break;
	}
	return (uip);
}

/*
 * MPSAFE
 */
static struct uidinfo *
uicreate(uid_t uid)
{
	struct	uidinfo *uip, *tmp;

	/*
	 * Allocate space and initialize the structure.  The hash
	 * insertion below rechecks for a race with another creator.
	 */
	MALLOC(uip, struct uidinfo *, sizeof(*uip), M_UIDINFO, M_WAITOK);
	spin_init(&uip->ui_lock);
	uip->ui_uid = uid;
	uip->ui_proccnt = 0;
	uip->ui_sbsize = 0;
	uip->ui_ref = 1;	/* we're returning a ref */
	uip->ui_posixlocks = 0;
	varsymset_init(&uip->ui_varsymset, NULL);

	/*
	 * Somebody may have already created the uidinfo for this
	 * uid.  If so, return that instead and throw our copy away.
	 */
	spin_lock_wr(&uihash_lock);
	tmp = uilookup(uid);
	if (tmp != NULL) {
		varsymset_clean(&uip->ui_varsymset);
		spin_uninit(&uip->ui_lock);
		FREE(uip, M_UIDINFO);
		uip = tmp;
	} else {
		LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash);
	}
	spin_unlock_wr(&uihash_lock);

	return (uip);
}

/*
 * MPSAFE
 */
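/*
 * Look up the uidinfo for a uid, creating it on first use.  The
 * returned structure is referenced; release it with uidrop() when
 * done.
 */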
struct uidinfo *
uifind(uid_t uid)
{
	struct	uidinfo *uip;

	spin_lock_rd(&uihash_lock);
	uip = uilookup(uid);
	if (uip == NULL) {
		spin_unlock_rd(&uihash_lock);
		uip = uicreate(uid);
	} else {
		uihold(uip);
		spin_unlock_rd(&uihash_lock);
	}
	return (uip);
}

/*
 * MPSAFE
 */
static __inline void
uifree(struct uidinfo *uip)
{
	spin_lock_wr(&uihash_lock);

	/*
	 * Note that we're taking a read lock even though we
	 * modify the structure because we know nobody can find
	 * it now that we've locked uihash_lock.  If somebody
	 * can get to it through a stored pointer, the reference
	 * count will not be 0 and in that case we don't modify
	 * the struct.
	 */
	spin_lock_rd(&uip->ui_lock);
	if (uip->ui_ref != 0) {
		/*
		 * Someone found the uid and got a ref when we
		 * unlocked.  No need to free any more.
		 */
		spin_unlock_rd(&uip->ui_lock);
		spin_unlock_wr(&uihash_lock);
		return;
	}
	if (uip->ui_sbsize != 0) {
		/* XXX no %qd in kernel.  Truncate. */
		kprintf("freeing uidinfo: uid = %d, sbsize = %ld\n",
		    uip->ui_uid, (long)uip->ui_sbsize);
	}
	if (uip->ui_proccnt != 0) {
		kprintf("freeing uidinfo: uid = %d, proccnt = %ld\n",
		    uip->ui_uid, uip->ui_proccnt);
	}
	LIST_REMOVE(uip, ui_hash);
	spin_unlock_wr(&uihash_lock);
	varsymset_clean(&uip->ui_varsymset);
	lockuninit(&uip->ui_varsymset.vx_lock);
	spin_unlock_rd(&uip->ui_lock);
	spin_uninit(&uip->ui_lock);
	FREE(uip, M_UIDINFO);
}

/*
 * MPSAFE
 */
void
uihold(struct uidinfo *uip)
{
	atomic_add_int(&uip->ui_ref, 1);
	KKASSERT(uip->ui_ref > 0);
}

/*
 * MPSAFE
 */
void
uidrop(struct uidinfo *uip)
{
	if (atomic_fetchadd_int(&uip->ui_ref, -1) == 1) {
		uifree(uip);
	} else {
		KKASSERT(uip->ui_ref > 0);
	}
}

void
uireplace(struct uidinfo **puip, struct uidinfo *nuip)
{
	uidrop(*puip);
	*puip = nuip;
}

/*
 * Change the count of processes a given user is using.  When 'max'
 * is 0, don't enforce a limit.
 */
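/*
 * Illustrative use, e.g. from the fork path ('uip' and 'plimit' stand
 * in for the caller's uidinfo reference and per-user process limit;
 * the names are hypothetical):
 *
 *	if (!chgproccnt(uip, 1, plimit)) {
 *		... over the per-uid process limit, fail the operation ...
 *	}
 */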
int
chgproccnt(struct uidinfo *uip, int diff, int max)
{
	int ret;

	spin_lock_wr(&uip->ui_lock);
	/* don't allow them to exceed max, but allow subtraction */
	if (diff > 0 && uip->ui_proccnt + diff > max && max != 0) {
		ret = 0;
	} else {
		uip->ui_proccnt += diff;
		if (uip->ui_proccnt < 0)
			kprintf("negative proccnt for uid = %d\n", uip->ui_uid);
		ret = 1;
	}
	spin_unlock_wr(&uip->ui_lock);
	return (ret);
}

/*
 * Change the total socket buffer size a user has used.
 */
int
chgsbsize(struct uidinfo *uip, u_long *hiwat, u_long to, rlim_t max)
{
	rlim_t new;

	spin_lock_wr(&uip->ui_lock);
	new = uip->ui_sbsize + to - *hiwat;
	KKASSERT(new >= 0);

	/*
	 * If we are trying to increase the socket buffer size, scale down
	 * the hi water mark when we exceed the user's allowed socket buffer
	 * space.
	 *
	 * We can't scale down too much or we will blow up atomic packet
	 * operations.
	 */
	if (to > *hiwat && to > MCLBYTES && new > max) {
		to = to * max / new;
		if (to < MCLBYTES)
			to = MCLBYTES;
	}
	uip->ui_sbsize = new;
	*hiwat = to;
	spin_unlock_wr(&uip->ui_lock);
	return (1);
}