xref: /dflybsd-src/sys/kern/kern_plimit.c (revision 1365b5f1af0c1cc3894d279008de5831984b2990)
1 /*
2  * Copyright (c) 2006 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 /*
35  * Copyright (c) 1982, 1986, 1991, 1993
36  *	The Regents of the University of California.  All rights reserved.
37  * (c) UNIX System Laboratories, Inc.
38  * All or some portions of this file are derived from material licensed
39  * to the University of California by American Telephone and Telegraph
40  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
41  * the permission of UNIX System Laboratories, Inc.
42  *
43  * Redistribution and use in source and binary forms, with or without
44  * modification, are permitted provided that the following conditions
45  * are met:
46  * 1. Redistributions of source code must retain the above copyright
47  *    notice, this list of conditions and the following disclaimer.
48  * 2. Redistributions in binary form must reproduce the above copyright
49  *    notice, this list of conditions and the following disclaimer in the
50  *    documentation and/or other materials provided with the distribution.
51  * 3. Neither the name of the University nor the names of its contributors
52  *    may be used to endorse or promote products derived from this software
53  *    without specific prior written permission.
54  *
55  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
56  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
57  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
58  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
59  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
60  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
61  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
62  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
63  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
64  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
65  * SUCH DAMAGE.
66  *
67  *	@(#)kern_resource.c	8.5 (Berkeley) 1/21/94
68  * $FreeBSD: src/sys/kern/kern_resource.c,v 1.55.2.5 2001/11/03 01:41:08 ps Exp $
69  * $DragonFly: src/sys/kern/kern_plimit.c,v 1.3 2008/05/08 01:26:00 dillon Exp $
70  */
71 
72 #include <sys/resource.h>
73 #include <sys/spinlock.h>
74 #include <sys/proc.h>
75 #include <sys/priv.h>
76 #include <sys/file.h>
77 #include <sys/lockf.h>
78 #include <sys/kern_syscall.h>
79 
80 #include <vm/vm_param.h>
81 #include <vm/vm.h>
82 #include <vm/vm_map.h>
83 
84 #include <machine/pmap.h>
85 
86 #include <sys/spinlock2.h>
87 #include <sys/mplock2.h>
88 
89 static void plimit_copy(struct plimit *olimit, struct plimit *nlimit);
90 
91 /*
92  * Initialize proc0's plimit structure.  All later plimit structures
93  * are inherited through fork.
94  */
95 void
96 plimit_init0(struct plimit *limit)
97 {
98 	int i;
99 	rlim_t lim;
100 
101 	for (i = 0; i < RLIM_NLIMITS; ++i) {
102 		limit->pl_rlimit[i].rlim_cur = RLIM_INFINITY;
103 		limit->pl_rlimit[i].rlim_max = RLIM_INFINITY;
104 	}
105 	limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur = maxfiles;
106 	limit->pl_rlimit[RLIMIT_NOFILE].rlim_max = maxfiles;
107 	limit->pl_rlimit[RLIMIT_NPROC].rlim_cur = maxproc;
108 	limit->pl_rlimit[RLIMIT_NPROC].rlim_max = maxproc;
109 	lim = ptoa((rlim_t)vmstats.v_free_count);
110 	limit->pl_rlimit[RLIMIT_RSS].rlim_max = lim;
111 	limit->pl_rlimit[RLIMIT_MEMLOCK].rlim_max = lim;
112 	limit->pl_rlimit[RLIMIT_MEMLOCK].rlim_cur = lim / 3;
113 	limit->p_cpulimit = RLIM_INFINITY;
114 	limit->p_refcnt = 1;
115 	spin_init(&limit->p_spin);
116 }
117 
118 /*
119  * Return a plimit for use by a new forked process given the one
120  * contained in the parent process.
121  *
122  * MPSAFE
123  */
struct plimit *
plimit_fork(struct proc *p1)
{
	struct plimit *olimit = p1->p_limit;
	struct plimit *nlimit = NULL;
	struct plimit *rlimit;

	/*
	 * If we are exclusive (but not threaded-exclusive), but have only
	 * one reference, we can convert the structure to copy-on-write
	 * again.
	 *
	 * If we were threaded but are no longer threaded we can do the same
	 * thing.
	 *
	 * p_exclusive values: 0 = shareable/copy-on-write, 1 = exclusive
	 * (single-threaded modification in progress), 2 = exclusive due
	 * to the process being threaded.  In either exclusive case there
	 * can be only one reference, which the assertions verify.
	 */
	if (olimit->p_exclusive == 1) {
		KKASSERT(olimit->p_refcnt == 1);
		olimit->p_exclusive = 0;
	} else if (olimit->p_exclusive == 2 && p1->p_nthreads == 1) {
		KKASSERT(olimit->p_refcnt == 1);
		olimit->p_exclusive = 0;
	}

	/*
	 * Take a short-cut that requires limited spin locks.  If we aren't
	 * exclusive we will not be threaded and we can just bump the ref
	 * count.  If that is true and we also have only one ref then there
	 * can be no other accessors, so the increment may be done without
	 * the spinlock.
	 */
	if (olimit->p_exclusive == 0) {
		if (olimit->p_refcnt == 1) {
			++olimit->p_refcnt;
		} else {
			spin_lock_wr(&olimit->p_spin);
			++olimit->p_refcnt;
			spin_unlock_wr(&olimit->p_spin);
		}
		return(olimit);
	}

	/*
	 * Full-blown code path: the structure is exclusive, so the child
	 * needs its own copy.  Because we cannot kmalloc(M_WAITOK) while
	 * holding a spinlock, the allocation is done with the lock dropped
	 * and the state is re-evaluated each time the lock is re-acquired.
	 */
	nlimit = NULL;
	spin_lock_wr(&olimit->p_spin);

	for (;;) {
		/* exclusivity may have been lost while unlocked: share it */
		if (olimit->p_exclusive == 0) {
			++olimit->p_refcnt;
			rlimit = olimit;
			break;
		}
		/* second pass: we have memory, copy under the lock */
		if (nlimit) {
			plimit_copy(olimit, nlimit);
			rlimit = nlimit;
			nlimit = NULL;
			break;
		}
		/* first pass: drop the lock to allocate, then retry */
		spin_unlock_wr(&olimit->p_spin);
		nlimit = kmalloc(sizeof(*nlimit), M_SUBPROC, M_WAITOK);
		spin_lock_wr(&olimit->p_spin);
	}
	spin_unlock_wr(&olimit->p_spin);

	/* free the allocation if the share-it race won instead */
	if (nlimit)
		kfree(nlimit, M_SUBPROC);
	return(rlimit);
}
191 
192 /*
193  * This routine is called when a new LWP is created for a process.  We
194  * must force exclusivity (=2) so p->p_limit remains stable.
195  *
196  * LWPs share the same process structure so this does not bump refcnt.
197  */
void
plimit_lwp_fork(struct proc *p)
{
	struct plimit *olimit;

	/*
	 * Loop until p->p_limit is a thread-exclusive (=2) structure with
	 * a single reference.  If it is shared (refcnt > 1) we call
	 * plimit_modify() to replace it with a private copy, then retry
	 * since p->p_limit has changed.
	 */
	for (;;) {
		olimit = p->p_limit;
		if (olimit->p_exclusive == 2) {
			KKASSERT(olimit->p_refcnt == 1);
			break;
		}
		if (olimit->p_refcnt == 1) {
			olimit->p_exclusive = 2;
			break;
		}
		/* forces a private copy; index -1 means no limit change */
		plimit_modify(p, -1, NULL);
	}
}
216 
217 /*
 * This routine is called to fixup a process's p_limit structure prior
219  * to it being modified.  If index >= 0 the specified modification is also
220  * made.
221  *
222  * This routine must make the limit structure exclusive.  A later fork
223  * will convert it back to copy-on-write if possible.
224  *
225  * We can count on p->p_limit being stable since if we had created any
226  * threads it will have already been made exclusive (=2).
227  *
228  * MPSAFE
229  */
void
plimit_modify(struct proc *p, int index, struct rlimit *rlim)
{
	struct plimit *olimit;
	struct plimit *nlimit;
	struct plimit *rlimit;

	/*
	 * Shortcut.  If we are not threaded we may be able to trivially
	 * set the structure to exclusive access without needing to acquire
	 * any spinlocks.   The p_limit structure will be stable.
	 */
	olimit = p->p_limit;
	if (p->p_nthreads == 1) {
		if (olimit->p_exclusive == 0 && olimit->p_refcnt == 1)
			olimit->p_exclusive = 1;
		if (olimit->p_exclusive) {
			if (index >= 0)
				p->p_limit->pl_rlimit[index] = *rlim;
			return;
		}
	}

	/*
	 * Full-blown code path.  Make a copy if we aren't exclusive.  If
	 * we have only one ref we can safely convert the structure to
	 * exclusive without copying.  As in plimit_fork(), the copy must
	 * be allocated with the spinlock dropped, so the loop re-checks
	 * state each time the lock is re-acquired.
	 */
	nlimit = NULL;
	spin_lock_wr(&olimit->p_spin);

	for (;;) {
		/* sole owner: convert in place, no copy needed */
		if (olimit->p_refcnt == 1) {
			if (olimit->p_exclusive == 0)
				olimit->p_exclusive = 1;
			rlimit = olimit;
			break;
		}
		/* a shared structure can never be exclusive */
		KKASSERT(olimit->p_exclusive == 0);
		if (nlimit) {
			/* replace the shared structure with our copy */
			plimit_copy(olimit, nlimit);
			nlimit->p_exclusive = 1;
			p->p_limit = nlimit;
			rlimit = nlimit;
			nlimit = NULL;
			break;
		}
		spin_unlock_wr(&olimit->p_spin);
		nlimit = kmalloc(sizeof(*nlimit), M_SUBPROC, M_WAITOK);
		spin_lock_wr(&olimit->p_spin);
	}
	/* apply the requested modification while still holding the lock */
	if (index >= 0)
		rlimit->pl_rlimit[index] = *rlim;
	spin_unlock_wr(&olimit->p_spin);

	/* discard the allocation if the convert-in-place path won */
	if (nlimit)
		kfree(nlimit, M_SUBPROC);
}
287 
288 /*
289  * Destroy a process's plimit structure.
290  *
291  * MPSAFE
292  */
void
plimit_free(struct proc *p)
{
	struct plimit *limit;

	if ((limit = p->p_limit) != NULL) {
		p->p_limit = NULL;

		/*
		 * With only one reference there can be no other accessors,
		 * so we can free without taking the spinlock.  The bogus
		 * refcnt value presumably poisons the structure to help
		 * catch a stale access after the free (NOTE(review):
		 * confirm intent).
		 */
		if (limit->p_refcnt == 1) {
			limit->p_refcnt = -999;
			kfree(limit, M_SUBPROC);
		} else {
			/* shared: drop our ref, last one out frees */
			spin_lock_wr(&limit->p_spin);
			if (--limit->p_refcnt == 0) {
				spin_unlock_wr(&limit->p_spin);
				kfree(limit, M_SUBPROC);
			} else {
				spin_unlock_wr(&limit->p_spin);
			}
		}
	}
}
315 
316 /*
317  * Modify a resource limit (from system call)
318  *
319  * MPSAFE
320  */
int
kern_setrlimit(u_int which, struct rlimit *limp)
{
        struct proc *p = curproc;
	struct plimit *limit;
        struct rlimit *alimp;
        int error;

        if (which >= RLIM_NLIMITS)
                return (EINVAL);

	/*
	 * We will be modifying a resource, make a copy if necessary.
	 * After this call p->p_limit is exclusive to us (or at least
	 * safely modifiable) and remains stable.
	 */
	plimit_modify(p, -1, NULL);
	limit = p->p_limit;
        alimp = &limit->pl_rlimit[which];

        /*
         * Preserve historical bugs by treating negative limits as unsigned.
         */
        if (limp->rlim_cur < 0)
                limp->rlim_cur = RLIM_INFINITY;
        if (limp->rlim_max < 0)
                limp->rlim_max = RLIM_INFINITY;

	/*
	 * Raising either the soft or hard limit above the current hard
	 * limit requires privilege.
	 */
	spin_lock_rd(&limit->p_spin);
        if (limp->rlim_cur > alimp->rlim_max ||
            limp->rlim_max > alimp->rlim_max) {
		spin_unlock_rd(&limit->p_spin);
                error = priv_check_cred(p->p_ucred, PRIV_PROC_SETRLIMIT, 0);
                if (error)
                        return (error);
	} else {
		spin_unlock_rd(&limit->p_spin);
	}
	/* the soft limit can never exceed the hard limit */
        if (limp->rlim_cur > limp->rlim_max)
                limp->rlim_cur = limp->rlim_max;

        switch (which) {
        case RLIMIT_CPU:
		/*
		 * p_cpulimit is kept in microseconds (rlim_cur is in
		 * seconds); guard the multiply against overflow.
		 */
		spin_lock_wr(&limit->p_spin);
                if (limp->rlim_cur > RLIM_INFINITY / (rlim_t)1000000)
                        limit->p_cpulimit = RLIM_INFINITY;
                else
                        limit->p_cpulimit = (rlim_t)1000000 * limp->rlim_cur;
		spin_unlock_wr(&limit->p_spin);
                break;
        case RLIMIT_DATA:
                if (limp->rlim_cur > maxdsiz)
                        limp->rlim_cur = maxdsiz;
                if (limp->rlim_max > maxdsiz)
                        limp->rlim_max = maxdsiz;
                break;

        case RLIMIT_STACK:
                if (limp->rlim_cur > maxssiz)
                        limp->rlim_cur = maxssiz;
                if (limp->rlim_max > maxssiz)
                        limp->rlim_max = maxssiz;
                /*
                 * Stack is allocated to the max at exec time with only
                 * "rlim_cur" bytes accessible.  If stack limit is going
                 * up make more accessible, if going down make inaccessible.
                 */
		spin_lock_rd(&limit->p_spin);
                if (limp->rlim_cur != alimp->rlim_cur) {
                        vm_offset_t addr;
                        vm_size_t size;
                        vm_prot_t prot;

                        if (limp->rlim_cur > alimp->rlim_cur) {
                                prot = VM_PROT_ALL;
                                size = limp->rlim_cur - alimp->rlim_cur;
                                addr = USRSTACK - limp->rlim_cur;
                        } else {
                                prot = VM_PROT_NONE;
                                size = alimp->rlim_cur - limp->rlim_cur;
                                addr = USRSTACK - alimp->rlim_cur;
                        }
			/* drop spinlock before the blocking VM op below */
			spin_unlock_rd(&limit->p_spin);
                        addr = trunc_page(addr);
                        size = round_page(size);
			get_mplock();
                        vm_map_protect(&p->p_vmspace->vm_map,
				       addr, addr+size, prot, FALSE);
			rel_mplock();
                } else {
			spin_unlock_rd(&limit->p_spin);
		}
                break;

        case RLIMIT_NOFILE:
                if (limp->rlim_cur > maxfilesperproc)
                        limp->rlim_cur = maxfilesperproc;
                if (limp->rlim_max > maxfilesperproc)
                        limp->rlim_max = maxfilesperproc;
                break;

        case RLIMIT_NPROC:
                if (limp->rlim_cur > maxprocperuid)
                        limp->rlim_cur = maxprocperuid;
                if (limp->rlim_max > maxprocperuid)
                        limp->rlim_max = maxprocperuid;
		/* a process must always be allowed at least one process */
                if (limp->rlim_cur < 1)
                        limp->rlim_cur = 1;
                if (limp->rlim_max < 1)
                        limp->rlim_max = 1;
                break;
        case RLIMIT_POSIXLOCKS:
                if (limp->rlim_cur > maxposixlocksperuid)
                        limp->rlim_cur = maxposixlocksperuid;
                if (limp->rlim_max > maxposixlocksperuid)
                        limp->rlim_max = maxposixlocksperuid;
                break;
        }
	/* commit the (possibly clamped) new limit */
	spin_lock_wr(&limit->p_spin);
        *alimp = *limp;
	spin_unlock_wr(&limit->p_spin);
        return (0);
}
442 
443 /*
444  * The rlimit indexed by which is returned in the second argument.
445  *
446  * MPSAFE
447  */
448 int
449 kern_getrlimit(u_int which, struct rlimit *limp)
450 {
451 	struct proc *p = curproc;
452 	struct plimit *limit;
453 
454         if (which >= RLIM_NLIMITS)
455                 return (EINVAL);
456 
457 	limit = p->p_limit;
458 	spin_lock_rd(&limit->p_spin);
459         *limp = p->p_rlimit[which];
460 	spin_unlock_rd(&limit->p_spin);
461         return (0);
462 }
463 
464 /*
465  * Determine if the cpu limit has been reached and return an operations
466  * code for the caller to perform.
467  *
468  * MPSAFE
469  */
470 int
471 plimit_testcpulimit(struct plimit *limit, u_int64_t ttime)
472 {
473 	struct rlimit *rlim;
474 	int mode;
475 
476 	mode = PLIMIT_TESTCPU_OK;
477 	if (limit->p_cpulimit != RLIM_INFINITY) {
478 		spin_lock_rd(&limit->p_spin);
479 		if (ttime > limit->p_cpulimit) {
480 			rlim = &limit->pl_rlimit[RLIMIT_CPU];
481 			if (ttime / (rlim_t)1000000 >= rlim->rlim_max + 5) {
482 				mode = PLIMIT_TESTCPU_KILL;
483 			} else {
484 				mode = PLIMIT_TESTCPU_XCPU;
485 			}
486 		}
487 		spin_unlock_rd(&limit->p_spin);
488 	}
489 	return(mode);
490 }
491 
492 /*
493  * Helper routine to copy olimit to nlimit and initialize nlimit for
494  * use.  nlimit's reference count will be set to 1 and its exclusive bit
495  * will be cleared.
496  *
497  * MPSAFE
498  */
499 static
500 void
501 plimit_copy(struct plimit *olimit, struct plimit *nlimit)
502 {
503 	*nlimit = *olimit;
504 
505 	spin_init(&nlimit->p_spin);
506 	nlimit->p_refcnt = 1;
507 	nlimit->p_exclusive = 0;
508 }
509 
510