xref: /netbsd-src/sys/uvm/uvm_unix.c (revision 17dd36da8292193180754d5047c0926dbb56818c)
1 /*	$NetBSD: uvm_unix.c,v 1.20 2001/03/19 02:25:33 simonb Exp $	*/
2 
3 /*
4  * Copyright (c) 1997 Charles D. Cranor and Washington University.
5  * Copyright (c) 1991, 1993 The Regents of the University of California.
6  * Copyright (c) 1988 University of Utah.
7  *
8  * All rights reserved.
9  *
10  * This code is derived from software contributed to Berkeley by
11  * the Systems Programming Group of the University of Utah Computer
12  * Science Department.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions
16  * are met:
17  * 1. Redistributions of source code must retain the above copyright
18  *    notice, this list of conditions and the following disclaimer.
19  * 2. Redistributions in binary form must reproduce the above copyright
20  *    notice, this list of conditions and the following disclaimer in the
21  *    documentation and/or other materials provided with the distribution.
22  * 3. All advertising materials mentioning features or use of this software
23  *    must display the following acknowledgement:
24  *      This product includes software developed by Charles D. Cranor,
25  *	Washington University, the University of California, Berkeley and
26  *	its contributors.
27  * 4. Neither the name of the University nor the names of its contributors
28  *    may be used to endorse or promote products derived from this software
29  *    without specific prior written permission.
30  *
31  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
32  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
33  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
34  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
35  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
39  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
40  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
41  * SUCH DAMAGE.
42  *
43  * from: Utah $Hdr: vm_unix.c 1.1 89/11/07$
44  *      @(#)vm_unix.c   8.1 (Berkeley) 6/11/93
45  * from: Id: uvm_unix.c,v 1.1.2.2 1997/08/25 18:52:30 chuck Exp
46  */
47 
48 /*
49  * uvm_unix.c: traditional sbrk/grow interface to vm.
50  */
51 #include "opt_compat_netbsd32.h"
52 
53 #include <sys/param.h>
54 #include <sys/systm.h>
55 #include <sys/proc.h>
56 #include <sys/resourcevar.h>
57 #include <sys/vnode.h>
58 #include <sys/core.h>
59 
60 #include <sys/mount.h>
61 #include <sys/syscallargs.h>
62 
63 #include <uvm/uvm.h>
64 
65 /*
66  * sys_obreak: set break
67  */
68 
int
sys_obreak(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_obreak_args /* {
		syscallarg(char *) nsize;
	} */ *uap = v;
	struct vmspace *vm = p->p_vmspace;
	vaddr_t new, old;
	ssize_t diff;
	int error;

	/*
	 * Check the requested break against the data-size resource
	 * limit, measured from the start of the data segment.
	 * NOTE(review): "new - old" is unsigned (vaddr_t), so a request
	 * below vm_daddr wraps to a huge value and is rejected here too.
	 */
	old = (vaddr_t)vm->vm_daddr;
	new = round_page((vaddr_t)SCARG(uap, nsize));
	if ((new - old) > p->p_rlimit[RLIMIT_DATA].rlim_cur)
		return (ENOMEM);

	/* Compute the current (page-aligned) end of the data segment. */
	old = round_page(old + ptoa(vm->vm_dsize));
	diff = new - old;

	/* No change requested: nothing to do. */
	if (diff == 0)
		return (0);

	/*
	 * grow or shrink?
	 */
	if (diff > 0) {
		/*
		 * Grow: map anonymous, copy-on-write, zero-fill memory at
		 * the old break.  UVM_FLAG_FIXED pins the address; the map
		 * call fails (rather than relocating) if the range is busy.
		 */
		error = uvm_map(&vm->vm_map, &old, diff, NULL,
		    UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_COPY,
		    UVM_ADV_NORMAL, UVM_FLAG_AMAPPAD|UVM_FLAG_FIXED|
		    UVM_FLAG_OVERLAY|UVM_FLAG_COPYONW));
		if (error) {
			uprintf("sbrk: grow %ld failed, error = %d\n",
				(long)diff, error);
			return error;
		}
		/* Account for the new pages in the data-segment size. */
		vm->vm_dsize += atop(diff);
	} else {
		/* Shrink: unmap everything from the new break upward. */
		uvm_deallocate(&vm->vm_map, new, -diff);
		vm->vm_dsize -= atop(-diff);
	}
	return 0;
}
115 
116 /*
117  * uvm_grow: enlarge the "stack segment" to include sp.
118  */
119 
120 int
121 uvm_grow(p, sp)
122 	struct proc *p;
123 	vaddr_t sp;
124 {
125 	struct vmspace *vm = p->p_vmspace;
126 	int si;
127 
128 	/*
129 	 * For user defined stacks (from sendsig).
130 	 */
131 	if (sp < (vaddr_t)vm->vm_maxsaddr)
132 		return (0);
133 
134 	/*
135 	 * For common case of already allocated (from trap).
136 	 */
137 	if (sp >= USRSTACK - ctob(vm->vm_ssize))
138 		return (1);
139 
140 	/*
141 	 * Really need to check vs limit and increment stack size if ok.
142 	 */
143 	si = btoc(USRSTACK-sp) - vm->vm_ssize;
144 	if (vm->vm_ssize + si > btoc(p->p_rlimit[RLIMIT_STACK].rlim_cur))
145 		return (0);
146 	vm->vm_ssize += si;
147 	return (1);
148 }
149 
150 /*
151  * sys_oadvise: old advice system call
152  */
153 
154 /* ARGSUSED */
155 int
156 sys_ovadvise(p, v, retval)
157 	struct proc *p;
158 	void *v;
159 	register_t *retval;
160 {
161 #if 0
162 	struct sys_ovadvise_args /* {
163 		syscallarg(int) anom;
164 	} */ *uap = v;
165 #endif
166 
167 	return (EINVAL);
168 }
169 
170 /*
171  * uvm_coredump: dump core!
172  */
173 
int
uvm_coredump(p, vp, cred, chdr)
	struct proc *p;
	struct vnode *vp;
	struct ucred *cred;
	struct core *chdr;
{
	struct vmspace *vm = p->p_vmspace;
	vm_map_t map = &vm->vm_map;
	vm_map_entry_t entry;
	vaddr_t start, end, maxstack;
	struct coreseg cseg;
	off_t offset;
	int flag, error = 0;

	/*
	 * Segment data is written after the core header, the per-segment
	 * headers already accounted for, and the CPU-dependent section.
	 */
	offset = chdr->c_hdrsize + chdr->c_seghdrsize + chdr->c_cpusize;
	/* Lowest address of the currently-allocated stack. */
	maxstack = trunc_page(USRSTACK - ctob(vm->vm_ssize));

	/* Walk every entry in the process' map. */
	for (entry = map->header.next; entry != &map->header;
	    entry = entry->next) {

		/* should never happen for a user process */
		if (UVM_ET_ISSUBMAP(entry)) {
			panic("uvm_coredump: user process with submap?");
		}

		/* Read-only regions (e.g. text) are not dumped. */
		if (!(entry->protection & VM_PROT_WRITE))
			continue;

		start = entry->start;
		end = entry->end;

		/* Skip or clip anything beyond the user address space. */
		if (start >= VM_MAXUSER_ADDRESS)
			continue;

		if (end > VM_MAXUSER_ADDRESS)
			end = VM_MAXUSER_ADDRESS;

		/*
		 * Entries in the stack region: dump only the part that
		 * is actually in use (at or above maxstack), and tag it
		 * as CORE_STACK; everything else is CORE_DATA.
		 */
		if (start >= (vaddr_t)vm->vm_maxsaddr) {
			if (end <= maxstack)
				continue;
			if (start < maxstack)
				start = maxstack;
			flag = CORE_STACK;
		} else
			flag = CORE_DATA;

		/*
		 * Set up a new core file segment.
		 */
		CORE_SETMAGIC(cseg, CORESEGMAGIC, CORE_GETMID(*chdr), flag);
		cseg.c_addr = start;
		cseg.c_size = end - start;

		/* Write the segment header from kernel space... */
		error = vn_rdwr(UIO_WRITE, vp,
		    (caddr_t)&cseg, chdr->c_seghdrsize,
		    offset, UIO_SYSSPACE,
		    IO_NODELOCKED|IO_UNIT, cred, NULL, p);
		if (error)
			break;

		/* ...then the segment contents from user space. */
		offset += chdr->c_seghdrsize;
		error = vn_rdwr(UIO_WRITE, vp,
		    (caddr_t)cseg.c_addr, (int)cseg.c_size,
		    offset, UIO_USERSPACE,
		    IO_NODELOCKED|IO_UNIT, cred, NULL, p);
		if (error)
			break;

		offset += cseg.c_size;
		chdr->c_nseg++;
	}

	return (error);
}
249 
#ifdef COMPAT_NETBSD32
/*
 * uvm_coredump32: dump a 32-bit core image of a (netbsd32) process.
 *
 * Mirrors uvm_coredump() but emits "struct core32"/"struct coreseg32"
 * headers so 32-bit binaries get a core file in their native layout.
 * => returns 0 on success, or an error from vn_rdwr().
 */

int
uvm_coredump32(p, vp, cred, chdr)
	struct proc *p;
	struct vnode *vp;
	struct ucred *cred;
	struct core32 *chdr;
{
	struct vmspace *vm = p->p_vmspace;
	vm_map_t map = &vm->vm_map;
	vm_map_entry_t entry;
	vaddr_t start, end, maxstack;
	struct coreseg32 cseg;
	off_t offset;
	int flag, error = 0;

	/*
	 * Segment data starts after the core header, the per-segment
	 * headers already accounted for, and the CPU-dependent section.
	 */
	offset = chdr->c_hdrsize + chdr->c_seghdrsize + chdr->c_cpusize;
	/* Lowest address of the currently-allocated stack. */
	maxstack = trunc_page(USRSTACK - ctob(vm->vm_ssize));

	for (entry = map->header.next; entry != &map->header;
	    entry = entry->next) {

		/* should never happen for a user process */
		if (UVM_ET_ISSUBMAP(entry)) {
			/* was "uvm_coredump: ..." -- name the right function */
			panic("uvm_coredump32: user process with submap?");
		}

		/* Read-only regions (e.g. text) are not dumped. */
		if (!(entry->protection & VM_PROT_WRITE))
			continue;

		start = entry->start;
		end = entry->end;

		/* Skip or clip anything beyond the user address space. */
		if (start >= VM_MAXUSER_ADDRESS)
			continue;

		if (end > VM_MAXUSER_ADDRESS)
			end = VM_MAXUSER_ADDRESS;

		/*
		 * Entries in the stack region: dump only the part that is
		 * actually in use and tag it CORE_STACK; else CORE_DATA.
		 */
		if (start >= (vaddr_t)vm->vm_maxsaddr) {
			if (end <= maxstack)
				continue;
			if (start < maxstack)
				start = maxstack;
			flag = CORE_STACK;
		} else
			flag = CORE_DATA;

		/*
		 * Set up a new core file segment.
		 */
		CORE_SETMAGIC(cseg, CORESEGMAGIC, CORE_GETMID(*chdr), flag);
		cseg.c_addr = start;
		cseg.c_size = end - start;

		/* Write the 32-bit segment header from kernel space... */
		error = vn_rdwr(UIO_WRITE, vp,
		    (caddr_t)&cseg, chdr->c_seghdrsize,
		    offset, UIO_SYSSPACE,
		    IO_NODELOCKED|IO_UNIT, cred, NULL, p);
		if (error)
			break;

		/*
		 * ...then the contents from user space.  c_addr is a
		 * 32-bit field; widen via u_long before the pointer cast.
		 */
		offset += chdr->c_seghdrsize;
		error = vn_rdwr(UIO_WRITE, vp,
		    (caddr_t)(u_long)cseg.c_addr, (int)cseg.c_size,
		    offset, UIO_USERSPACE,
		    IO_NODELOCKED|IO_UNIT, cred, NULL, p);
		if (error)
			break;

		offset += cseg.c_size;
		chdr->c_nseg++;
	}

	return (error);
}

#endif
332