/*	$OpenBSD: uvm_unix.c,v 1.47 2013/01/16 21:47:08 deraadt Exp $	*/
/*	$NetBSD: uvm_unix.c,v 1.18 2000/09/13 15:00:25 thorpej Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993 The Regents of the University of California.
 * Copyright (c) 1988 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_unix.c 1.1 89/11/07$
 *      @(#)vm_unix.c	8.1 (Berkeley) 6/11/93
 * from: Id: uvm_unix.c,v 1.1.2.2 1997/08/25 18:52:30 chuck Exp
 */

/*
 * uvm_unix.c: traditional sbrk/grow interface to vm.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/core.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm.h>

/*
 * sys_obreak: set break
 */

int
sys_obreak(struct proc *p, void *v, register_t *retval)
{
	struct sys_obreak_args /* {
		syscallarg(char *) nsize;
	} */ *uap = v;
	struct vmspace *vm = p->p_vmspace;
	vaddr_t new, old, base;
	int error;

	base = (vaddr_t)vm->vm_daddr;
	new = round_page((vaddr_t)SCARG(uap, nsize));
	if (new < base || (new - base) > p->p_rlimit[RLIMIT_DATA].rlim_cur)
		return (ENOMEM);

	old = round_page(base + ptoa(vm->vm_dsize));

	if (new == old)
		return (0);
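	/*
	 * Both the requested break and the current break are rounded
	 * to page boundaries, so the data segment changes in whole
	 * pages.  For example, with 4KB pages and a page-aligned base,
	 * a request for base + 5000 rounds "new" up to base + 8192;
	 * if vm_dsize was one page, "old" is base + 4096 and exactly
	 * one more page is mapped below.
	 */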
	/*
	 * grow or shrink?
	 */
	if (new > old) {
		error = uvm_map(&vm->vm_map, &old, new - old, NULL,
		    UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RWX, UVM_INH_COPY,
		    UVM_ADV_NORMAL, UVM_FLAG_AMAPPAD|UVM_FLAG_FIXED|
		    UVM_FLAG_OVERLAY|UVM_FLAG_COPYONW));
		if (error) {
			uprintf("sbrk: grow %ld failed, error = %d\n",
			    new - old, error);
			return (ENOMEM);
		}
		vm->vm_dsize += atop(new - old);
	} else {
		uvm_deallocate(&vm->vm_map, new, old - new);
		vm->vm_dsize -= atop(old - new);
	}

	return (0);
}

/*
 * uvm_grow: enlarge the "stack segment" to include sp.
 */

void
uvm_grow(struct proc *p, vaddr_t sp)
{
	struct vmspace *vm = p->p_vmspace;
	int si;

	/*
	 * For user defined stacks (from sendsig).
	 */
	if (sp < (vaddr_t)vm->vm_maxsaddr)
		return;

	/*
	 * For common case of already allocated (from trap).
	 */
#ifdef MACHINE_STACK_GROWS_UP
	if (sp < USRSTACK + ptoa(vm->vm_ssize))
#else
	if (sp >= USRSTACK - ptoa(vm->vm_ssize))
#endif
		return;

	/*
	 * Really need to check vs limit and increment stack size if ok.
	 */
#ifdef MACHINE_STACK_GROWS_UP
	si = atop(sp - USRSTACK) - vm->vm_ssize + 1;
#else
	si = atop(USRSTACK - sp) - vm->vm_ssize;
#endif
	if (vm->vm_ssize + si <= atop(p->p_rlimit[RLIMIT_STACK].rlim_cur))
		vm->vm_ssize += si;
}
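/*
 * Note that uvm_grow() only adjusts the accounting: "si" is the number
 * of pages by which the recorded stack size must grow for the stack
 * region to reach sp, and vm_ssize is bumped only if the new total
 * stays within RLIMIT_STACK.  No memory is mapped or allocated here;
 * the stack pages themselves are faulted in on demand.
 */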
#ifndef SMALL_KERNEL

/*
 * uvm_coredump: dump core!
 */

int
uvm_coredump(struct proc *p, struct vnode *vp, struct ucred *cred,
    struct core *chdr)
{
	struct vmspace *vm = p->p_vmspace;
	vm_map_t map = &vm->vm_map;
	vm_map_entry_t entry, safe;
	vaddr_t start, end, top;
	struct coreseg cseg;
	off_t offset, coffset;
	int csize, chunk, flag, error = 0;

	offset = chdr->c_hdrsize + chdr->c_seghdrsize + chdr->c_cpusize;

	RB_FOREACH_SAFE(entry, uvm_map_addr, &map->addr, safe) {
		/* should never happen for a user process */
		if (UVM_ET_ISSUBMAP(entry)) {
			panic("uvm_coredump: user process with submap?");
		}

		if (!(entry->protection & VM_PROT_WRITE) &&
		    entry->start != p->p_sigcode)
			continue;

		/*
		 * Don't dump mmaped devices.
		 */
		if (entry->object.uvm_obj != NULL &&
		    UVM_OBJ_IS_DEVICE(entry->object.uvm_obj))
			continue;

		start = entry->start;
		end = entry->end;

		if (start >= VM_MAXUSER_ADDRESS)
			continue;

		if (end > VM_MAXUSER_ADDRESS)
			end = VM_MAXUSER_ADDRESS;

#ifdef MACHINE_STACK_GROWS_UP
		if (USRSTACK <= start && start < (USRSTACK + MAXSSIZ)) {
			top = round_page(USRSTACK + ptoa(vm->vm_ssize));
			if (end > top)
				end = top;

			if (start >= end)
				continue;
#else
		if (start >= (vaddr_t)vm->vm_maxsaddr) {
			top = trunc_page(USRSTACK - ptoa(vm->vm_ssize));
			if (start < top)
				start = top;

			if (start >= end)
				continue;
#endif
			flag = CORE_STACK;
		} else
			flag = CORE_DATA;

		/*
		 * Set up a new core file segment.
		 */
		CORE_SETMAGIC(cseg, CORESEGMAGIC, CORE_GETMID(*chdr), flag);
		cseg.c_addr = start;
		cseg.c_size = end - start;

		error = vn_rdwr(UIO_WRITE, vp,
		    (caddr_t)&cseg, chdr->c_seghdrsize,
		    offset, UIO_SYSSPACE, IO_UNIT, cred, NULL, p);
		/*
		 * We might get an EFAULT on objects mapped beyond
		 * EOF.  Ignore the error.
		 */
		if (error && error != EFAULT)
			break;

		offset += chdr->c_seghdrsize;

		coffset = 0;
		csize = (int)cseg.c_size;
		do {
			if (p->p_siglist & sigmask(SIGKILL))
				return (EINTR);

			/* Rest of the loop sleeps with lock held, so... */
			yield();

			chunk = MIN(csize, MAXPHYS);
			error = vn_rdwr(UIO_WRITE, vp,
			    (caddr_t)(u_long)cseg.c_addr + coffset,
			    chunk, offset + coffset, UIO_USERSPACE,
			    IO_UNIT, cred, NULL, p);
			if (error)
				return (error);

			coffset += chunk;
			csize -= chunk;
		} while (csize > 0);
		offset += cseg.c_size;

		/* Discard the memory */
		uvm_unmap(map, cseg.c_addr, cseg.c_addr + cseg.c_size);

		chdr->c_nseg++;
	}

	return (error);
}

int
uvm_coredump_walkmap(struct proc *p, void *iocookie,
    int (*func)(struct proc *, void *, struct uvm_coredump_state *),
    void *cookie)
{
	struct uvm_coredump_state state;
	struct vmspace *vm = p->p_vmspace;
	struct vm_map *map = &vm->vm_map;
	struct vm_map_entry *entry;
	vaddr_t top;
	int error;

	RB_FOREACH(entry, uvm_map_addr, &map->addr) {
		state.cookie = cookie;
		state.prot = entry->protection;
		state.flags = 0;

		/* should never happen for a user process */
		if (UVM_ET_ISSUBMAP(entry)) {
			panic("uvm_coredump: user process with submap?");
		}

		if (!(entry->protection & VM_PROT_WRITE) &&
		    entry->start != p->p_sigcode)
			continue;

		/*
		 * Don't dump mmaped devices.
		 */
		if (entry->object.uvm_obj != NULL &&
		    UVM_OBJ_IS_DEVICE(entry->object.uvm_obj))
			continue;

		state.start = entry->start;
		state.realend = entry->end;
		state.end = entry->end;

		if (state.start >= VM_MAXUSER_ADDRESS)
			continue;

		if (state.end > VM_MAXUSER_ADDRESS)
			state.end = VM_MAXUSER_ADDRESS;

#ifdef MACHINE_STACK_GROWS_UP
		if (USRSTACK <= state.start &&
		    state.start < (USRSTACK + MAXSSIZ)) {
			top = round_page(USRSTACK + ptoa(vm->vm_ssize));
			if (state.end > top)
				state.end = top;

			if (state.start >= state.end)
				continue;
#else
		if (state.start >= (vaddr_t)vm->vm_maxsaddr) {
			top = trunc_page(USRSTACK - ptoa(vm->vm_ssize));
			if (state.start < top)
				state.start = top;

			if (state.start >= state.end)
				continue;
#endif
			state.flags |= UVM_COREDUMP_STACK;
		}

		error = (*func)(p, iocookie, &state);
		if (error)
			return (error);
	}

	return (0);
}

#endif /* !SMALL_KERNEL */
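#if 0
/*
 * A minimal sketch, not compiled, of a uvm_coredump_walkmap()
 * consumer.  The callback name "dumpseg" is hypothetical and only
 * illustrates the protocol: the walker invokes the callback once per
 * dumpable map entry with the clipped [start, end) range, and a
 * nonzero return value aborts the walk with that error.
 */
static int
dumpseg(struct proc *p, void *iocookie, struct uvm_coredump_state *us)
{
	if (us->flags & UVM_COREDUMP_STACK) {
		/* stack entry; us->realend holds the unclipped end */
	}
	/* write the range [us->start, us->end) via iocookie */
	return (0);
}
#endif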