/*	$OpenBSD: uvm_unix.c,v 1.57 2016/03/15 18:16:47 stefan Exp $	*/
/*	$NetBSD: uvm_unix.c,v 1.18 2000/09/13 15:00:25 thorpej Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993 The Regents of the University of California.
 * Copyright (c) 1988 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_unix.c 1.1 89/11/07$
 *	@(#)vm_unix.c	8.1 (Berkeley) 6/11/93
 * from: Id: uvm_unix.c,v 1.1.2.2 1997/08/25 18:52:30 chuck Exp
 */

/*
 * uvm_unix.c: traditional sbrk/grow interface to vm.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm.h>

/*
 * sys_obreak: set break
 */

int
sys_obreak(struct proc *p, void *v, register_t *retval)
{
	struct sys_obreak_args /* {
		syscallarg(char *) nsize;
	} */ *uap = v;
	struct vmspace *vm = p->p_vmspace;
	vaddr_t new, old, base;
	int error;

	base = (vaddr_t)vm->vm_daddr;
	new = round_page((vaddr_t)SCARG(uap, nsize));
	if (new < base || (new - base) > p->p_rlimit[RLIMIT_DATA].rlim_cur)
		return (ENOMEM);

	old = round_page(base + ptoa(vm->vm_dsize));

	if (new == old)
		return (0);

	/* grow or shrink? */
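	/*
	 * A grow maps fresh anonymous, copy-on-write memory at the old
	 * break with uvm_map(); a shrink releases the range between the
	 * new and old break with uvm_deallocate().  In both cases
	 * vm_dsize tracks the data segment size in pages.
	 */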
	if (new > old) {
		error = uvm_map(&vm->vm_map, &old, new - old, NULL,
		    UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(PROT_READ | PROT_WRITE,
		    PROT_READ | PROT_WRITE | PROT_EXEC, MAP_INHERIT_COPY,
		    MADV_NORMAL, UVM_FLAG_AMAPPAD|UVM_FLAG_FIXED|
		    UVM_FLAG_COPYONW));
		if (error) {
			uprintf("sbrk: grow %ld failed, error = %d\n",
			    new - old, error);
			return (ENOMEM);
		}
		vm->vm_dsize += atop(new - old);
	} else {
		uvm_deallocate(&vm->vm_map, new, old - new);
		vm->vm_dsize -= atop(old - new);
	}

	return (0);
}

/*
 * uvm_grow: enlarge the "stack segment" to include sp.
 */
void
uvm_grow(struct proc *p, vaddr_t sp)
{
	struct vmspace *vm = p->p_vmspace;
	int si;

	/* For user defined stacks (from sendsig). */
	if (sp < (vaddr_t)vm->vm_maxsaddr)
		return;

	/* For common case of already allocated (from trap). */
#ifdef MACHINE_STACK_GROWS_UP
	if (sp < (vaddr_t)vm->vm_maxsaddr + ptoa(vm->vm_ssize))
#else
	if (sp >= (vaddr_t)vm->vm_minsaddr - ptoa(vm->vm_ssize))
#endif
		return;

	/* Really need to check vs limit and increment stack size if ok. */
#ifdef MACHINE_STACK_GROWS_UP
	si = atop(sp - (vaddr_t)vm->vm_maxsaddr) - vm->vm_ssize + 1;
#else
	si = atop((vaddr_t)vm->vm_minsaddr - sp) - vm->vm_ssize;
#endif
	if (vm->vm_ssize + si <= atop(p->p_rlimit[RLIMIT_STACK].rlim_cur))
		vm->vm_ssize += si;
}

#ifndef SMALL_KERNEL

/*
 * Walk the VA space for a process, invoking 'func' on each present range
 * that should be included in a coredump.
 */
int
uvm_coredump_walkmap(struct proc *p, void *iocookie,
    int (*func)(struct proc *, void *, struct uvm_coredump_state *),
    void *cookie)
{
	struct uvm_coredump_state state;
	struct vmspace *vm = p->p_vmspace;
	struct vm_map *map = &vm->vm_map;
	struct vm_map_entry *entry;
	vaddr_t top;
	int error;

	RB_FOREACH(entry, uvm_map_addr, &map->addr) {
		state.cookie = cookie;
		state.prot = entry->protection;
		state.flags = 0;

		/* should never happen for a user process */
		if (UVM_ET_ISSUBMAP(entry)) {
			panic("%s: user process with submap?", __func__);
		}

		if (!(entry->protection & PROT_WRITE) &&
		    entry->start != p->p_p->ps_sigcode)
			continue;

		/* Don't dump mmaped devices. */
		if (entry->object.uvm_obj != NULL &&
		    UVM_OBJ_IS_DEVICE(entry->object.uvm_obj))
			continue;

		state.start = entry->start;
		state.realend = entry->end;
		state.end = entry->end;

		if (state.start >= VM_MAXUSER_ADDRESS)
			continue;

		if (state.end > VM_MAXUSER_ADDRESS)
			state.end = VM_MAXUSER_ADDRESS;

#ifdef MACHINE_STACK_GROWS_UP
		if ((vaddr_t)vm->vm_maxsaddr <= state.start &&
		    state.start < ((vaddr_t)vm->vm_maxsaddr + MAXSSIZ)) {
			top = round_page((vaddr_t)vm->vm_maxsaddr +
			    ptoa(vm->vm_ssize));
			if (state.end > top)
				state.end = top;

			if (state.start >= state.end)
				continue;
#else
		if (state.start >= (vaddr_t)vm->vm_maxsaddr) {
			top = trunc_page((vaddr_t)vm->vm_minsaddr -
			    ptoa(vm->vm_ssize));
			if (state.start < top)
				state.start = top;

			if (state.start >= state.end)
				continue;
#endif
			state.flags |= UVM_COREDUMP_STACK;
		}

		error = (*func)(p, iocookie, &state);
		if (error)
			return (error);
	}

	return (0);
}

#endif /* !SMALL_KERNEL */