/*	$NetBSD: uvm_mremap.c,v 1.12 2008/06/17 16:17:21 tsutsui Exp $	*/

/*-
 * Copyright (c)2006 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_mremap.c,v 1.12 2008/06/17 16:17:21 tsutsui Exp $");

#include <sys/param.h>
#include <sys/mman.h>
#include <sys/sched.h>
#include <sys/syscallargs.h>
#include <sys/proc.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>

static int
uvm_mapent_extend(struct vm_map *map, vaddr_t endva, vsize_t size)
{
	struct vm_map_entry *entry;
	struct vm_map_entry *reserved_entry;
	struct uvm_object *uobj;
	int error = 0;

	vm_map_lock(map);
	if (!uvm_map_lookup_entry(map, endva, &reserved_entry)) {
		error = ENOENT;
		goto done;
	}
	if (reserved_entry->start != endva ||
	    reserved_entry->end != endva + size ||
	    reserved_entry->object.uvm_obj != NULL ||
	    reserved_entry->aref.ar_amap != NULL) {
		error = EINVAL;
		goto done;
	}
	entry = reserved_entry->prev;
	KASSERT(&map->header != entry);
	if (entry->end != endva) {
		error = EINVAL;
		goto done;
	}

	/*
	 * now, make reserved_entry compatible with entry, and then
	 * try to merge.
	 */

	uobj = entry->object.uvm_obj;
	if (uobj) {
		voff_t offset = entry->offset;
		voff_t newoffset;

		newoffset = offset + entry->end - entry->start;
		if (newoffset <= offset) {
			error = E2BIG; /* XXX */
			goto done;
		}
		mutex_enter(&uobj->vmobjlock);
		KASSERT(uobj->uo_refs > 0);
		atomic_inc_uint(&uobj->uo_refs);
		mutex_exit(&uobj->vmobjlock);
		reserved_entry->object.uvm_obj = uobj;
		reserved_entry->offset = newoffset;
	}
	reserved_entry->etype = entry->etype;
	if (UVM_ET_ISCOPYONWRITE(entry)) {
		reserved_entry->etype |= UVM_ET_NEEDSCOPY;
	}
	reserved_entry->flags &= ~UVM_MAP_NOMERGE;
	reserved_entry->protection = entry->protection;
	reserved_entry->max_protection = entry->max_protection;
	reserved_entry->inheritance = entry->inheritance;
	reserved_entry->advice = entry->advice;
	reserved_entry->wired_count = 0; /* XXX should inherit? */
	uvm_mapent_trymerge(map, reserved_entry, 0);
done:
	vm_map_unlock(map);

	return error;
}
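
/*
 * Illustrative sketch (drawn from the checks in uvm_mapent_extend()
 * above, for exposition only) of the situation the function handles:
 *
 *	          entry           |     reserved_entry
 *	 (uobj/amap backed, the   | (no uobj, no amap; just an
 *	  mapping being grown)    |  empty reservation)
 *	--------------------------+---------------------------
 *	                        endva                endva + size
 *
 * The reserved entry immediately follows `entry'.  Copying the entry
 * type, protection, inheritance and advice, and taking an extra
 * reference on the backing object at a contiguous offset, makes the
 * two entries compatible so that uvm_mapent_trymerge() can coalesce
 * them into a single entry.
 */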

/*
 * uvm_mremap: move and/or resize existing mappings.
 */

int
uvm_mremap(struct vm_map *oldmap, vaddr_t oldva, vsize_t oldsize,
    struct vm_map *newmap, vaddr_t *newvap, vsize_t newsize,
    struct proc *newproc, int flags)
{
	vaddr_t dstva;
	vsize_t movesize;
	vaddr_t newva;
	vaddr_t align = 0;
	int error = 0;
	const bool fixed = (flags & MAP_FIXED) != 0;

	if (fixed) {
		newva = *newvap;
	} else {
		newva = 0;
	}
	if ((oldva & PAGE_MASK) != 0 ||
	    (newva & PAGE_MASK) != 0 ||
	    (oldsize & PAGE_MASK) != 0 ||
	    (newsize & PAGE_MASK) != 0) {
		return EINVAL;
	}
	/* XXX zero-size should be allowed? */
	if (oldva + oldsize <= oldva || newva + newsize <= newva) {
		return EINVAL;
	}

	/*
	 * Try to see if any requested alignment can even be attempted.
	 * Make sure we can express the alignment (asking for a >= 4GB
	 * alignment on an ILP32 architecture makes no sense) and that
	 * the alignment is at least a page-sized quantity.  If the
	 * request was for a fixed mapping, make sure the supplied
	 * address adheres to the requested alignment.
	 */
	align = (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT;
	if (align) {
		if (align >= sizeof(vaddr_t) * NBBY)
			return EINVAL;
		align = 1L << align;
		if (align < PAGE_SIZE)
			return EINVAL;
		if (align >= vm_map_max(oldmap))
			return ENOMEM;
		if (flags & MAP_FIXED) {
			if ((*newvap & (align - 1)) != 0)
				return EINVAL;
			align = 0;
		}
	}

	/*
	 * check the easy cases first.
	 */

	if ((!fixed || newva == oldva) && newmap == oldmap &&
	    (align == 0 || (oldva & (align - 1)) == 0)) {
		vaddr_t va;

		if (newsize == oldsize) {
			newva = oldva;
			goto done;
		}
		if (newsize < oldsize) {
			uvm_unmap(oldmap, oldva + newsize, oldva + oldsize);
			newva = oldva;
			goto done;
		}
		va = oldva + oldsize;
		if (uvm_map_reserve(oldmap, newsize - oldsize, 0, 0, &va,
		    UVM_FLAG_FIXED)) {
			newva = oldva;
			goto extend;
		}
		if (fixed) {
			return ENOMEM;
		}
	}

	/*
	 * we need to move mappings.
	 */

	if (!fixed) {
		KASSERT(&newproc->p_vmspace->vm_map == newmap);
		newva = newproc->p_emul->e_vm_default_addr(newproc,
		    (vaddr_t)newproc->p_vmspace->vm_daddr, newsize);
	}
	dstva = newva;
	if (!uvm_map_reserve(newmap, newsize, oldva, align, &dstva,
	    fixed ? UVM_FLAG_FIXED : 0)) {
		return ENOMEM;
	}
	KASSERT(!fixed || dstva == newva);
	newva = dstva;
	movesize = MIN(oldsize, newsize);
	error = uvm_map_extract(oldmap, oldva, movesize, newmap, &dstva,
	    UVM_EXTRACT_RESERVED);
	KASSERT(dstva == newva);
	if (error != 0) {
		/* undo uvm_map_reserve */
		uvm_unmap(newmap, newva, newva + newsize);
		return error;
	}
	if (newsize > oldsize) {
extend:
		error = uvm_mapent_extend(newmap, newva + oldsize,
		    newsize - oldsize);
		if (error != 0) {
			/* undo uvm_map_reserve and uvm_map_extract */
			uvm_unmap(newmap, newva, newva + newsize);
			return error;
		}
	}

	/*
	 * now we won't fail.  remove original entries.
	 */

	if (oldva != newva || oldmap != newmap) {
		uvm_unmap(oldmap, oldva, oldva + oldsize);
	}
done:
	*newvap = newva;
	return 0;
}
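
/*
 * A worked example of the alignment decoding in uvm_mremap() above
 * (illustrative; the MAP_ALIGNED() flag encoding is defined in
 * <sys/mman.h>).  The caller packs log2 of the desired alignment into
 * the flags, so a request for a 64 KB boundary looks like:
 *
 *	flags |= MAP_ALIGNED(16);
 *	align = (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT;
 *					(-> 16)
 *	align = 1L << align;		(-> 0x10000, i.e. 64 KB)
 *
 * A shift that cannot be expressed in a vaddr_t, or an alignment below
 * PAGE_SIZE, fails with EINVAL; an alignment at or beyond the map's
 * maximum address fails with ENOMEM.  For MAP_FIXED requests the
 * supplied address must already sit on the requested boundary, and the
 * alignment is then dropped.
 */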

/*
 * sys_mremap: mremap system call.
 */

int
sys_mremap(struct lwp *l, const struct sys_mremap_args *uap, register_t *retval)
{
	/* {
		syscallarg(void *) old_address;
		syscallarg(size_t) old_size;
		syscallarg(void *) new_address;
		syscallarg(size_t) new_size;
		syscallarg(int) flags;
	} */

	struct proc *p;
	struct vm_map *map;
	vaddr_t oldva;
	vaddr_t newva;
	size_t oldsize;
	size_t newsize;
	int flags;
	int error;

	flags = SCARG(uap, flags);
	oldva = (vaddr_t)SCARG(uap, old_address);
	oldsize = (vsize_t)(SCARG(uap, old_size));
	newva = (vaddr_t)SCARG(uap, new_address);
	newsize = (vsize_t)(SCARG(uap, new_size));

	if ((flags & ~(MAP_FIXED | MAP_ALIGNMENT_MASK)) != 0) {
		error = EINVAL;
		goto done;
	}

	oldsize = round_page(oldsize);
	newsize = round_page(newsize);

	p = l->l_proc;
	map = &p->p_vmspace->vm_map;
	error = uvm_mremap(map, oldva, oldsize, map, &newva, newsize, p,
	    flags);

done:
	*retval = (error != 0) ? 0 : (register_t)newva;
	return error;
}
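
/*
 * An illustrative userland sketch (not part of this file): growing an
 * anonymous mapping with the five-argument NetBSD mremap(2) that
 * sys_mremap() above implements.  Passing NULL for the new address and
 * omitting MAP_FIXED lets the kernel move the mapping; the (possibly
 * new) address is returned, or MAP_FAILED on error.  Sizes assume a
 * 4 KB page for brevity.
 *
 *	#include <sys/mman.h>
 *	#include <err.h>
 *	#include <stdlib.h>
 *
 *	int
 *	main(void)
 *	{
 *		size_t oldsize = 4 * 4096, newsize = 16 * 4096;
 *		void *p, *q;
 *
 *		p = mmap(NULL, oldsize, PROT_READ | PROT_WRITE,
 *		    MAP_ANON | MAP_PRIVATE, -1, 0);
 *		if (p == MAP_FAILED)
 *			err(EXIT_FAILURE, "mmap");
 *		q = mremap(p, oldsize, NULL, newsize, 0);
 *		if (q == MAP_FAILED)
 *			err(EXIT_FAILURE, "mremap");
 *		return EXIT_SUCCESS;
 *	}
 */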