/*	$NetBSD: uvm_mremap.c,v 1.19 2017/05/06 21:34:52 joerg Exp $	*/

/*-
 * Copyright (c)2006,2007,2009 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_mremap.c,v 1.19 2017/05/06 21:34:52 joerg Exp $");

#include <sys/param.h>
#include <sys/mman.h>
#include <sys/sched.h>
#include <sys/syscallargs.h>
#include <sys/proc.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>

/*
 * uvm_mapent_extend: grow a mapping in place.  the region
 * [endva, endva + size) must be covered by a reserved entry
 * (no backing object, no amap, VM_PROT_NONE); give it the
 * attributes of the entry ending at endva and try to merge
 * the two entries.
 */

static int
uvm_mapent_extend(struct vm_map *map, vaddr_t endva, vsize_t size)
{
	struct vm_map_entry *entry;
	struct vm_map_entry *reserved_entry;
	struct uvm_object *uobj;
	int error = 0;

	vm_map_lock(map);
	if (!uvm_map_lookup_entry(map, endva, &reserved_entry)) {
		error = ENOENT;
		goto done;
	}
	if (reserved_entry->start != endva ||
	    reserved_entry->end != endva + size ||
	    reserved_entry->object.uvm_obj != NULL ||
	    reserved_entry->aref.ar_amap != NULL ||
	    reserved_entry->protection != VM_PROT_NONE) {
		error = EINVAL;
		goto done;
	}
	entry = reserved_entry->prev;
	if (&map->header == entry || entry->end != endva) {
		error = EINVAL;
		goto done;
	}

	/*
	 * now, make reserved_entry compatible with entry, and then
	 * try to merge.
	 */

	uobj = entry->object.uvm_obj;
	if (uobj) {
		voff_t offset = entry->offset;
		voff_t newoffset;

		newoffset = offset + entry->end - entry->start;
		if (newoffset <= offset) {
			error = E2BIG; /* XXX */
			goto done;
		}
		mutex_enter(uobj->vmobjlock);
		KASSERT(uobj->uo_refs > 0);
		atomic_inc_uint(&uobj->uo_refs);
		mutex_exit(uobj->vmobjlock);
		reserved_entry->object.uvm_obj = uobj;
		reserved_entry->offset = newoffset;
	}
	reserved_entry->etype = entry->etype;
	if (UVM_ET_ISCOPYONWRITE(entry)) {
		reserved_entry->etype |= UVM_ET_NEEDSCOPY;
	}
	reserved_entry->flags &= ~UVM_MAP_NOMERGE;
	reserved_entry->protection = entry->protection;
	reserved_entry->max_protection = entry->max_protection;
	reserved_entry->inheritance = entry->inheritance;
	reserved_entry->advice = entry->advice;
	reserved_entry->wired_count = 0; /* XXX should inherit? */
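	/*
	 * the attributes now match; uvm_mapent_trymerge is best
	 * effort and its result is ignored, since the extended
	 * region is a valid mapping whether or not the two entries
	 * actually coalesce.
	 */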
	uvm_mapent_trymerge(map, reserved_entry, 0);
done:
	vm_map_unlock(map);

	return error;
}

/*
 * uvm_mremap: move and/or resize existing mappings.
 */

int
uvm_mremap(struct vm_map *oldmap, vaddr_t oldva, vsize_t oldsize,
    struct vm_map *newmap, vaddr_t *newvap, vsize_t newsize,
    struct proc *newproc, int flags)
{
	vaddr_t dstva;
	vsize_t movesize;
	vaddr_t newva;
	int alignshift;
	vaddr_t align = 0;
	int error = 0;
	const bool fixed = (flags & MAP_FIXED) != 0;
	const bool duplicate = (flags & MAP_REMAPDUP) != 0;

	if (fixed) {
		newva = *newvap;
	} else {
		newva = 0;
	}
	if ((oldva & PAGE_MASK) != 0 ||
	    (newva & PAGE_MASK) != 0 ||
	    (oldsize & PAGE_MASK) != 0 ||
	    (newsize & PAGE_MASK) != 0) {
		return EINVAL;
	}
	/* XXX zero-size should be allowed? */
	if (oldva + oldsize <= oldva || newva + newsize <= newva) {
		return EINVAL;
	}

	/*
	 * Try to see if any requested alignment can even be attempted.
	 * Make sure we can express the alignment (asking for a >= 4GB
	 * alignment on an ILP32 architecture makes no sense) and that
	 * the alignment is at least a page-sized quantity.  If the
	 * request was for a fixed mapping, make sure the supplied
	 * address adheres to the requested alignment.
	 */
	alignshift = (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT;
	if (alignshift != 0) {
		if (alignshift >= sizeof(vaddr_t) * NBBY)
			return EINVAL;
		align = 1L << alignshift;
		if (align < PAGE_SIZE)
			return EINVAL;
		if (align >= vm_map_max(oldmap))
			return ENOMEM;
		if ((flags & MAP_FIXED) != 0) {
			if ((*newvap & (align - 1)) != 0)
				return EINVAL;
			align = 0;
		}
	}

	/*
	 * check the easy cases first.
	 */

	if (!duplicate &&
	    (!fixed || newva == oldva) && newmap == oldmap &&
	    (align == 0 || (oldva & (align - 1)) == 0)) {
		vaddr_t va;

		if (newsize == oldsize) {
			newva = oldva;
			goto done;
		}
		if (newsize < oldsize) {
			uvm_unmap(oldmap, oldva + newsize, oldva + oldsize);
			newva = oldva;
			goto done;
		}
		va = oldva + oldsize;
		if (uvm_map_reserve(oldmap, newsize - oldsize, 0, 0, &va,
		    UVM_FLAG_FIXED)) {
			newva = oldva;
			goto extend;
		}
		if (fixed) {
			return ENOMEM;
		}
	}

	/*
	 * we need to move mappings.
	 */

	if (!fixed) {
		KASSERT(&newproc->p_vmspace->vm_map == newmap);
		newva = newproc->p_emul->e_vm_default_addr(newproc,
		    (vaddr_t)newproc->p_vmspace->vm_daddr, newsize,
		    newproc->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN);
	}
	dstva = newva;
	if (!uvm_map_reserve(newmap, newsize, oldva, align, &dstva,
	    fixed ? UVM_FLAG_FIXED : 0)) {
		return ENOMEM;
	}
	KASSERT(!fixed || dstva == newva);
	newva = dstva;
	movesize = MIN(oldsize, newsize);
	error = uvm_map_extract(oldmap, oldva, movesize, newmap, &dstva,
	    UVM_EXTRACT_RESERVED);
	KASSERT(dstva == newva);
	if (error != 0) {
		/*
		 * undo uvm_map_reserve.
		 */
		uvm_unmap(newmap, newva, newva + newsize);
		return error;
	}
	if (newsize > oldsize) {
extend:
		error = uvm_mapent_extend(newmap, newva + oldsize,
		    newsize - oldsize);
		if (error != 0) {
			/*
			 * undo uvm_map_reserve and uvm_map_extract.
			 */
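			/*
			 * in the in-place extend case (reached via the
			 * "extend" label above), the original mapping
			 * at [newva, newva + oldsize) must survive, so
			 * only the reserved extension is torn down;
			 * otherwise the whole new range goes away.
			 */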
			if (newva == oldva && newmap == oldmap) {
				uvm_unmap(newmap, newva + oldsize,
				    newva + newsize);
			} else {
				uvm_unmap(newmap, newva, newva + newsize);
			}
			return error;
		}
	}

	/*
	 * now we won't fail.
	 * remove the original entries unless we extended in place.
	 */

	if (!duplicate && (oldva != newva || oldmap != newmap)) {
		uvm_unmap(oldmap, oldva, oldva + oldsize);
	}
done:
	*newvap = newva;
	return 0;
}

/*
 * sys_mremap: mremap system call.
 */

int
sys_mremap(struct lwp *l, const struct sys_mremap_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) old_address;
		syscallarg(size_t) old_size;
		syscallarg(void *) new_address;
		syscallarg(size_t) new_size;
		syscallarg(int) flags;
	} */

	struct proc *p;
	struct vm_map *map;
	vaddr_t oldva;
	vaddr_t newva;
	size_t oldsize;
	size_t newsize;
	int flags;
	int error;

	flags = SCARG(uap, flags);
	oldva = (vaddr_t)SCARG(uap, old_address);
	oldsize = (vsize_t)(SCARG(uap, old_size));
	newva = (vaddr_t)SCARG(uap, new_address);
	newsize = (vsize_t)(SCARG(uap, new_size));

	if ((flags & ~(MAP_FIXED | MAP_REMAPDUP | MAP_ALIGNMENT_MASK)) != 0) {
		error = EINVAL;
		goto done;
	}

	oldsize = round_page(oldsize);
	newsize = round_page(newsize);

	p = l->l_proc;
	map = &p->p_vmspace->vm_map;
	error = uvm_mremap(map, oldva, oldsize, map, &newva, newsize, p, flags);

done:
	*retval = (error != 0) ? 0 : (register_t)newva;
	return error;
}
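/*
 * usage sketch (illustrative only; userland code, not part of the
 * kernel): NetBSD's mremap(2) takes both an old and a new address,
 * unlike the Linux interface.  growing an anonymous mapping without
 * MAP_FIXED might look like the following (needs <sys/mman.h> and
 * <err.h>):
 *
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *	    MAP_ANON | MAP_PRIVATE, -1, 0);
 *	void *q = mremap(p, len, NULL, 2 * len, 0);
 *	if (q == MAP_FAILED)
 *		err(EXIT_FAILURE, "mremap");
 */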