/*	$NetBSD: uvm_mmap.c,v 1.126 2008/06/03 21:48:27 ad Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993 The Regents of the University of California.
 * Copyright (c) 1988 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the Charles D. Cranor,
 *	Washington University, University of California, Berkeley and
 *	its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *      @(#)vm_mmap.c   8.5 (Berkeley) 5/19/94
 * from: Id: uvm_mmap.c,v 1.1.2.14 1998/01/05 21:04:26 chuck Exp
 */

/*
 * uvm_mmap.c: system call interface into VM system, plus kernel vm_mmap
 * function.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_mmap.c,v 1.126 2008/06/03 21:48:27 ad Exp $");

#include "opt_compat_netbsd.h"
#include "opt_pax.h"
#include "veriexec.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/resourcevar.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/conf.h>
#include <sys/stat.h>

#if NVERIEXEC > 0
#include <sys/verified_exec.h>
#endif /* NVERIEXEC > 0 */

#ifdef PAX_MPROTECT
#include <sys/pax.h>
#endif /* PAX_MPROTECT */

#include <miscfs/specfs/specdev.h>

#include <sys/syscallargs.h>

#include <uvm/uvm.h>
#include <uvm/uvm_device.h>

#ifndef COMPAT_ZERODEV
#define COMPAT_ZERODEV(dev)	(0)
#endif

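/*
 * range_test: check that a user-supplied address range lies within the
 * user address space and does not wrap around its end.  mmap gets its
 * own error numbers for the failure cases (EFBIG, EOVERFLOW); the
 * other range-checked syscalls report plain EINVAL.
 */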
static int
range_test(vaddr_t addr, vsize_t size, bool ismmap)
{
	vaddr_t vm_min_address = VM_MIN_ADDRESS;
	vaddr_t vm_max_address = VM_MAXUSER_ADDRESS;
	vaddr_t eaddr = addr + size;

	if (addr < vm_min_address)
		return EINVAL;
	if (eaddr > vm_max_address)
		return ismmap ? EFBIG : EINVAL;
	if (addr > eaddr) /* no wrapping! */
		return ismmap ? EOVERFLOW : EINVAL;
	return 0;
}

/*
 * unimplemented VM system calls:
 */

/*
 * sys_sbrk: sbrk system call.
 */

/* ARGSUSED */
int
sys_sbrk(struct lwp *l, const struct sys_sbrk_args *uap, register_t *retval)
{
	/* {
		syscallarg(intptr_t) incr;
	} */

	return (ENOSYS);
}

/*
 * sys_sstk: sstk system call.
 */

/* ARGSUSED */
int
sys_sstk(struct lwp *l, const struct sys_sstk_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) incr;
	} */

	return (ENOSYS);
}

/*
 * sys_mincore: determine if pages are in core or not.
 */

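/*
 * Userland view (a minimal sketch, not code from this file): the caller
 * supplies one status byte per page of the queried range, and a
 * non-zero byte means "resident":
 *
 *	char vec[npgs];
 *	if (mincore(addr, npgs * pagesize, vec) == 0 && vec[0])
 *		... first page is in core ...
 */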
/* ARGSUSED */
int
sys_mincore(struct lwp *l, const struct sys_mincore_args *uap, register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(char *) vec;
	} */
	struct proc *p = l->l_proc;
	struct vm_page *pg;
	char *vec, pgi;
	struct uvm_object *uobj;
	struct vm_amap *amap;
	struct vm_anon *anon;
	struct vm_map_entry *entry;
	vaddr_t start, end, lim;
	struct vm_map *map;
	vsize_t len;
	int error = 0, npgs;

	map = &p->p_vmspace->vm_map;

	start = (vaddr_t)SCARG(uap, addr);
	len = SCARG(uap, len);
	vec = SCARG(uap, vec);

	if (start & PAGE_MASK)
		return (EINVAL);
	len = round_page(len);
	end = start + len;
	if (end <= start)
		return (EINVAL);

	/*
	 * Lock down vec, so our returned status isn't outdated by
	 * storing the status byte for a page.
	 */

	npgs = len >> PAGE_SHIFT;
	error = uvm_vslock(p->p_vmspace, vec, npgs, VM_PROT_WRITE);
	if (error) {
		return error;
	}
	vm_map_lock_read(map);

	if (uvm_map_lookup_entry(map, start, &entry) == false) {
		error = ENOMEM;
		goto out;
	}

	for (/* nothing */;
	     entry != &map->header && entry->start < end;
	     entry = entry->next) {
		KASSERT(!UVM_ET_ISSUBMAP(entry));
		KASSERT(start >= entry->start);

		/* Make sure there are no holes. */
		if (entry->end < end &&
		     (entry->next == &map->header ||
		      entry->next->start > entry->end)) {
			error = ENOMEM;
			goto out;
		}

		lim = end < entry->end ? end : entry->end;

		/*
		 * Special case for objects with no "real" pages.  Those
		 * are always considered resident (mapped devices).
		 */

		if (UVM_ET_ISOBJ(entry)) {
			KASSERT(!UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj));
			if (UVM_OBJ_IS_DEVICE(entry->object.uvm_obj)) {
				for (/* nothing */; start < lim;
				     start += PAGE_SIZE, vec++)
					subyte(vec, 1);
				continue;
			}
		}

		amap = entry->aref.ar_amap;	/* top layer */
		uobj = entry->object.uvm_obj;	/* bottom layer */

		if (amap != NULL)
			amap_lock(amap);
		if (uobj != NULL)
			mutex_enter(&uobj->vmobjlock);

		for (/* nothing */; start < lim; start += PAGE_SIZE, vec++) {
			pgi = 0;
			if (amap != NULL) {
				/* Check the top layer first. */
				anon = amap_lookup(&entry->aref,
				    start - entry->start);
				/* Don't need to lock anon here. */
				if (anon != NULL && anon->an_page != NULL) {

					/*
					 * Anon has the page for this entry
					 * offset.
					 */

					pgi = 1;
				}
			}
			if (uobj != NULL && pgi == 0) {
				/* Check the bottom layer. */
				pg = uvm_pagelookup(uobj,
				    entry->offset + (start - entry->start));
				if (pg != NULL) {

					/*
					 * Object has the page for this entry
					 * offset.
					 */

					pgi = 1;
				}
			}
			(void) subyte(vec, pgi);
		}
		if (uobj != NULL)
			mutex_exit(&uobj->vmobjlock);
		if (amap != NULL)
			amap_unlock(amap);
	}

 out:
	vm_map_unlock_read(map);
	uvm_vsunlock(p->p_vmspace, SCARG(uap, vec), npgs);
	return (error);
}

/*
 * sys_mmap: mmap system call.
 *
 * => file offset and address may not be page aligned
 *    - if MAP_FIXED, offset and address must have the same remainder
 *      mod PAGE_SIZE
 *    - if address isn't page aligned the mapping starts at trunc_page(addr)
 *      and the return value is adjusted up by the page offset.
 */

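/*
 * E.g. (a sketch, assuming 4KB pages): with MAP_FIXED, addr 0x20003 and
 * pos 0x1003 are accepted because both have remainder 3 mod PAGE_SIZE;
 * the mapping is placed at 0x20000 for file offset 0x1000, and 0x20003
 * is returned.
 */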
int
sys_mmap(struct lwp *l, const struct sys_mmap_args *uap, register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) prot;
		syscallarg(int) flags;
		syscallarg(int) fd;
		syscallarg(long) pad;
		syscallarg(off_t) pos;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	struct vattr va;
	off_t pos;
	vsize_t size, pageoff;
	vm_prot_t prot, maxprot;
	int flags, fd;
	vaddr_t defaddr;
	struct file *fp = NULL;
	struct vnode *vp;
	void *handle;
	int error;
#ifdef PAX_ASLR
	vaddr_t orig_addr;
#endif /* PAX_ASLR */

	/*
	 * first, extract syscall args from the uap.
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	prot = SCARG(uap, prot) & VM_PROT_ALL;
	flags = SCARG(uap, flags);
	fd = SCARG(uap, fd);
	pos = SCARG(uap, pos);

#ifdef PAX_ASLR
	orig_addr = addr;
#endif /* PAX_ASLR */

	/*
	 * Fixup the old deprecated MAP_COPY into MAP_PRIVATE, and
	 * validate the flags.
	 */
	if (flags & MAP_COPY)
		flags = (flags & ~MAP_COPY) | MAP_PRIVATE;
	if ((flags & (MAP_SHARED|MAP_PRIVATE)) == (MAP_SHARED|MAP_PRIVATE))
		return (EINVAL);

	/*
	 * align file position and save offset.  adjust size.
	 */

	pageoff = (pos & PAGE_MASK);
	pos  -= pageoff;
	size += pageoff;			/* add offset */
	size = (vsize_t)round_page(size);	/* round up */

	/*
	 * now check (MAP_FIXED) or get (!MAP_FIXED) the "addr"
	 */
	if (flags & MAP_FIXED) {

		/* ensure address and file offset are aligned properly */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return (EINVAL);

		error = range_test(addr, size, true);
		if (error)
			return error;
	} else if (addr == 0 || !(flags & MAP_TRYFIXED)) {

		/*
		 * not fixed: make sure we skip over the largest
		 * possible heap for non-topdown mapping arrangements.
		 * we will refine our guess later (e.g. to account for
		 * VAC, etc)
		 */

		defaddr = p->p_emul->e_vm_default_addr(p,
		    (vaddr_t)p->p_vmspace->vm_daddr, size);

		if (addr == 0 ||
		    !(p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN))
			addr = MAX(addr, defaddr);
		else
			addr = MIN(addr, defaddr);
	}

	/*
	 * check for file mappings (i.e. not anonymous) and verify file.
	 */

	if ((flags & MAP_ANON) == 0) {
		if ((fp = fd_getfile(fd)) == NULL)
			return (EBADF);
		if (fp->f_type != DTYPE_VNODE) {
			fd_putfile(fd);
			return (ENODEV);		/* only mmap vnodes! */
		}
		vp = fp->f_data;		/* convert to vnode */
		if (vp->v_type != VREG && vp->v_type != VCHR &&
		    vp->v_type != VBLK) {
			fd_putfile(fd);
			return (ENODEV);  /* only REG/CHR/BLK support mmap */
		}
		if (vp->v_type != VCHR && pos < 0) {
			fd_putfile(fd);
			return (EINVAL);
		}
		if (vp->v_type != VCHR && (pos + size) < pos) {
			fd_putfile(fd);
			return (EOVERFLOW);		/* no offset wrapping */
		}

		/* special case: catch SunOS style /dev/zero */
		if (vp->v_type == VCHR
		    && (vp->v_rdev == zerodev || COMPAT_ZERODEV(vp->v_rdev))) {
			flags |= MAP_ANON;
			fd_putfile(fd);
			fp = NULL;
			goto is_anon;
		}

		/*
		 * Old programs may not select a specific sharing type, so
		 * default to an appropriate one.
		 *
		 * XXX: how does MAP_ANON fit in the picture?
		 */
		if ((flags & (MAP_SHARED|MAP_PRIVATE)) == 0) {
#if defined(DEBUG)
			printf("WARNING: defaulted mmap() share type to "
			   "%s (pid %d command %s)\n", vp->v_type == VCHR ?
			   "MAP_SHARED" : "MAP_PRIVATE", p->p_pid,
			    p->p_comm);
#endif
			if (vp->v_type == VCHR)
				flags |= MAP_SHARED;	/* for a device */
			else
				flags |= MAP_PRIVATE;	/* for a file */
		}

		/*
		 * MAP_PRIVATE device mappings don't make sense (and aren't
		 * supported anyway).  However, some programs rely on this,
		 * so just change it to MAP_SHARED.
		 */
		if (vp->v_type == VCHR && (flags & MAP_PRIVATE) != 0) {
			flags = (flags & ~MAP_PRIVATE) | MAP_SHARED;
		}

		/*
		 * now check protection
		 */

		maxprot = VM_PROT_EXECUTE;

		/* check read access */
		if (fp->f_flag & FREAD)
			maxprot |= VM_PROT_READ;
		else if (prot & PROT_READ) {
			fd_putfile(fd);
			return (EACCES);
		}

		/* check write access, shared case first */
		if (flags & MAP_SHARED) {
			/*
			 * if the file is writable, only add PROT_WRITE to
			 * maxprot if the file is not immutable or
			 * append-only.  otherwise, if we have asked for
			 * PROT_WRITE, return EPERM.
			 */
			if (fp->f_flag & FWRITE) {
				if ((error =
				    VOP_GETATTR(vp, &va, l->l_cred))) {
					fd_putfile(fd);
					return (error);
				}
				if ((va.va_flags &
				    (SF_SNAPSHOT|IMMUTABLE|APPEND)) == 0)
					maxprot |= VM_PROT_WRITE;
				else if (prot & PROT_WRITE) {
					fd_putfile(fd);
					return (EPERM);
				}
			}
			else if (prot & PROT_WRITE) {
				fd_putfile(fd);
				return (EACCES);
			}
		} else {
			/* MAP_PRIVATE mappings are always writable (COW) */
			maxprot |= VM_PROT_WRITE;
		}
		handle = vp;

	} else {		/* MAP_ANON case */
		/*
		 * XXX What do we do about (MAP_SHARED|MAP_PRIVATE) == 0?
		 */
		if (fd != -1)
			return (EINVAL);

 is_anon:		/* label for SunOS style /dev/zero */
		handle = NULL;
		maxprot = VM_PROT_ALL;
		pos = 0;
	}

	/*
	 * XXX (in)sanity check.  We don't do proper datasize checking
	 * XXX for anonymous (or private writable) mmap().  However, we
	 * XXX know that if we're trying to allocate more than the amount
	 * XXX remaining under our current data size limit, _that_ should
	 * XXX be disallowed.
	 */
	if ((flags & MAP_ANON) != 0 ||
	    ((flags & MAP_PRIVATE) != 0 && (prot & PROT_WRITE) != 0)) {
		if (size >
		    (p->p_rlimit[RLIMIT_DATA].rlim_cur -
		     ctob(p->p_vmspace->vm_dsize))) {
			if (fp != NULL)
				fd_putfile(fd);
			return (ENOMEM);
		}
	}

#if NVERIEXEC > 0
	if (handle != NULL) {
		/*
		 * Check if the file can be executed indirectly.
		 *
		 * XXX: This gives false warnings about "Incorrect access type"
		 * XXX: if the mapping is not executable. Harmless, but will be
		 * XXX: fixed as part of other changes.
		 */
		if (veriexec_verify(l, handle, "(mmap)", VERIEXEC_INDIRECT,
		    NULL)) {
			/*
			 * Don't allow executable mappings if we can't
			 * indirectly execute the file.
			 */
			if (prot & VM_PROT_EXECUTE) {
				if (fp != NULL)
					fd_putfile(fd);
				return (EPERM);
			}

			/*
			 * Strip the executable bit from 'maxprot' to make sure
			 * it can't be made executable later.
			 */
			maxprot &= ~VM_PROT_EXECUTE;
		}
	}
#endif /* NVERIEXEC > 0 */

#ifdef PAX_MPROTECT
	pax_mprotect(l, &prot, &maxprot);
#endif /* PAX_MPROTECT */

#ifdef PAX_ASLR
	pax_aslr(l, &addr, orig_addr, flags);
#endif /* PAX_ASLR */

	/*
	 * now let kernel internal function uvm_mmap do the work.
	 */

	error = uvm_mmap(&p->p_vmspace->vm_map, &addr, size, prot, maxprot,
	    flags, handle, pos, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);

	if (error == 0)
		/* remember to add offset */
		*retval = (register_t)(addr + pageoff);

	if (fp != NULL)
		fd_putfile(fd);

	return (error);
}

/*
 * sys___msync13: the msync system call (a front-end for flush)
 */

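/*
 * e.g. (userland sketch, not code from this file) flushing a dirty file
 * mapping to disk synchronously and dropping the cached pages:
 *
 *	msync(p, len, MS_SYNC | MS_INVALIDATE);
 */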
int
sys___msync13(struct lwp *l, const struct sys___msync13_args *uap, register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) flags;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	struct vm_map *map;
	int error, rv, flags, uvmflags;

	/*
	 * extract syscall args from the uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	flags = SCARG(uap, flags);

	/* sanity check flags */
	if ((flags & ~(MS_ASYNC | MS_SYNC | MS_INVALIDATE)) != 0 ||
	    (flags & (MS_ASYNC | MS_SYNC | MS_INVALIDATE)) == 0 ||
	    (flags & (MS_ASYNC | MS_SYNC)) == (MS_ASYNC | MS_SYNC))
		return (EINVAL);
	if ((flags & (MS_ASYNC | MS_SYNC)) == 0)
		flags |= MS_SYNC;

	/*
	 * align the address to a page boundary and adjust the size accordingly.
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	error = range_test(addr, size, false);
	if (error)
		return error;

	/*
	 * get map
	 */

	map = &p->p_vmspace->vm_map;

	/*
	 * XXXCDC: do we really need this semantic?
	 *
	 * XXX Gak!  If size is zero we are supposed to sync "all modified
	 * pages with the region containing addr".  Unfortunately, we
	 * don't really keep track of individual mmaps so we approximate
	 * by flushing the range of the map entry containing addr.
	 * This can be incorrect if the region splits or is coalesced
	 * with a neighbor.
	 */

	if (size == 0) {
		struct vm_map_entry *entry;

		vm_map_lock_read(map);
		rv = uvm_map_lookup_entry(map, addr, &entry);
		if (rv == true) {
			addr = entry->start;
			size = entry->end - entry->start;
		}
		vm_map_unlock_read(map);
		if (rv == false)
			return (EINVAL);
	}

	/*
	 * translate MS_ flags into PGO_ flags
	 */

	uvmflags = PGO_CLEANIT;
	if (flags & MS_INVALIDATE)
		uvmflags |= PGO_FREE;
	if (flags & MS_SYNC)
		uvmflags |= PGO_SYNCIO;
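	/*
	 * e.g. MS_SYNC | MS_INVALIDATE ends up as
	 * PGO_CLEANIT | PGO_SYNCIO | PGO_FREE.
	 */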

	error = uvm_map_clean(map, addr, addr+size, uvmflags);
	return error;
}

/*
 * sys_munmap: unmap a user's memory
 */

int
sys_munmap(struct lwp *l, const struct sys_munmap_args *uap, register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	struct vm_map *map;
	struct vm_map_entry *dead_entries;
	int error;

	/*
	 * get syscall args.
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);

	/*
	 * align the address to a page boundary and adjust the size accordingly.
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	if (size == 0)
		return (0);

	error = range_test(addr, size, false);
	if (error)
		return error;

	map = &p->p_vmspace->vm_map;

	/*
	 * interesting system call semantic: make sure entire range is
	 * allocated before allowing an unmap.  (the check below is
	 * currently compiled out.)
	 */

	vm_map_lock(map);
#if 0
	if (!uvm_map_checkprot(map, addr, addr + size, VM_PROT_NONE)) {
		vm_map_unlock(map);
		return (EINVAL);
	}
#endif
	uvm_unmap_remove(map, addr, addr + size, &dead_entries, NULL, 0);
	vm_map_unlock(map);
	if (dead_entries != NULL)
		uvm_unmap_detach(dead_entries, 0);
	return (0);
}

/*
 * sys_mprotect: the mprotect system call
 */

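/*
 * e.g. (userland sketch, not code from this file) making a region
 * read-only after it has been initialized:
 *
 *	mprotect(p, len, PROT_READ);
 *
 * whether the change is permitted is decided in uvm_map_protect()
 * against each map entry's maximum protection.
 */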
int
sys_mprotect(struct lwp *l, const struct sys_mprotect_args *uap, register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) prot;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	vm_prot_t prot;
	int error;

	/*
	 * extract syscall args from uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	prot = SCARG(uap, prot) & VM_PROT_ALL;

	/*
	 * align the address to a page boundary and adjust the size accordingly.
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = round_page(size);

	error = range_test(addr, size, false);
	if (error)
		return error;

	error = uvm_map_protect(&p->p_vmspace->vm_map, addr, addr + size, prot,
				false);
	return error;
}

/*
 * sys_minherit: the minherit system call
 */

int
sys_minherit(struct lwp *l, const struct sys_minherit_args *uap, register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(int) len;
		syscallarg(int) inherit;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	vm_inherit_t inherit;
	int error;

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	inherit = SCARG(uap, inherit);

	/*
	 * align the address to a page boundary and adjust the size accordingly.
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	error = range_test(addr, size, false);
	if (error)
		return error;

	error = uvm_map_inherit(&p->p_vmspace->vm_map, addr, addr + size,
				inherit);
	return error;
}

/*
 * sys_madvise: give advice about memory usage.
 */

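/*
 * e.g. (userland sketch, not code from this file) hinting that a range
 * will be read once, front to back:
 *
 *	madvise(p, len, MADV_SEQUENTIAL);
 */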
/* ARGSUSED */
int
sys_madvise(struct lwp *l, const struct sys_madvise_args *uap, register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) behav;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	int advice, error;

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	advice = SCARG(uap, behav);

	/*
	 * align the address to a page boundary, and adjust the size accordingly
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	error = range_test(addr, size, false);
	if (error)
		return error;

	switch (advice) {
	case MADV_NORMAL:
	case MADV_RANDOM:
	case MADV_SEQUENTIAL:
		error = uvm_map_advice(&p->p_vmspace->vm_map, addr, addr + size,
		    advice);
		break;

	case MADV_WILLNEED:

		/*
		 * Activate all these pages, pre-faulting them in if
		 * necessary.
		 */
		/*
		 * XXX IMPLEMENT ME.
		 * Should invent a "weak" mode for uvm_fault()
		 * which would only do the PGO_LOCKED pgo_get().
		 */

		return (0);

	case MADV_DONTNEED:

		/*
		 * Deactivate all these pages.  We don't need them
		 * any more.  We don't, however, toss the data in
		 * the pages.
		 */

		error = uvm_map_clean(&p->p_vmspace->vm_map, addr, addr + size,
		    PGO_DEACTIVATE);
		break;

	case MADV_FREE:

		/*
		 * These pages contain no valid data, and may be
		 * garbage-collected.  Toss all resources, including
		 * any swap space in use.
		 */

		error = uvm_map_clean(&p->p_vmspace->vm_map, addr, addr + size,
		    PGO_FREE);
		break;

	case MADV_SPACEAVAIL:

		/*
		 * XXXMRG What is this?  I think it's:
		 *
		 *	Ensure that we have allocated backing-store
		 *	for these pages.
		 *
		 * This is going to require changes to the page daemon,
		 * as it will free swap space allocated to pages in core.
		 * There's also what to do for device/file/anonymous memory.
		 */

		return (EINVAL);

	default:
		return (EINVAL);
	}

	return error;
}

/*
 * sys_mlock: memory lock
 */

int
sys_mlock(struct lwp *l, const struct sys_mlock_args *uap, register_t *retval)
{
	/* {
		syscallarg(const void *) addr;
		syscallarg(size_t) len;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	int error;

	/*
	 * extract syscall args from uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);

	/*
	 * align the address to a page boundary and adjust the size accordingly
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	error = range_test(addr, size, false);
	if (error)
		return error;

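	/*
	 * enforce both the global wired-page ceiling and the caller's
	 * RLIMIT_MEMLOCK resource limit before wiring anything.
	 */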
	if (atop(size) + uvmexp.wired > uvmexp.wiredmax)
		return (EAGAIN);

	if (size + ptoa(pmap_wired_count(vm_map_pmap(&p->p_vmspace->vm_map))) >
			p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur)
		return (EAGAIN);

	error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, false,
	    0);
	if (error == EFAULT)
		error = ENOMEM;
	return error;
}

/*
 * sys_munlock: unlock wired pages
 */

int
sys_munlock(struct lwp *l, const struct sys_munlock_args *uap, register_t *retval)
{
	/* {
		syscallarg(const void *) addr;
		syscallarg(size_t) len;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	int error;

	/*
	 * extract syscall args from uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);

	/*
	 * align the address to a page boundary, and adjust the size accordingly
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	error = range_test(addr, size, false);
	if (error)
		return error;

	error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, true,
	    0);
	if (error == EFAULT)
		error = ENOMEM;
	return error;
}

/*
 * sys_mlockall: lock all pages mapped into an address space.
 */

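/*
 * e.g. (userland sketch, not code from this file) a real-time process
 * that cannot afford page faults:
 *
 *	mlockall(MCL_CURRENT | MCL_FUTURE);
 *
 * MCL_FUTURE sets VM_MAP_WIREFUTURE on the map, which uvm_mmap() below
 * honours by wiring each new mapping as it is created.
 */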
int
sys_mlockall(struct lwp *l, const struct sys_mlockall_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) flags;
	} */
	struct proc *p = l->l_proc;
	int error, flags;

	flags = SCARG(uap, flags);

	if (flags == 0 ||
	    (flags & ~(MCL_CURRENT|MCL_FUTURE)) != 0)
		return (EINVAL);

	error = uvm_map_pageable_all(&p->p_vmspace->vm_map, flags,
	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
	return (error);
}

/*
 * sys_munlockall: unlock all pages mapped into an address space.
 */

int
sys_munlockall(struct lwp *l, const void *v, register_t *retval)
{
	struct proc *p = l->l_proc;

	(void) uvm_map_pageable_all(&p->p_vmspace->vm_map, 0, 0);
	return (0);
}

/*
 * uvm_mmap: internal version of mmap
 *
 * - used by sys_mmap and various framebuffers
 * - handle is a vnode pointer or NULL for MAP_ANON
 * - caller must page-align the file offset
 */

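/*
 * A hypothetical caller (a sketch, not code from this tree): a
 * framebuffer driver exporting its memory through a vnode vp might do
 *
 *	error = uvm_mmap(&p->p_vmspace->vm_map, &va, size,
 *	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ | VM_PROT_WRITE,
 *	    MAP_SHARED, vp, 0, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
 */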
int
uvm_mmap(struct vm_map *map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
    vm_prot_t maxprot, int flags, void *handle, voff_t foff,
    vsize_t locklimit)
{
	struct uvm_object *uobj;
	struct vnode *vp;
	vaddr_t align = 0;
	int error;
	int advice = UVM_ADV_NORMAL;
	uvm_flag_t uvmflag = 0;
	bool needwritemap;

	/*
	 * check params
	 */

	if (size == 0)
		return(0);
	if (foff & PAGE_MASK)
		return(EINVAL);
	if ((prot & maxprot) != prot)
		return(EINVAL);

	/*
	 * for non-fixed mappings, round off the suggested address.
	 * for fixed mappings, check alignment and zap old mappings.
	 */

	if ((flags & MAP_FIXED) == 0) {
		*addr = round_page(*addr);
	} else {
		if (*addr & PAGE_MASK)
			return(EINVAL);
		uvmflag |= UVM_FLAG_FIXED;
		(void) uvm_unmap(map, *addr, *addr + size);
	}

	/*
	 * Try to see if any requested alignment can even be attempted.
	 * Make sure we can express the alignment (asking for a >= 4GB
	 * alignment on an ILP32 architecture makes no sense) and that
	 * the alignment is at least a page-sized quantity.  If the
	 * request was for a fixed mapping, make sure the supplied address
	 * adheres to the requested alignment.
	 */
	align = (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT;
	if (align) {
		if (align >= sizeof(vaddr_t) * NBBY)
			return(EINVAL);
		align = 1L << align;
		if (align < PAGE_SIZE)
			return(EINVAL);
		if (align >= vm_map_max(map))
			return(ENOMEM);
		if (flags & MAP_FIXED) {
			if ((*addr & (align-1)) != 0)
				return(EINVAL);
			align = 0;
		}
	}

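	/*
	 * e.g. (a sketch) a caller passing MAP_ALIGNED(16) in flags asks
	 * for a 2^16 = 64KB aligned mapping; the exponent is recovered
	 * above via MAP_ALIGNMENT_MASK / MAP_ALIGNMENT_SHIFT.
	 */
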
	/*
	 * handle anon vs. non-anon mappings.   for non-anon mappings attach
	 * to underlying vm object.
	 */

	if (flags & MAP_ANON) {
		KASSERT(handle == NULL);
		foff = UVM_UNKNOWN_OFFSET;
		uobj = NULL;
		if ((flags & MAP_SHARED) == 0)
			/* XXX: defer amap create */
			uvmflag |= UVM_FLAG_COPYONW;
		else
			/* shared: create amap now */
			uvmflag |= UVM_FLAG_OVERLAY;

	} else {
		KASSERT(handle != NULL);
		vp = (struct vnode *)handle;

		/*
		 * Don't allow mmap for EXEC if the file system
		 * is mounted NOEXEC.
		 */
		if ((prot & PROT_EXEC) != 0 &&
		    (vp->v_mount->mnt_flag & MNT_NOEXEC) != 0)
			return (EACCES);

		if (vp->v_type != VCHR) {
			error = VOP_MMAP(vp, prot, curlwp->l_cred);
			if (error) {
				return error;
			}
			vref(vp);
			uobj = &vp->v_uobj;

			/*
			 * If the vnode is being mapped with PROT_EXEC,
			 * then mark it as text.
			 */
			if (prot & PROT_EXEC) {
				vn_markexec(vp);
			}
		} else {
			int i = maxprot;

			/*
			 * XXX Some devices don't like to be mapped with
			 * XXX PROT_EXEC or PROT_WRITE, but we don't really
			 * XXX have a better way of handling this, right now
			 */
			do {
				uobj = udv_attach((void *) &vp->v_rdev,
				    (flags & MAP_SHARED) ? i :
				    (i & ~VM_PROT_WRITE), foff, size);
				i--;
			} while ((uobj == NULL) && (i > 0));
			advice = UVM_ADV_RANDOM;
		}
		if (uobj == NULL)
			return((vp->v_type == VREG) ? ENOMEM : EINVAL);
		if ((flags & MAP_SHARED) == 0) {
			uvmflag |= UVM_FLAG_COPYONW;
		}

		/*
		 * Set vnode flags to indicate the new kinds of mapping.
		 * We take the vnode lock in exclusive mode here to serialize
		 * with direct I/O.
		 *
		 * Safe to check for these flag values without a lock, as
		 * long as a reference to the vnode is held.
		 */
		needwritemap = (vp->v_iflag & VI_WRMAP) == 0 &&
			(flags & MAP_SHARED) != 0 &&
			(maxprot & VM_PROT_WRITE) != 0;
		if ((vp->v_vflag & VV_MAPPED) == 0 || needwritemap) {
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
			vp->v_vflag |= VV_MAPPED;
			if (needwritemap) {
				mutex_enter(&vp->v_interlock);
				vp->v_iflag |= VI_WRMAP;
				mutex_exit(&vp->v_interlock);
			}
			VOP_UNLOCK(vp, 0);
		}
	}

	uvmflag = UVM_MAPFLAG(prot, maxprot,
			(flags & MAP_SHARED) ? UVM_INH_SHARE : UVM_INH_COPY,
			advice, uvmflag);
	error = uvm_map(map, addr, size, uobj, foff, align, uvmflag);
	if (error) {
		if (uobj)
			uobj->pgops->pgo_detach(uobj);
		return error;
	}

	/*
	 * POSIX 1003.1b -- if our address space was configured
	 * to lock all future mappings, wire the one we just made.
	 *
	 * Also handle the MAP_WIRED flag here.
	 */

	if (prot == VM_PROT_NONE) {

		/*
		 * No more work to do in this case.
		 */

		return (0);
	}
	if ((flags & MAP_WIRED) != 0 || (map->flags & VM_MAP_WIREFUTURE) != 0) {
		vm_map_lock(map);
		if (atop(size) + uvmexp.wired > uvmexp.wiredmax ||
		    (locklimit != 0 &&
		     size + ptoa(pmap_wired_count(vm_map_pmap(map))) >
		     locklimit)) {
			vm_map_unlock(map);
			uvm_unmap(map, *addr, *addr + size);
			return ENOMEM;
		}

		/*
		 * uvm_map_pageable() always returns the map unlocked.
		 */

		error = uvm_map_pageable(map, *addr, *addr + size,
					 false, UVM_LK_ENTER);
		if (error) {
			uvm_unmap(map, *addr, *addr + size);
			return error;
		}
		return (0);
	}
	return 0;
}

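/*
 * uvm_default_mapaddr: default address hint for a new mapping, used by
 * sys_mmap() above through the emulation's e_vm_default_addr hook.
 */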
vaddr_t
uvm_default_mapaddr(struct proc *p, vaddr_t base, vsize_t sz)
{

	return VM_DEFAULT_ADDRESS(base, sz);
}