/*	$NetBSD: uvm_mmap.c,v 1.87 2005/01/23 15:58:13 chs Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993 The Regents of the University of California.
 * Copyright (c) 1988 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *	Washington University, the University of California, Berkeley and
 *	its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *      @(#)vm_mmap.c   8.5 (Berkeley) 5/19/94
 * from: Id: uvm_mmap.c,v 1.1.2.14 1998/01/05 21:04:26 chuck Exp
 */

/*
 * uvm_mmap.c: system call interface into VM system, plus kernel vm_mmap
 * function.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_mmap.c,v 1.87 2005/01/23 15:58:13 chs Exp $");

#include "opt_compat_netbsd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/resourcevar.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/conf.h>
#include <sys/stat.h>

#include <miscfs/specfs/specdev.h>

#include <sys/sa.h>
#include <sys/syscallargs.h>

#include <uvm/uvm.h>
#include <uvm/uvm_device.h>

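/*
 * COMPAT_ZERODEV(dev) may be defined elsewhere (e.g. by binary
 * compatibility code) to recognize an emulated system's zero-device
 * numbers; the default below matches nothing, so only the native
 * zerodev triggers the /dev/zero special case in sys_mmap().
 */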
#ifndef COMPAT_ZERODEV
#define COMPAT_ZERODEV(dev)	(0)
#endif

/*
 * unimplemented VM system calls:
 */

/*
 * sys_sbrk: sbrk system call.
 */

/* ARGSUSED */
int
sys_sbrk(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
#if 0
	struct sys_sbrk_args /* {
		syscallarg(intptr_t) incr;
	} */ *uap = v;
#endif

	return (ENOSYS);
}

/*
 * sys_sstk: sstk system call.
 */

/* ARGSUSED */
int
sys_sstk(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
#if 0
	struct sys_sstk_args /* {
		syscallarg(int) incr;
	} */ *uap = v;
#endif

	return (ENOSYS);
}

/*
 * sys_mincore: determine if pages are in core or not.
 */
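
/*
 * For each page in [addr, addr + len) we store one status byte in the
 * user's vec: 1 if the page is resident (present in the entry's anon
 * layer or its backing object, or part of a device mapping), 0 if not.
 */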

/* ARGSUSED */
int
sys_mincore(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys_mincore_args /* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(char *) vec;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct vm_page *pg;
	char *vec, pgi;
	struct uvm_object *uobj;
	struct vm_amap *amap;
	struct vm_anon *anon;
	struct vm_map_entry *entry;
	vaddr_t start, end, lim;
	struct vm_map *map;
	vsize_t len;
	int error = 0, npgs;

	map = &p->p_vmspace->vm_map;

	start = (vaddr_t)SCARG(uap, addr);
	len = SCARG(uap, len);
	vec = SCARG(uap, vec);

	if (start & PAGE_MASK)
		return (EINVAL);
	len = round_page(len);
	end = start + len;
	if (end <= start)
		return (EINVAL);

	/*
	 * Lock down vec ahead of time, so that storing the status byte
	 * for a page can't fault and thereby outdate the status we are
	 * returning.
	 */

	npgs = len >> PAGE_SHIFT;
	error = uvm_vslock(p, vec, npgs, VM_PROT_WRITE);
	if (error) {
		return error;
	}
	vm_map_lock_read(map);

	if (uvm_map_lookup_entry(map, start, &entry) == FALSE) {
		error = ENOMEM;
		goto out;
	}

	for (/* nothing */;
	     entry != &map->header && entry->start < end;
	     entry = entry->next) {
		KASSERT(!UVM_ET_ISSUBMAP(entry));
		KASSERT(start >= entry->start);

		/* Make sure there are no holes. */
		if (entry->end < end &&
		     (entry->next == &map->header ||
		      entry->next->start > entry->end)) {
			error = ENOMEM;
			goto out;
		}

		lim = end < entry->end ? end : entry->end;

		/*
		 * Special case for objects with no "real" pages.  Those
		 * are always considered resident (mapped devices).
		 */

		if (UVM_ET_ISOBJ(entry)) {
			KASSERT(!UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj));
			if (UVM_OBJ_IS_DEVICE(entry->object.uvm_obj)) {
				for (/* nothing */; start < lim;
				     start += PAGE_SIZE, vec++)
					subyte(vec, 1);
				continue;
			}
		}

		amap = entry->aref.ar_amap;	/* top layer */
		uobj = entry->object.uvm_obj;	/* bottom layer */

		if (amap != NULL)
			amap_lock(amap);
		if (uobj != NULL)
			simple_lock(&uobj->vmobjlock);

		for (/* nothing */; start < lim; start += PAGE_SIZE, vec++) {
			pgi = 0;
			if (amap != NULL) {
				/* Check the top layer first. */
				anon = amap_lookup(&entry->aref,
				    start - entry->start);
				/* Don't need to lock anon here. */
				if (anon != NULL && anon->u.an_page != NULL) {

					/*
					 * Anon has the page for this entry
					 * offset.
					 */

					pgi = 1;
				}
			}
			if (uobj != NULL && pgi == 0) {
				/* Check the bottom layer. */
				pg = uvm_pagelookup(uobj,
				    entry->offset + (start - entry->start));
				if (pg != NULL) {

					/*
					 * Object has the page for this entry
					 * offset.
					 */

					pgi = 1;
				}
			}
			(void) subyte(vec, pgi);
		}
		if (uobj != NULL)
			simple_unlock(&uobj->vmobjlock);
		if (amap != NULL)
			amap_unlock(amap);
	}

 out:
	vm_map_unlock_read(map);
	uvm_vsunlock(p, SCARG(uap, vec), npgs);
	return (error);
}

/*
 * sys_mmap: mmap system call.
 *
 * => file offset and address may not be page aligned
 *    - if MAP_FIXED, offset and address must have the same remainder
 *      mod PAGE_SIZE
 *    - if address isn't page aligned the mapping starts at trunc_page(addr)
 *      and the return value is adjusted up by the page offset.
 */
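
/*
 * For illustration, a typical userland call that exercises the file
 * path below (fd, len and off are placeholders):
 *
 *	void *va = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, off);
 *
 * Since neither MAP_FIXED nor MAP_TRYFIXED is set, the kernel picks
 * the address; the PROT_/MAP_ flags are vetted against the open file
 * before uvm_mmap() establishes the mapping.
 */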

int
sys_mmap(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys_mmap_args /* {
		syscallarg(caddr_t) addr;
		syscallarg(size_t) len;
		syscallarg(int) prot;
		syscallarg(int) flags;
		syscallarg(int) fd;
		syscallarg(long) pad;
		syscallarg(off_t) pos;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	vaddr_t addr;
	struct vattr va;
	off_t pos;
	vsize_t size, pageoff;
	vm_prot_t prot, maxprot;
	int flags, fd;
	vaddr_t vm_min_address = VM_MIN_ADDRESS;
	struct filedesc *fdp = p->p_fd;
	struct file *fp;
	struct vnode *vp;
	void *handle;
	int error;

	/*
	 * first, extract syscall args from the uap.
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	prot = SCARG(uap, prot) & VM_PROT_ALL;
	flags = SCARG(uap, flags);
	fd = SCARG(uap, fd);
	pos = SCARG(uap, pos);

	/*
	 * Fix up the old, deprecated MAP_COPY into MAP_PRIVATE, and
	 * validate the flags.
	 */
	if (flags & MAP_COPY)
		flags = (flags & ~MAP_COPY) | MAP_PRIVATE;
	if ((flags & (MAP_SHARED|MAP_PRIVATE)) == (MAP_SHARED|MAP_PRIVATE))
		return (EINVAL);

	/*
	 * align file position and save offset.  adjust size.
	 */

	pageoff = (pos & PAGE_MASK);
	pos  -= pageoff;
	size += pageoff;			/* add offset */
	size = (vsize_t)round_page(size);	/* round up */
	if ((ssize_t) size < 0)
		return (EINVAL);			/* don't allow wrap */

	/*
	 * now check (MAP_FIXED) or get (!MAP_FIXED) the "addr"
	 */

	if (flags & MAP_FIXED) {

		/* ensure address and file offset are aligned properly */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return (EINVAL);

		if (VM_MAXUSER_ADDRESS > 0 &&
		    (addr + size) > VM_MAXUSER_ADDRESS)
			return (EFBIG);
		if (vm_min_address > 0 && addr < vm_min_address)
			return (EINVAL);
		if (addr > addr + size)
			return (EOVERFLOW);		/* no wrapping! */

	} else if (addr == 0 || !(flags & MAP_TRYFIXED)) {

		/*
		 * not fixed: make sure we skip over the largest
		 * possible heap for non-topdown mapping arrangements.
		 * we will refine our guess later (e.g. to account for
		 * VAC, etc)
		 */

		if (addr == 0 ||
		    !(p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN))
			addr = MAX(addr,
			    VM_DEFAULT_ADDRESS(p->p_vmspace->vm_daddr, size));
		else
			addr = MIN(addr,
			    VM_DEFAULT_ADDRESS(p->p_vmspace->vm_daddr, size));
	}
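
	/*
	 * (With MAP_TRYFIXED and a non-zero hint, addr is deliberately
	 * left untouched above: uvm_map() will try the caller's hint
	 * first and fall back to choosing an address on its own.)
	 */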

	/*
	 * check for file mappings (i.e. not anonymous) and verify file.
	 */

	if ((flags & MAP_ANON) == 0) {

		if ((fp = fd_getfile(fdp, fd)) == NULL)
			return (EBADF);

		simple_unlock(&fp->f_slock);

		if (fp->f_type != DTYPE_VNODE)
			return (ENODEV);		/* only mmap vnodes! */
		vp = (struct vnode *)fp->f_data;	/* convert to vnode */

		if (vp->v_type != VREG && vp->v_type != VCHR &&
		    vp->v_type != VBLK)
			return (ENODEV);  /* only REG/CHR/BLK support mmap */

		if (vp->v_type != VCHR && pos < 0)
			return (EINVAL);

		if (vp->v_type != VCHR && (pos + size) < pos)
			return (EOVERFLOW);		/* no offset wrapping */

		/* special case: catch SunOS style /dev/zero */
		if (vp->v_type == VCHR
		    && (vp->v_rdev == zerodev || COMPAT_ZERODEV(vp->v_rdev))) {
			flags |= MAP_ANON;
			goto is_anon;
		}

		/*
		 * Old programs may not select a specific sharing type, so
		 * default to an appropriate one.
		 *
		 * XXX: how does MAP_ANON fit in the picture?
		 */
		if ((flags & (MAP_SHARED|MAP_PRIVATE)) == 0) {
#if defined(DEBUG)
			printf("WARNING: defaulted mmap() share type to "
			   "%s (pid %d command %s)\n", vp->v_type == VCHR ?
			   "MAP_SHARED" : "MAP_PRIVATE", p->p_pid,
			    p->p_comm);
#endif
			if (vp->v_type == VCHR)
				flags |= MAP_SHARED;	/* for a device */
			else
				flags |= MAP_PRIVATE;	/* for a file */
		}

		/*
		 * MAP_PRIVATE device mappings don't make sense (and aren't
		 * supported anyway).  However, some programs rely on this,
		 * so just change it to MAP_SHARED.
		 */
		if (vp->v_type == VCHR && (flags & MAP_PRIVATE) != 0) {
			flags = (flags & ~MAP_PRIVATE) | MAP_SHARED;
		}

		/*
		 * now check protection
		 */

		maxprot = VM_PROT_EXECUTE;

		/* check read access */
		if (fp->f_flag & FREAD)
			maxprot |= VM_PROT_READ;
		else if (prot & PROT_READ)
			return (EACCES);

		/* check write access, shared case first */
		if (flags & MAP_SHARED) {
			/*
			 * If the file is writable, add PROT_WRITE to
			 * maxprot only if the file is not immutable or
			 * append-only; otherwise, if PROT_WRITE was
			 * asked for, return EPERM.
			 */
			if (fp->f_flag & FWRITE) {
				if ((error =
				    VOP_GETATTR(vp, &va, p->p_ucred, p)))
					return (error);
				if ((va.va_flags &
				    (SF_SNAPSHOT|IMMUTABLE|APPEND)) == 0)
					maxprot |= VM_PROT_WRITE;
				else if (prot & PROT_WRITE)
					return (EPERM);
			}
			else if (prot & PROT_WRITE)
				return (EACCES);
		} else {
			/* a MAP_PRIVATE mapping can always be written to */
			maxprot |= VM_PROT_WRITE;
		}
		handle = vp;

	} else {		/* MAP_ANON case */
		/*
		 * XXX What do we do about (MAP_SHARED|MAP_PRIVATE) == 0?
		 */
		if (fd != -1)
			return (EINVAL);

 is_anon:		/* label for SunOS style /dev/zero */
		handle = NULL;
		maxprot = VM_PROT_ALL;
		pos = 0;
	}

	/*
	 * XXX (in)sanity check.  We don't do proper datasize checking
	 * XXX for anonymous (or private writable) mmap().  However, we
	 * XXX know that if we're trying to allocate more than the amount
	 * XXX remaining under our current data size limit, _that_ should
	 * XXX be disallowed.
	 */
	if ((flags & MAP_ANON) != 0 ||
	    ((flags & MAP_PRIVATE) != 0 && (prot & PROT_WRITE) != 0)) {
		if (size >
		    (p->p_rlimit[RLIMIT_DATA].rlim_cur -
		     ctob(p->p_vmspace->vm_dsize))) {
			return (ENOMEM);
		}
	}

	/*
	 * now let kernel internal function uvm_mmap do the work.
	 */

	error = uvm_mmap(&p->p_vmspace->vm_map, &addr, size, prot, maxprot,
	    flags, handle, pos, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);

	if (error == 0)
		/* remember to add offset */
		*retval = (register_t)(addr + pageoff);

	return (error);
}

/*
 * sys___msync13: the msync system call (a front-end for flush)
 */

int
sys___msync13(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys___msync13_args /* {
		syscallarg(caddr_t) addr;
		syscallarg(size_t) len;
		syscallarg(int) flags;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	struct vm_map *map;
	int error, rv, flags, uvmflags;

	/*
	 * extract syscall args from the uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	flags = SCARG(uap, flags);

	/* sanity check flags */
	if ((flags & ~(MS_ASYNC | MS_SYNC | MS_INVALIDATE)) != 0 ||
	    (flags & (MS_ASYNC | MS_SYNC | MS_INVALIDATE)) == 0 ||
	    (flags & (MS_ASYNC | MS_SYNC)) == (MS_ASYNC | MS_SYNC))
		return (EINVAL);
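	/*
	 * (MS_INVALIDATE alone passes the check above; treat it as a
	 * synchronous flush.)
	 */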
	if ((flags & (MS_ASYNC | MS_SYNC)) == 0)
		flags |= MS_SYNC;

	/*
	 * align the address to a page boundary and adjust the size accordingly.
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	/* disallow wrap-around. */
	if (addr + size < addr)
		return (EINVAL);

	/*
	 * get map
	 */

	map = &p->p_vmspace->vm_map;

	/*
	 * XXXCDC: do we really need this semantic?
	 *
	 * XXX Gak!  If size is zero we are supposed to sync "all modified
	 * pages with the region containing addr".  Unfortunately, we
	 * don't really keep track of individual mmaps so we approximate
	 * by flushing the range of the map entry containing addr.
	 * This can be incorrect if the region splits or is coalesced
	 * with a neighbor.
	 */

	if (size == 0) {
		struct vm_map_entry *entry;

		vm_map_lock_read(map);
		rv = uvm_map_lookup_entry(map, addr, &entry);
		if (rv == TRUE) {
			addr = entry->start;
			size = entry->end - entry->start;
		}
		vm_map_unlock_read(map);
		if (rv == FALSE)
			return (EINVAL);
	}

	/*
	 * translate MS_ flags into PGO_ flags
	 */

	uvmflags = PGO_CLEANIT;
	if (flags & MS_INVALIDATE)
		uvmflags |= PGO_FREE;
	if (flags & MS_SYNC)
		uvmflags |= PGO_SYNCIO;
	else
		uvmflags |= PGO_SYNCIO;	 /* XXXCDC: force sync for now! */

	error = uvm_map_clean(map, addr, addr+size, uvmflags);
	return error;
}

/*
 * sys_munmap: unmap a user's memory
 */

int
sys_munmap(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys_munmap_args /* {
		syscallarg(caddr_t) addr;
		syscallarg(size_t) len;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	struct vm_map *map;
	vaddr_t vm_min_address = VM_MIN_ADDRESS;
	struct vm_map_entry *dead_entries;

	/*
	 * get syscall args.
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);

	/*
	 * align the address to a page boundary and adjust the size accordingly.
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	if ((int)size < 0)
		return (EINVAL);
	if (size == 0)
		return (0);

	/*
	 * Check for illegal addresses.  Watch out for address wrap...
	 * Note that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (VM_MAXUSER_ADDRESS > 0 && addr + size > VM_MAXUSER_ADDRESS)
		return (EINVAL);
	if (vm_min_address > 0 && addr < vm_min_address)
		return (EINVAL);
	if (addr > addr + size)
		return (EINVAL);
	map = &p->p_vmspace->vm_map;

	/*
	 * interesting system call semantic: make sure entire range is
	 * allocated before allowing an unmap.
	 */

	vm_map_lock(map);
#if 0
	if (!uvm_map_checkprot(map, addr, addr + size, VM_PROT_NONE)) {
		vm_map_unlock(map);
		return (EINVAL);
	}
#endif
	uvm_unmap_remove(map, addr, addr + size, &dead_entries, NULL);
	vm_map_unlock(map);
	if (dead_entries != NULL)
		uvm_unmap_detach(dead_entries, 0);
	return (0);
}

/*
 * sys_mprotect: the mprotect system call
 */

int
sys_mprotect(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys_mprotect_args /* {
		syscallarg(caddr_t) addr;
		syscallarg(size_t) len;
		syscallarg(int) prot;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	vm_prot_t prot;
	int error;

	/*
	 * extract syscall args from uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	prot = SCARG(uap, prot) & VM_PROT_ALL;

	/*
	 * align the address to a page boundary and adjust the size accordingly.
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = round_page(size);

	error = uvm_map_protect(&p->p_vmspace->vm_map, addr, addr + size, prot,
				FALSE);
	return error;
}

/*
 * sys_minherit: the minherit system call
 */

int
sys_minherit(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys_minherit_args /* {
		syscallarg(caddr_t) addr;
		syscallarg(int) len;
		syscallarg(int) inherit;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	vm_inherit_t inherit;
	int error;

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	inherit = SCARG(uap, inherit);

	/*
	 * align the address to a page boundary and adjust the size accordingly.
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	if ((int)size < 0)
		return (EINVAL);
	error = uvm_map_inherit(&p->p_vmspace->vm_map, addr, addr + size,
				inherit);
	return error;
}

/*
 * sys_madvise: give advice about memory usage.
 */

/* ARGSUSED */
int
sys_madvise(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys_madvise_args /* {
		syscallarg(caddr_t) addr;
		syscallarg(size_t) len;
		syscallarg(int) behav;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	int advice, error;

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	advice = SCARG(uap, behav);

	/*
	 * align the address to a page boundary, and adjust the size accordingly
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	if ((ssize_t)size <= 0)
		return (EINVAL);

	switch (advice) {
	case MADV_NORMAL:
	case MADV_RANDOM:
	case MADV_SEQUENTIAL:
		error = uvm_map_advice(&p->p_vmspace->vm_map, addr, addr + size,
		    advice);
		break;

	case MADV_WILLNEED:

		/*
		 * Activate all these pages, pre-faulting them in if
		 * necessary.
		 */
		/*
		 * XXX IMPLEMENT ME.
		 * Should invent a "weak" mode for uvm_fault()
		 * which would only do the PGO_LOCKED pgo_get().
		 */

		return (0);

	case MADV_DONTNEED:

		/*
		 * Deactivate all these pages.  We don't need them
		 * any more.  We don't, however, toss the data in
		 * the pages.
		 */

		error = uvm_map_clean(&p->p_vmspace->vm_map, addr, addr + size,
		    PGO_DEACTIVATE);
		break;

	case MADV_FREE:

		/*
		 * These pages contain no valid data, and may be
		 * garbage-collected.  Toss all resources, including
		 * any swap space in use.
		 */

		error = uvm_map_clean(&p->p_vmspace->vm_map, addr, addr + size,
		    PGO_FREE);
		break;

	case MADV_SPACEAVAIL:

		/*
		 * XXXMRG What is this?  I think it's:
		 *
		 *	Ensure that we have allocated backing-store
		 *	for these pages.
		 *
		 * This is going to require changes to the page daemon,
		 * as it will free swap space allocated to pages in core.
		 * There's also what to do for device/file/anonymous memory.
		 */

		return (EINVAL);

	default:
		return (EINVAL);
	}

	return error;
}

/*
 * sys_mlock: memory lock
 */

int
sys_mlock(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys_mlock_args /* {
		syscallarg(const void *) addr;
		syscallarg(size_t) len;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	int error;

	/*
	 * extract syscall args from uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);

	/*
	 * align the address to a page boundary and adjust the size accordingly
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	/* disallow wrap-around. */
	if (addr + size < addr)
		return (EINVAL);

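	/*
	 * Check the system-wide wired-page limit first, then this
	 * process's RLIMIT_MEMLOCK limit, before trying to wire the range.
	 */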
	if (atop(size) + uvmexp.wired > uvmexp.wiredmax)
		return (EAGAIN);

	if (size + ptoa(pmap_wired_count(vm_map_pmap(&p->p_vmspace->vm_map))) >
			p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur)
		return (EAGAIN);

	error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, FALSE,
	    0);
	if (error == EFAULT)
		error = ENOMEM;
	return error;
}

/*
 * sys_munlock: unlock wired pages
 */

int
sys_munlock(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys_munlock_args /* {
		syscallarg(const void *) addr;
		syscallarg(size_t) len;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	int error;

	/*
	 * extract syscall args from uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);

	/*
	 * align the address to a page boundary, and adjust the size accordingly
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	/* disallow wrap-around. */
	if (addr + size < addr)
		return (EINVAL);

	error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, TRUE,
	    0);
	if (error == EFAULT)
		error = ENOMEM;
	return error;
}

/*
 * sys_mlockall: lock all pages mapped into an address space.
 */

int
sys_mlockall(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys_mlockall_args /* {
		syscallarg(int) flags;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	int error, flags;

	flags = SCARG(uap, flags);

	if (flags == 0 ||
	    (flags & ~(MCL_CURRENT|MCL_FUTURE)) != 0)
		return (EINVAL);

	error = uvm_map_pageable_all(&p->p_vmspace->vm_map, flags,
	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
	return (error);
}

/*
 * sys_munlockall: unlock all pages mapped into an address space.
 */

int
sys_munlockall(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct proc *p = l->l_proc;

	(void) uvm_map_pageable_all(&p->p_vmspace->vm_map, 0, 0);
	return (0);
}

/*
 * uvm_mmap: internal version of mmap
 *
 * - used by sys_mmap and various framebuffers
 * - handle is a vnode pointer or NULL for MAP_ANON
 * - caller must page-align the file offset
 */
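
/*
 * For illustration, a kernel caller (e.g. a framebuffer driver's mmap
 * path) might establish a shared device mapping like this, where "vp"
 * is the device vnode and the other names are placeholders:
 *
 *	error = uvm_mmap(&p->p_vmspace->vm_map, &va, size, prot, maxprot,
 *	    MAP_SHARED, vp, off, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
 */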

int
uvm_mmap(map, addr, size, prot, maxprot, flags, handle, foff, locklimit)
	struct vm_map *map;
	vaddr_t *addr;
	vsize_t size;
	vm_prot_t prot, maxprot;
	int flags;
	void *handle;
	voff_t foff;
	vsize_t locklimit;
{
	struct uvm_object *uobj;
	struct vnode *vp;
	vaddr_t align = 0;
	int error;
	int advice = UVM_ADV_NORMAL;
	uvm_flag_t uvmflag = 0;

	/*
	 * check params
	 */

	if (size == 0)
		return(0);
	if (foff & PAGE_MASK)
		return(EINVAL);
	if ((prot & maxprot) != prot)
		return(EINVAL);

	/*
	 * for non-fixed mappings, round off the suggested address.
	 * for fixed mappings, check alignment and zap old mappings.
	 */

	if ((flags & MAP_FIXED) == 0) {
		*addr = round_page(*addr);
	} else {
		if (*addr & PAGE_MASK)
			return(EINVAL);
		uvmflag |= UVM_FLAG_FIXED;
		(void) uvm_unmap(map, *addr, *addr + size);
	}

	/*
	 * Try to see if any requested alignment can even be attempted.
	 * Make sure we can express the alignment (asking for a >= 4GB
	 * alignment on an ILP32 architecture makes no sense) and that
	 * the alignment is at least a page-sized quantity.  If the
	 * request was for a fixed mapping, make sure the supplied address
	 * adheres to the requested alignment.
	 */
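	/*
	 * (For illustration: the alignment arrives log2-encoded in the
	 * flags, so a caller passing MAP_ALIGNED(16) is decoded below
	 * as align = 1L << 16, i.e. a 64KB-aligned mapping.)
	 */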
	align = (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT;
	if (align) {
		if (align >= sizeof(vaddr_t) * NBBY)
			return(EINVAL);
		align = 1L << align;
		if (align < PAGE_SIZE)
			return(EINVAL);
		if (align >= map->max_offset)
			return(ENOMEM);
		if (flags & MAP_FIXED) {
			if ((*addr & (align-1)) != 0)
				return(EINVAL);
			align = 0;
		}
	}

	/*
	 * handle anon vs. non-anon mappings.   for non-anon mappings attach
	 * to underlying vm object.
	 */

	if (flags & MAP_ANON) {
		foff = UVM_UNKNOWN_OFFSET;
		uobj = NULL;
		if ((flags & MAP_SHARED) == 0)
			/* XXX: defer amap create */
			uvmflag |= UVM_FLAG_COPYONW;
		else
			/* shared: create amap now */
			uvmflag |= UVM_FLAG_OVERLAY;

	} else {
		vp = (struct vnode *)handle;

		/*
		 * Don't allow mmap for EXEC if the file system
		 * is mounted NOEXEC.
		 */
		if ((prot & PROT_EXEC) != 0 &&
		    (vp->v_mount->mnt_flag & MNT_NOEXEC) != 0)
			return (EACCES);

		if (vp->v_type != VCHR) {
			error = VOP_MMAP(vp, 0, curproc->p_ucred, curproc);
			if (error) {
				return error;
			}

			uobj = uvn_attach((void *)vp, (flags & MAP_SHARED) ?
			   maxprot : (maxprot & ~VM_PROT_WRITE));

			/* XXX for now, attach doesn't gain a ref */
			VREF(vp);

			/*
			 * If the vnode is being mapped with PROT_EXEC,
			 * then mark it as text.
			 */
			if (prot & PROT_EXEC)
				vn_markexec(vp);
		} else {
			int i = maxprot;

			/*
			 * XXX Some devices don't like to be mapped with
			 * XXX PROT_EXEC or PROT_WRITE, but we don't really
			 * XXX have a better way of handling this, right now
			 */
			do {
				uobj = udv_attach((void *) &vp->v_rdev,
				    (flags & MAP_SHARED) ? i :
				    (i & ~VM_PROT_WRITE), foff, size);
				i--;
			} while ((uobj == NULL) && (i > 0));
			advice = UVM_ADV_RANDOM;
		}
		if (uobj == NULL)
			return((vp->v_type == VREG) ? ENOMEM : EINVAL);
		if ((flags & MAP_SHARED) == 0)
			uvmflag |= UVM_FLAG_COPYONW;
	}

	uvmflag = UVM_MAPFLAG(prot, maxprot,
			(flags & MAP_SHARED) ? UVM_INH_SHARE : UVM_INH_COPY,
			advice, uvmflag);
	error = uvm_map(map, addr, size, uobj, foff, align, uvmflag);
	if (error) {
		if (uobj)
			uobj->pgops->pgo_detach(uobj);
		return error;
	}

	/*
	 * POSIX 1003.1b -- if our address space was configured
	 * to lock all future mappings, wire the one we just made.
	 *
	 * Also handle the MAP_WIRED flag here.
	 */

	if (prot == VM_PROT_NONE) {

		/*
		 * No more work to do in this case.
		 */

		return (0);
	}
	vm_map_lock(map);
	if ((flags & MAP_WIRED) != 0 || (map->flags & VM_MAP_WIREFUTURE) != 0) {
		if (atop(size) + uvmexp.wired > uvmexp.wiredmax ||
		    (locklimit != 0 &&
		     size + ptoa(pmap_wired_count(vm_map_pmap(map))) >
		     locklimit)) {
			vm_map_unlock(map);
			uvm_unmap(map, *addr, *addr + size);
			return ENOMEM;
		}

		/*
		 * uvm_map_pageable() always returns the map unlocked.
		 */

		error = uvm_map_pageable(map, *addr, *addr + size,
					 FALSE, UVM_LK_ENTER);
		if (error) {
			uvm_unmap(map, *addr, *addr + size);
			return error;
		}
		return (0);
	}
	vm_map_unlock(map);
	return 0;
}
1209