/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: vm_mmap.c 1.3 90/01/21$
 *
 *	@(#)vm_mmap.c	7.3 (Berkeley) 04/20/91
 */

/*
 * Mapped file (mmap) interface to VM
 */

#include "param.h"
#include "systm.h"
#include "filedesc.h"
#include "proc.h"
#include "vnode.h"
#include "specdev.h"
#include "file.h"
#include "mman.h"
#include "conf.h"

#include "vm.h"
#include "vm_pager.h"
#include "vm_prot.h"
#include "vm_statistics.h"

#ifdef DEBUG
int mmapdebug = 0;
#define MDB_FOLLOW	0x01
#define MDB_SYNC	0x02
#define MDB_MAPIT	0x04
#endif

/* ARGSUSED */
getpagesize(p, uap, retval)
	struct proc *p;
	void *uap;
	int *retval;
{

	*retval = NBPG * CLSIZE;
	return (0);
}
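
/*
 * Illustrative sketch, not part of the original source: a user
 * program would use the value computed above to align mmap/msync
 * arguments.  The concrete numbers are assumptions; e.g. with
 * NBPG = 4096 and CLSIZE = 1 the call returns 4096.
 *
 *	int pagesize = getpagesize();
 *	off_t aligned = off & ~(pagesize - 1);
 */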

/* ARGSUSED */
sbrk(p, uap, retval)
	struct proc *p;
	struct args {
		int	incr;
	} *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

/* ARGSUSED */
sstk(p, uap, retval)
	struct proc *p;
	struct args {
		int	incr;
	} *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

smmap(p, uap, retval)
	struct proc *p;
	register struct args {
		caddr_t	addr;
		int	len;
		int	prot;
		int	flags;
		int	fd;
		off_t	pos;
	} *uap;
	int *retval;
{
	register struct filedesc *fdp = p->p_fd;
	register struct file *fp;
	struct vnode *vp;
	vm_offset_t addr;
	vm_size_t size;
	vm_prot_t prot;
	caddr_t handle;
	int mtype, error;

#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("mmap(%d): addr %x len %x pro %x flg %x fd %d pos %x\n",
		       p->p_pid, uap->addr, uap->len, uap->prot,
		       uap->flags, uap->fd, uap->pos);
#endif
	/*
	 * Make sure one of the sharing types is specified
	 */
	mtype = uap->flags & MAP_TYPE;
	switch (mtype) {
	case MAP_FILE:
	case MAP_ANON:
		break;
	default:
		return(EINVAL);
	}
	/*
	 * Address (if FIXED) and size must be page aligned
	 */
	size = (vm_size_t)uap->len;
	addr = (vm_offset_t)uap->addr;
	if ((size & page_mask) ||
	    (uap->flags & MAP_FIXED) && (addr & page_mask))
		return(EINVAL);
	/*
	 * Mapping file or named anonymous, get fp for validation
	 */
	if (mtype == MAP_FILE || uap->fd != -1) {
		if (((unsigned)uap->fd) >= fdp->fd_nfiles ||
		    (fp = fdp->fd_ofiles[uap->fd]) == NULL)
			return(EBADF);
	}
	/*
	 * If we are mapping a file we need to check various
	 * file/vnode related things.
	 */
	if (mtype == MAP_FILE) {
		/*
		 * Obtain vnode and make sure it is of appropriate type
		 */
		if (fp->f_type != DTYPE_VNODE)
			return(EINVAL);
		vp = (struct vnode *)fp->f_data;
		if (vp->v_type != VREG && vp->v_type != VCHR)
			return(EINVAL);
		/*
		 * Ensure that file protection and desired protection
		 * are compatible.  Note that we only worry about writability
		 * if mapping is shared.
		 */
		if ((uap->prot & PROT_READ) && (fp->f_flag & FREAD) == 0 ||
		    ((uap->flags & MAP_SHARED) &&
		     (uap->prot & PROT_WRITE) && (fp->f_flag & FWRITE) == 0))
			return(EACCES);
		handle = (caddr_t)vp;
	} else if (uap->fd != -1)
		handle = (caddr_t)fp;
	else
		handle = NULL;
	/*
	 * Map protections to MACH style
	 */
	prot = VM_PROT_NONE;
	if (uap->prot & PROT_READ)
		prot |= VM_PROT_READ;
	if (uap->prot & PROT_WRITE)
		prot |= VM_PROT_WRITE;
	if (uap->prot & PROT_EXEC)
		prot |= VM_PROT_EXECUTE;

	error = vm_mmap(&p->p_vmspace->vm_map, &addr, size, prot,
			uap->flags, handle, (vm_offset_t)uap->pos);
	if (error == 0)
		*retval = (int) addr;
	return(error);
}
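
/*
 * Illustrative user-level sketch (an assumption, not part of this
 * file): a call that passes the checks above -- a sharing type
 * selected in MAP_TYPE, a page-aligned length, and requested
 * protection compatible with how the descriptor was opened:
 *
 *	fd = open("/tmp/data", O_RDWR);		hypothetical file
 *	addr = mmap((caddr_t)0, len,		len page aligned
 *		    PROT_READ|PROT_WRITE,
 *		    MAP_FILE|MAP_SHARED, fd, (off_t)0);
 *
 * Since MAP_FIXED is not set, the address need not be aligned and
 * vm_mmap() below rounds it up to a page boundary.
 */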

msync(p, uap, retval)
	struct proc *p;
	struct args {
		char	*addr;
		int	len;
	} *uap;
	int *retval;
{
	vm_offset_t addr, objoff, oaddr;
	vm_size_t size, osize;
	vm_prot_t prot, mprot;
	vm_inherit_t inherit;
	vm_object_t object;
	boolean_t shared;
	int rv;

#ifdef DEBUG
	if (mmapdebug & (MDB_FOLLOW|MDB_SYNC))
		printf("msync(%d): addr %x len %x\n",
		       p->p_pid, uap->addr, uap->len);
#endif
	if (((int)uap->addr & page_mask) || (uap->len & page_mask))
		return(EINVAL);
	addr = oaddr = (vm_offset_t)uap->addr;
	osize = (vm_size_t)uap->len;
	/*
	 * Region must be entirely contained in a single entry
	 */
	if (!vm_map_is_allocated(&p->p_vmspace->vm_map, addr, addr+osize,
	    TRUE))
		return(EINVAL);
	/*
	 * Determine the object associated with that entry
	 * (object is returned locked on KERN_SUCCESS)
	 */
	rv = vm_region(&p->p_vmspace->vm_map, &addr, &size, &prot, &mprot,
		       &inherit, &shared, &object, &objoff);
	if (rv != KERN_SUCCESS)
		return(EINVAL);
#ifdef DEBUG
	if (mmapdebug & MDB_SYNC)
		printf("msync: region: object %x addr %x size %d objoff %d\n",
		       object, addr, size, objoff);
#endif
	/*
	 * Do not msync non-vnode-backed objects.
	 */
	if (object->internal || object->pager == NULL ||
	    object->pager->pg_type != PG_VNODE) {
		vm_object_unlock(object);
		return(EINVAL);
	}
	objoff += oaddr - addr;
	if (osize == 0)
		osize = size;
#ifdef DEBUG
	if (mmapdebug & MDB_SYNC)
		printf("msync: cleaning/flushing object range [%x-%x)\n",
		       objoff, objoff+osize);
#endif
	if (prot & VM_PROT_WRITE)
		vm_object_page_clean(object, objoff, objoff+osize);
	/*
	 * (XXX)
	 * Bummer, gotta flush all cached pages to ensure
	 * consistency with the file system cache.
	 */
	vm_object_page_remove(object, objoff, objoff+osize);
	vm_object_unlock(object);
	return(0);
}
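
/*
 * Illustrative sketch (assumption): both arguments to the handler
 * above must be page aligned and the range must sit in a single map
 * entry.  A length of zero syncs from addr to the end of that entry:
 *
 *	msync(addr, len);	addr, len multiples of the page size
 *	msync(addr, 0);		flush through the end of the entry
 */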

munmap(p, uap, retval)
	register struct proc *p;
	register struct args {
		caddr_t	addr;
		int	len;
	} *uap;
	int *retval;
{
	vm_offset_t addr;
	vm_size_t size;

#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("munmap(%d): addr %x len %x\n",
		       p->p_pid, uap->addr, uap->len);
#endif

	addr = (vm_offset_t) uap->addr;
	size = (vm_size_t) uap->len;
	if ((addr & page_mask) || (size & page_mask))
		return(EINVAL);
	if (size == 0)
		return(0);
	if (!vm_map_is_allocated(&p->p_vmspace->vm_map, addr, addr+size,
	    FALSE))
		return(EINVAL);
	/* returns nothing but KERN_SUCCESS anyway */
	(void) vm_map_remove(&p->p_vmspace->vm_map, addr, addr+size);
	return(0);
}
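
/*
 * Illustrative sketch (assumption): the range may span several map
 * entries as long as they are contiguous, so a caller can unmap a
 * page-aligned piece of an earlier, larger mapping:
 *
 *	munmap(addr, len);	page-aligned addr and len
 */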

munmapfd(fd)
{
#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("munmapfd(%d): fd %d\n", curproc->p_pid, fd);
#endif

	/*
	 * XXX -- should vm_deallocate any regions mapped to this file
	 */
	curproc->p_fd->fd_ofileflags[fd] &= ~UF_MAPPED;
}

mprotect(p, uap, retval)
	struct proc *p;
	struct args {
		char	*addr;
		int	len;
		int	prot;
	} *uap;
	int *retval;
{
	vm_offset_t addr;
	vm_size_t size;
	register vm_prot_t prot;

#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("mprotect(%d): addr %x len %x prot %d\n",
		       p->p_pid, uap->addr, uap->len, uap->prot);
#endif

	addr = (vm_offset_t) uap->addr;
	size = (vm_size_t) uap->len;
	if ((addr & page_mask) || (size & page_mask))
		return(EINVAL);
	/*
	 * Map protections
	 */
	prot = VM_PROT_NONE;
	if (uap->prot & PROT_READ)
		prot |= VM_PROT_READ;
	if (uap->prot & PROT_WRITE)
		prot |= VM_PROT_WRITE;
	if (uap->prot & PROT_EXEC)
		prot |= VM_PROT_EXECUTE;

	switch (vm_map_protect(&p->p_vmspace->vm_map, addr, addr+size, prot,
	    FALSE)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}
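
/*
 * Illustrative sketch (assumption): the PROT_* bits map one-for-one
 * onto the Mach VM_PROT_* bits above, so write-protecting a mapped
 * region is simply:
 *
 *	mprotect(addr, len, PROT_READ);		page-aligned range
 */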

/* ARGSUSED */
madvise(p, uap, retval)
	struct proc *p;
	struct args {
		char	*addr;
		int	len;
		int	behav;
	} *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

/* ARGSUSED */
mincore(p, uap, retval)
	struct proc *p;
	struct args {
		char	*addr;
		int	len;
		char	*vec;
	} *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

/*
 * Internal version of mmap.
 * Currently used by mmap, exec, and sys5 shared memory.
 * Handle is:
 *	MAP_FILE: a vnode pointer
 *	MAP_ANON: NULL or a file pointer
 */
vm_mmap(map, addr, size, prot, flags, handle, foff)
	register vm_map_t map;
	register vm_offset_t *addr;
	register vm_size_t size;
	vm_prot_t prot;
	register int flags;
	caddr_t handle;		/* XXX should be vp */
	vm_offset_t foff;
{
	register vm_pager_t pager;
	boolean_t fitit;
	vm_object_t object;
	struct vnode *vp;
	int type;
	int rv = KERN_SUCCESS;

	if (size == 0)
		return (0);

	if ((flags & MAP_FIXED) == 0) {
		fitit = TRUE;
		*addr = round_page(*addr);
	} else {
		fitit = FALSE;
		(void) vm_deallocate(map, *addr, size);
	}

	/*
	 * Lookup/allocate pager.  All except an unnamed anonymous lookup
	 * gain a reference to ensure continued existence of the object.
	 * (XXX the exception is to appease the pageout daemon)
	 */
	if ((flags & MAP_TYPE) == MAP_ANON)
		type = PG_DFLT;
	else {
		vp = (struct vnode *)handle;
		if (vp->v_type == VCHR) {
			type = PG_DEVICE;
			handle = (caddr_t)vp->v_rdev;
		} else
			type = PG_VNODE;
	}
	pager = vm_pager_allocate(type, handle, size, prot);
	if (pager == NULL)
		return (type == PG_DEVICE ? EINVAL : ENOMEM);
	/*
	 * Find object and release extra reference gained by lookup
	 */
	object = vm_object_lookup(pager);
	vm_object_deallocate(object);

	/*
	 * Anonymous memory.
	 */
	if ((flags & MAP_TYPE) == MAP_ANON) {
		rv = vm_allocate_with_pager(map, addr, size, fitit,
					    pager, (vm_offset_t)foff, TRUE);
		if (rv != KERN_SUCCESS) {
			if (handle == NULL)
				vm_pager_deallocate(pager);
			else
				vm_object_deallocate(object);
			goto out;
		}
		/*
		 * Don't cache anonymous objects.
		 * Loses the reference gained by vm_pager_allocate.
		 */
		(void) pager_cache(object, FALSE);
#ifdef DEBUG
		if (mmapdebug & MDB_MAPIT)
			printf("vm_mmap(%d): ANON *addr %x size %x pager %x\n",
			       curproc->p_pid, *addr, size, pager);
#endif
	}
	/*
	 * Must be type MAP_FILE.
	 * Distinguish between character special and regular files.
	 */
	else if (vp->v_type == VCHR) {
		rv = vm_allocate_with_pager(map, addr, size, fitit,
					    pager, (vm_offset_t)foff, FALSE);
		/*
		 * Uncache the object and lose the reference gained
		 * by vm_pager_allocate().  If the call to
		 * vm_allocate_with_pager() was successful, then we
		 * gained an additional reference ensuring the object
		 * will continue to exist.  If the call failed then
		 * the deallocate call below will terminate the
		 * object which is fine.
		 */
		(void) pager_cache(object, FALSE);
		if (rv != KERN_SUCCESS)
			goto out;
	}
	/*
	 * A regular file
	 */
	else {
#ifdef DEBUG
		if (object == NULL)
			printf("vm_mmap: no object: vp %x, pager %x\n",
			       vp, pager);
#endif
		/*
		 * Map it directly.
		 * Allows modifications to go out to the vnode.
		 */
		if (flags & MAP_SHARED) {
			rv = vm_allocate_with_pager(map, addr, size,
						    fitit, pager,
						    (vm_offset_t)foff, FALSE);
			if (rv != KERN_SUCCESS) {
				vm_object_deallocate(object);
				goto out;
			}
			/*
			 * Don't cache the object.  This is the easiest way
			 * of ensuring that data gets back to the filesystem
			 * because vnode_pager_deallocate() will fsync the
			 * vnode.  pager_cache() will lose the extra ref.
			 */
			if (prot & VM_PROT_WRITE)
				pager_cache(object, FALSE);
			else
				vm_object_deallocate(object);
		}
		/*
		 * Copy-on-write of file.  Two flavors.
		 * MAP_COPY is true COW, you essentially get a snapshot of
		 * the region at the time of mapping.  MAP_PRIVATE means only
		 * that your changes are not reflected back to the object.
		 * Changes made by others will be seen.
		 */
		else {
			vm_map_t tmap;
			vm_offset_t off;

			/* locate and allocate the target address space */
			rv = vm_map_find(map, NULL, (vm_offset_t)0,
					 addr, size, fitit);
			if (rv != KERN_SUCCESS) {
				vm_object_deallocate(object);
				goto out;
			}
			tmap = vm_map_create(pmap_create(size), VM_MIN_ADDRESS,
					     VM_MIN_ADDRESS+size, TRUE);
			off = VM_MIN_ADDRESS;
			rv = vm_allocate_with_pager(tmap, &off, size,
						    TRUE, pager,
						    (vm_offset_t)foff, FALSE);
			if (rv != KERN_SUCCESS) {
				vm_object_deallocate(object);
				vm_map_deallocate(tmap);
				goto out;
			}
			/*
			 * (XXX)
			 * MAP_PRIVATE implies that we see changes made by
			 * others.  To ensure that, we must guarantee that
			 * no copy object is created (otherwise original
			 * pages would be pushed to the copy object and we
			 * would never see changes made by others).  We
			 * totally sleaze it right now by marking the object
			 * internal temporarily.
			 */
			if ((flags & MAP_COPY) == 0)
				object->internal = TRUE;
			rv = vm_map_copy(map, tmap, *addr, size, off,
					 FALSE, FALSE);
			object->internal = FALSE;
			/*
			 * (XXX)
			 * My oh my, this only gets worse...
			 * Force creation of a shadow object so that
			 * vm_map_fork will do the right thing.
			 */
			if ((flags & MAP_COPY) == 0) {
				vm_map_t tmap;
				vm_map_entry_t tentry;
				vm_object_t tobject;
				vm_offset_t toffset;
				vm_prot_t tprot;
				boolean_t twired, tsu;

				tmap = map;
				vm_map_lookup(&tmap, *addr, VM_PROT_WRITE,
					      &tentry, &tobject, &toffset,
					      &tprot, &twired, &tsu);
				vm_map_lookup_done(tmap, tentry);
			}
			/*
			 * (XXX)
			 * Map copy code cannot detect sharing unless a
			 * sharing map is involved.  So we cheat and write
			 * protect everything ourselves.  Note we cannot
			 * use vm_object_pmap_copy() because that relies
			 * on the page copy_on_write bit which isn't
			 * always accurate with shared objects.
			 */
			vm_object_pmap_force_copy(object, (vm_offset_t)foff,
					    (vm_offset_t)foff+size);
			vm_object_deallocate(object);
			vm_map_deallocate(tmap);
			if (rv != KERN_SUCCESS)
				goto out;
		}
#ifdef DEBUG
		if (mmapdebug & MDB_MAPIT)
			printf("vm_mmap(%d): FILE *addr %x size %x pager %x\n",
			       curproc->p_pid, *addr, size, pager);
#endif
	}
	/*
	 * Correct protection (default is VM_PROT_ALL).
	 * Note that we set the maximum protection.  This may not be
	 * entirely correct.  Maybe the maximum protection should be based
	 * on the object permissions where it makes sense (e.g. a vnode).
	 *
	 * Changed my mind: leave max prot at VM_PROT_ALL.
	 */
	if (prot != VM_PROT_ALL) {
		rv = vm_map_protect(map, *addr, *addr+size, prot, FALSE);
		if (rv != KERN_SUCCESS) {
			(void) vm_deallocate(map, *addr, size);
			goto out;
		}
	}
	/*
	 * Shared memory is also shared with children.
	 */
	if (flags & MAP_SHARED) {
		rv = vm_inherit(map, *addr, size, VM_INHERIT_SHARE);
		if (rv != KERN_SUCCESS) {
			(void) vm_deallocate(map, *addr, size);
			goto out;
		}
	}
out:
#ifdef DEBUG
	if (mmapdebug & MDB_MAPIT)
		printf("vm_mmap: rv %d\n", rv);
#endif
	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}

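/*
 * Illustrative kernel-internal sketch (assumption): per the comment
 * above vm_mmap(), exec-style code could map a vnode copy-on-write
 * at a fixed address with:
 *
 *	error = vm_mmap(&p->p_vmspace->vm_map, &addr, size,
 *			VM_PROT_READ|VM_PROT_EXECUTE,
 *			MAP_FILE|MAP_PRIVATE|MAP_FIXED,
 *			(caddr_t)vp, (vm_offset_t)off);
 *
 * KERN_* status from the VM layer is folded into errno values by the
 * switch at the end of the routine.
 */
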
/*
 * Internal bastardized version of Mach's vm_region system call.
 * Given address and size it returns map attributes as well
 * as the (locked) object mapped at that location.
 */
vm_region(map, addr, size, prot, max_prot, inheritance, shared, object, objoff)
	vm_map_t	map;
	vm_offset_t	*addr;		/* IN/OUT */
	vm_size_t	*size;		/* OUT */
	vm_prot_t	*prot;		/* OUT */
	vm_prot_t	*max_prot;	/* OUT */
	vm_inherit_t	*inheritance;	/* OUT */
	boolean_t	*shared;	/* OUT */
	vm_object_t	*object;	/* OUT */
	vm_offset_t	*objoff;	/* OUT */
{
	vm_map_entry_t	tmp_entry;
	register
	vm_map_entry_t	entry;
	register
	vm_offset_t	tmp_offset;
	vm_offset_t	start;

	if (map == NULL)
		return(KERN_INVALID_ARGUMENT);

	start = *addr;

	vm_map_lock_read(map);
	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
		if ((entry = tmp_entry->next) == &map->header) {
			vm_map_unlock_read(map);
			return(KERN_NO_SPACE);
		}
		start = entry->start;
		*addr = start;
	} else
		entry = tmp_entry;

	*prot = entry->protection;
	*max_prot = entry->max_protection;
	*inheritance = entry->inheritance;

	tmp_offset = entry->offset + (start - entry->start);
	*size = (entry->end - start);

	if (entry->is_a_map) {
		register vm_map_t share_map;
		vm_size_t share_size;

		share_map = entry->object.share_map;

		vm_map_lock_read(share_map);
		(void) vm_map_lookup_entry(share_map, tmp_offset, &tmp_entry);

		if ((share_size = (tmp_entry->end - tmp_offset)) < *size)
			*size = share_size;

		vm_object_lock(tmp_entry->object);
		*object = tmp_entry->object.vm_object;
		*objoff = tmp_entry->offset + (tmp_offset - tmp_entry->start);

		*shared = (share_map->ref_count != 1);
		vm_map_unlock_read(share_map);
	} else {
		vm_object_lock(entry->object);
		*object = entry->object.vm_object;
		*objoff = tmp_offset;

		*shared = FALSE;
	}

	vm_map_unlock_read(map);

	return(KERN_SUCCESS);
}
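
/*
 * Illustrative sketch (assumption): msync() above is the one caller
 * in this file.  The object comes back locked on KERN_SUCCESS, so
 * the caller must unlock it when done:
 *
 *	rv = vm_region(map, &addr, &size, &prot, &mprot,
 *		       &inherit, &shared, &object, &objoff);
 *	if (rv == KERN_SUCCESS) {
 *		... examine object ...
 *		vm_object_unlock(object);
 *	}
 */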

/*
 * Yet another bastard routine.
 */
vm_allocate_with_pager(map, addr, size, fitit, pager, poffset, internal)
	register vm_map_t	map;
	register vm_offset_t	*addr;
	register vm_size_t	size;
	boolean_t		fitit;
	vm_pager_t		pager;
	vm_offset_t		poffset;
	boolean_t		internal;
{
	register vm_object_t	object;
	register int		result;

	if (map == NULL)
		return(KERN_INVALID_ARGUMENT);

	*addr = trunc_page(*addr);
	size = round_page(size);

	/*
	 *	Lookup the pager/paging-space in the object cache.
	 *	If it's not there, then create a new object and cache
	 *	it.
	 */
	object = vm_object_lookup(pager);
	vm_stat.lookups++;
	if (object == NULL) {
		object = vm_object_allocate(size);
		vm_object_enter(object, pager);
	} else
		vm_stat.hits++;
	object->internal = internal;

	result = vm_map_find(map, object, poffset, addr, size, fitit);
	if (result != KERN_SUCCESS)
		vm_object_deallocate(object);
	else if (pager != NULL)
		vm_object_setpager(object, pager, (vm_offset_t) 0, TRUE);
	return(result);
}
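
/*
 * Illustrative sketch (assumption): a successful call leaves the
 * object holding the reference gained here, so callers such as
 * vm_mmap() above later drop it via pager_cache() or
 * vm_object_deallocate():
 *
 *	rv = vm_allocate_with_pager(map, &addr, size, TRUE,
 *				    pager, foff, FALSE);
 */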

/*
 * XXX: this routine belongs in vm_map.c.
 *
 * Returns TRUE if the range [start, end) is allocated in either
 * a single entry (single_entry == TRUE) or multiple contiguous
 * entries (single_entry == FALSE).
 *
 * start and end should be page aligned.
 */
boolean_t
vm_map_is_allocated(map, start, end, single_entry)
	vm_map_t map;
	vm_offset_t start, end;
	boolean_t single_entry;
{
	vm_map_entry_t mapent;
	register vm_offset_t nend;

	vm_map_lock_read(map);

	/*
	 * Start address not in any entry
	 */
	if (!vm_map_lookup_entry(map, start, &mapent)) {
		vm_map_unlock_read(map);
		return (FALSE);
	}
	/*
	 * Find the maximum stretch of contiguously allocated space
	 */
	nend = mapent->end;
	if (!single_entry) {
		mapent = mapent->next;
		while (mapent != &map->header && mapent->start == nend) {
			nend = mapent->end;
			mapent = mapent->next;
		}
	}

	vm_map_unlock_read(map);
	return (end <= nend);
}
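
/*
 * Illustrative sketch (assumption): with contiguous entries covering
 * [A,B) and [B,C), the multi-entry form accepts the full span while
 * the single-entry form does not:
 *
 *	vm_map_is_allocated(map, A, C, FALSE);	TRUE
 *	vm_map_is_allocated(map, A, C, TRUE);	FALSE, two entries
 */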

#include "../vm/vm_page.h"

/*
 * Doesn't trust the COW bit in the page structure.
 * vm_fault can improperly set it.
 */
vm_object_pmap_force_copy(object, start, end)
	register vm_object_t	object;
	register vm_offset_t	start;
	register vm_offset_t	end;
{
	register vm_page_t	p;

	if (object == NULL)
		return;

	vm_object_lock(object);
	p = (vm_page_t) queue_first(&object->memq);
	while (!queue_end(&object->memq, (queue_entry_t) p)) {
		if (start <= p->offset && p->offset < end) {
			pmap_copy_on_write(VM_PAGE_TO_PHYS(p));
			p->copy_on_write = TRUE;
		}
		p = (vm_page_t) queue_next(&p->listq);
	}
	vm_object_unlock(object);
}