/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vm_user.c	8.2 (Berkeley) 01/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * User-exported virtual memory functions.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>

#include <vm/vm.h>

simple_lock_data_t	vm_alloc_lock;	/* XXX */

#ifdef MACHVMCOMPAT
/*
 * BSD style syscall interfaces to MACH calls
 * All return MACH return values.
 */
struct svm_allocate_args {
	vm_map_t map;
	vm_offset_t *addr;
	vm_size_t size;
	boolean_t anywhere;
};
/* ARGSUSED */
int
svm_allocate(p, uap, retval)
	struct proc *p;
	struct svm_allocate_args *uap;
	int *retval;
{
	vm_offset_t addr;
	int rv;

	uap->map = p->p_map;		/* XXX */

	if (copyin((caddr_t)uap->addr, (caddr_t)&addr, sizeof (addr)))
		rv = KERN_INVALID_ARGUMENT;
	else
		rv = vm_allocate(uap->map, &addr, uap->size, uap->anywhere);
	if (rv == KERN_SUCCESS) {
		if (copyout((caddr_t)&addr, (caddr_t)uap->addr, sizeof(addr)))
			rv = KERN_INVALID_ARGUMENT;
	}
	return((int)rv);
}
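
/*
 * Illustrative calling sketch, added for exposition and not part of the
 * original interface.  It assumes a user-level syscall stub named
 * "svm_allocate" exists; the stub name and "len" are hypothetical.
 * Callers of this interface test for a MACH return code rather than an
 * errno value:
 *
 *	vm_offset_t addr = 0;
 *
 *	if (svm_allocate(0, &addr, len, TRUE) == KERN_SUCCESS)
 *		... addr names a zero-filled, page-aligned region ...
 *
 * The map argument is ignored; the wrapper above substitutes the
 * calling process's own map (see the XXX comment).
 */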

struct svm_deallocate_args {
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
};
/* ARGSUSED */
int
svm_deallocate(p, uap, retval)
	struct proc *p;
	struct svm_deallocate_args *uap;
	int *retval;
{
	int rv;

	uap->map = p->p_map;		/* XXX */
	rv = vm_deallocate(uap->map, uap->addr, uap->size);
	return((int)rv);
}

struct svm_inherit_args {
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
	vm_inherit_t inherit;
};
/* ARGSUSED */
int
svm_inherit(p, uap, retval)
	struct proc *p;
	struct svm_inherit_args *uap;
	int *retval;
{
	int rv;

	uap->map = p->p_map;		/* XXX */
	rv = vm_inherit(uap->map, uap->addr, uap->size, uap->inherit);
	return((int)rv);
}

struct svm_protect_args {
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
	boolean_t setmax;
	vm_prot_t prot;
};
/* ARGSUSED */
int
svm_protect(p, uap, retval)
	struct proc *p;
	struct svm_protect_args *uap;
	int *retval;
{
	int rv;

	uap->map = p->p_map;		/* XXX */
	rv = vm_protect(uap->map, uap->addr, uap->size, uap->setmax, uap->prot);
	return((int)rv);
}

/*
 * vm_inherit sets the inheritance of the specified range in the
 * specified map.
 */
int
vm_inherit(map, start, size, new_inheritance)
	register vm_map_t map;
	vm_offset_t start;
	vm_size_t size;
	vm_inherit_t new_inheritance;
{
	if (map == NULL)
		return(KERN_INVALID_ARGUMENT);

	return(vm_map_inherit(map, trunc_page(start), round_page(start+size), new_inheritance));
}
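
#ifdef notdef
/*
 * Illustrative sketch, added for exposition (the helper name is
 * hypothetical and this code is not part of the original file): mark a
 * range of "map" so that a child created by fork() shares it with the
 * parent.  Page rounding of the range is done by vm_inherit itself.
 */
static int
vm_inherit_share_example(map, start, size)
	vm_map_t map;
	vm_offset_t start;
	vm_size_t size;
{
	return (vm_inherit(map, start, size, VM_INHERIT_SHARE));
}
#endif /* notdef */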

/*
 * vm_protect sets the protection of the specified range in the
 * specified map.
 */

int
vm_protect(map, start, size, set_maximum, new_protection)
	register vm_map_t map;
	vm_offset_t start;
	vm_size_t size;
	boolean_t set_maximum;
	vm_prot_t new_protection;
{
	if (map == NULL)
		return(KERN_INVALID_ARGUMENT);

	return(vm_map_protect(map, trunc_page(start), round_page(start+size), new_protection, set_maximum));
}
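
#ifdef notdef
/*
 * Illustrative sketch, added for exposition (hypothetical helper, not
 * part of the original file): make a range of "map" read-only.  Passing
 * FALSE for set_maximum changes the current protection; passing TRUE
 * would change the maximum protection instead.
 */
static int
vm_protect_readonly_example(map, start, size)
	vm_map_t map;
	vm_offset_t start;
	vm_size_t size;
{
	return (vm_protect(map, start, size, FALSE, VM_PROT_READ));
}
#endif /* notdef */
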
#endif

/*
 * vm_allocate allocates "zero fill" memory in the specified
 * map.
 */
int
vm_allocate(map, addr, size, anywhere)
	register vm_map_t map;
	register vm_offset_t *addr;
	register vm_size_t size;
	boolean_t anywhere;
{
	int result;

	if (map == NULL)
		return(KERN_INVALID_ARGUMENT);
	if (size == 0) {
		*addr = 0;
		return(KERN_SUCCESS);
	}

	if (anywhere)
		*addr = vm_map_min(map);
	else
		*addr = trunc_page(*addr);
	size = round_page(size);

	result = vm_map_find(map, NULL, (vm_offset_t) 0, addr, size, anywhere);

	return(result);
}
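
#ifdef notdef
/*
 * Illustrative sketch, added for exposition (hypothetical helper, not
 * part of the original file): request a zero-filled region anywhere in
 * "map".  With anywhere == TRUE the incoming *addrp is ignored and the
 * system chooses an address; with anywhere == FALSE the page-truncated
 * *addrp would be used.  On success *addrp holds the page-aligned start
 * of a region of round_page(size) bytes.
 */
static int
vm_allocate_example(map, addrp, size)
	vm_map_t map;
	vm_offset_t *addrp;
	vm_size_t size;
{
	*addrp = 0;
	return (vm_allocate(map, addrp, size, TRUE));
}
#endif /* notdef */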

/*
 * vm_deallocate deallocates the specified range of addresses in the
 * specified address map.
 */
int
vm_deallocate(map, start, size)
	register vm_map_t map;
	vm_offset_t start;
	vm_size_t size;
{
	if (map == NULL)
		return(KERN_INVALID_ARGUMENT);

	if (size == (vm_offset_t) 0)
		return(KERN_SUCCESS);

	return(vm_map_remove(map, trunc_page(start), round_page(start+size)));
}
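
#ifdef notdef
/*
 * Illustrative sketch, added for exposition (hypothetical helper, not
 * part of the original file): release a region previously obtained from
 * vm_allocate.  The range is page-rounded by vm_deallocate itself, so
 * the original address and size can be passed back unchanged.
 */
static int
vm_deallocate_example(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{
	return (vm_deallocate(map, addr, size));
}
#endif /* notdef */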

/*
 * Similar to vm_allocate but assigns an explicit pager.
 */
int
vm_allocate_with_pager(map, addr, size, anywhere, pager, poffset, internal)
	register vm_map_t map;
	register vm_offset_t *addr;
	register vm_size_t size;
	boolean_t anywhere;
	vm_pager_t pager;
	vm_offset_t poffset;
	boolean_t internal;
{
	register vm_object_t object;
	register int result;

	if (map == NULL)
		return(KERN_INVALID_ARGUMENT);

	*addr = trunc_page(*addr);
	size = round_page(size);

	/*
	 * Lookup the pager/paging-space in the object cache.
	 * If it's not there, then create a new object and cache
	 * it.
	 */
	object = vm_object_lookup(pager);
	cnt.v_lookups++;
	if (object == NULL) {
		object = vm_object_allocate(size);
		/*
		 * From Mike Hibler: "unnamed anonymous objects should never
		 * be on the hash list ...  For now you can just change
		 * vm_allocate_with_pager to not do vm_object_enter if this
		 * is an internal object ..."
		 */
		if (!internal)
			vm_object_enter(object, pager);
	} else
		cnt.v_hits++;
	if (internal)
		object->flags |= OBJ_INTERNAL;
	else {
		object->flags &= ~OBJ_INTERNAL;
		cnt.v_nzfod -= atop(size);
	}

	result = vm_map_find(map, object, poffset, addr, size, anywhere);
	if (result != KERN_SUCCESS)
		vm_object_deallocate(object);
	else if (pager != NULL)
		vm_object_setpager(object, pager, (vm_offset_t) 0, TRUE);
	return(result);
}
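
#ifdef notdef
/*
 * Illustrative sketch, added for exposition (hypothetical helper, not
 * part of the original file): map the pages described by an existing
 * pager (for example, one obtained elsewhere from vm_pager_allocate)
 * into "map" at a system-chosen address, starting at offset "poffset"
 * within the pager's backing store.
 */
static int
vm_allocate_with_pager_example(map, addrp, size, pager, poffset)
	vm_map_t map;
	vm_offset_t *addrp;
	vm_size_t size;
	vm_pager_t pager;
	vm_offset_t poffset;
{
	*addrp = 0;
	return (vm_allocate_with_pager(map, addrp, size, TRUE, pager,
	    poffset, FALSE));
}
#endif /* notdef */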