xref: /csrg-svn/sys/vm/vm_pager.c (revision 65481)
1 /*
2  * Copyright (c) 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * The Mach Operating System project at Carnegie-Mellon University.
7  *
8  * %sccs.include.redist.c%
9  *
10  *	@(#)vm_pager.c	8.5 (Berkeley) 01/04/94
11  *
12  *
13  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
14  * All rights reserved.
15  *
16  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
17  *
18  * Permission to use, copy, modify and distribute this software and
19  * its documentation is hereby granted, provided that both the copyright
20  * notice and this permission notice appear in all copies of the
21  * software, derivative works or modified versions, and any portions
22  * thereof, and that both notices appear in supporting documentation.
23  *
24  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
25  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
26  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
27  *
28  * Carnegie Mellon requests users of this software to return to
29  *
30  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
31  *  School of Computer Science
32  *  Carnegie Mellon University
33  *  Pittsburgh PA 15213-3890
34  *
35  * any improvements or extensions that they make and grant Carnegie the
36  * rights to redistribute these changes.
37  */
38 
39 /*
40  *	Paging space routine stubs.  Emulates a matchmaker-like interface
41  *	for builtin pagers.
42  */
43 
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/malloc.h>
47 
48 #include <vm/vm.h>
49 #include <vm/vm_page.h>
50 #include <vm/vm_kern.h>
51 
/*
 * Optional pager back-ends.  Each is compiled in only when the
 * corresponding option is configured; its ops vector is defined in
 * the pager's own source file.
 */
#ifdef SWAPPAGER
extern struct pagerops swappagerops;	/* anonymous memory / swap */
#endif

#ifdef VNODEPAGER
extern struct pagerops vnodepagerops;	/* file-backed (vnode) memory */
#endif

#ifdef DEVPAGER
extern struct pagerops devicepagerops;	/* device (e.g. frame buffer) memory */
#endif

/*
 * Table of pager ops vectors, indexed by pager type (PG_*).
 * A slot is NULL when that pager was configured out; consumers must
 * check for NULL before dereferencing an entry.
 */
struct pagerops *pagertab[] = {
#ifdef SWAPPAGER
	&swappagerops,		/* PG_SWAP */
#else
	NULL,
#endif
#ifdef VNODEPAGER
	&vnodepagerops,		/* PG_VNODE */
#else
	NULL,
#endif
#ifdef DEVPAGER
	&devicepagerops,	/* PG_DEV */
#else
	NULL,
#endif
};
/* Number of entries in pagertab, including NULL placeholders. */
int npagers = sizeof (pagertab) / sizeof (pagertab[0]);

/*
 * Default pager for PG_DFLT allocations.  NULL here; set during pager
 * initialization (presumably by one of the pgo_init routines — see
 * vm_pager_init, which panics if it is still NULL afterwards).
 */
struct pagerops *dfltpagerops = NULL;	/* default pager */

/*
 * Kernel address space for mapping pages.
 * Used by pagers where KVAs are needed for IO.
 */
#define PAGER_MAP_SIZE	(256 * PAGE_SIZE)
vm_map_t pager_map;			/* submap of kernel_map for pager KVAs */
vm_offset_t pager_sva, pager_eva;	/* start/end of pager_map's range */
92 
93 void
94 vm_pager_init()
95 {
96 	struct pagerops **pgops;
97 
98 	/*
99 	 * Allocate a kernel submap for tracking get/put page mappings
100 	 */
101 	pager_map = kmem_suballoc(kernel_map, &pager_sva, &pager_eva,
102 				  PAGER_MAP_SIZE, FALSE);
103 	/*
104 	 * Initialize known pagers
105 	 */
106 	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
107 		if (pgops)
108 			(*(*pgops)->pgo_init)();
109 	if (dfltpagerops == NULL)
110 		panic("no default pager");
111 }
112 
113 /*
114  * Allocate an instance of a pager of the given type.
115  * Size, protection and offset parameters are passed in for pagers that
116  * need to perform page-level validation (e.g. the device pager).
117  */
118 vm_pager_t
119 vm_pager_allocate(type, handle, size, prot, off)
120 	int type;
121 	caddr_t handle;
122 	vm_size_t size;
123 	vm_prot_t prot;
124 	vm_offset_t off;
125 {
126 	struct pagerops *ops;
127 
128 	ops = (type == PG_DFLT) ? dfltpagerops : pagertab[type];
129 	if (ops)
130 		return ((*ops->pgo_alloc)(handle, size, prot, off));
131 	return (NULL);
132 }
133 
134 void
135 vm_pager_deallocate(pager)
136 	vm_pager_t	pager;
137 {
138 	if (pager == NULL)
139 		panic("vm_pager_deallocate: null pager");
140 
141 	VM_PAGER_DEALLOC(pager);
142 }
143 
144 int
145 vm_pager_get(pager, m, sync)
146 	vm_pager_t	pager;
147 	vm_page_t	m;
148 	boolean_t	sync;
149 {
150 	extern boolean_t vm_page_zero_fill();
151 
152 	if (pager == NULL)
153 		return(vm_page_zero_fill(m) ? VM_PAGER_OK : VM_PAGER_FAIL);
154 	return(VM_PAGER_GET(pager, m, sync));
155 }
156 
157 int
158 vm_pager_put(pager, m, sync)
159 	vm_pager_t	pager;
160 	vm_page_t	m;
161 	boolean_t	sync;
162 {
163 	if (pager == NULL)
164 		panic("vm_pager_put: null pager");
165 	return(VM_PAGER_PUT(pager, m, sync));
166 }
167 
168 boolean_t
169 vm_pager_has_page(pager, offset)
170 	vm_pager_t	pager;
171 	vm_offset_t	offset;
172 {
173 	if (pager == NULL)
174 		panic("vm_pager_has_page");
175 	return(VM_PAGER_HASPAGE(pager, offset));
176 }
177 
178 /*
179  * Called by pageout daemon before going back to sleep.
180  * Gives pagers a chance to clean up any completed async pageing operations.
181  */
182 void
183 vm_pager_sync()
184 {
185 	struct pagerops **pgops;
186 
187 	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
188 		if (pgops)
189 			(*(*pgops)->pgo_putpage)(NULL, NULL, FALSE);
190 }
191 
/*
 * Map a physical page into kernel virtual address space so a pager can
 * do IO on it.  Allocates one page of KVA from pager_map (sleeping in
 * kmem_alloc_wait until space is available) and enters a wired mapping
 * for the page's physical address.  Returns the KVA; undo with
 * vm_pager_unmap_page().
 *
 * The page must be busy (caller holds it for IO); under DEBUG,
 * PG_PAGEROWNED tracks double-mapping by pagers.
 */
vm_offset_t
vm_pager_map_page(m)
	vm_page_t	m;
{
	vm_offset_t kva;

#ifdef DEBUG
	if ((m->flags & PG_BUSY) == 0)
		panic("vm_pager_map_page: page not busy");
	if (m->flags & PG_PAGEROWNED)
		printf("vm_pager_map_page: page %x already in pager\n", m);
#endif
	/* May sleep until a free page of KVA appears in pager_map. */
	kva = kmem_alloc_wait(pager_map, PAGE_SIZE);
#ifdef DEBUG
	m->flags |= PG_PAGEROWNED;
#endif
	/* Wired mapping (TRUE): pager IO must not fault on this KVA. */
	pmap_enter(vm_map_pmap(pager_map), kva, VM_PAGE_TO_PHYS(m),
		   VM_PROT_DEFAULT, TRUE);
	return(kva);
}
212 
/*
 * Undo vm_pager_map_page(): remove the mapping at kva and return the
 * page of KVA to pager_map, waking anyone sleeping in kmem_alloc_wait.
 */
void
vm_pager_unmap_page(kva)
	vm_offset_t	kva;
{
#ifdef DEBUG
	vm_page_t m;

	/*
	 * Reverse-translate kva to its vm_page before the mapping is
	 * torn down below — pmap_extract would fail afterwards.
	 */
	m = PHYS_TO_VM_PAGE(pmap_extract(vm_map_pmap(pager_map), kva));
#endif
	pmap_remove(vm_map_pmap(pager_map), kva, kva + PAGE_SIZE);
	kmem_free_wakeup(pager_map, kva, PAGE_SIZE);
#ifdef DEBUG
	/* Balance the PG_PAGEROWNED set in vm_pager_map_page. */
	if (m->flags & PG_PAGEROWNED)
		m->flags &= ~PG_PAGEROWNED;
	else
		printf("vm_pager_unmap_page: page %x(%x/%x) not owned\n",
		       m, kva, VM_PAGE_TO_PHYS(m));
#endif
}
232 
233 vm_pager_t
234 vm_pager_lookup(pglist, handle)
235 	register struct pagerlst *pglist;
236 	caddr_t handle;
237 {
238 	register vm_pager_t pager;
239 
240 	for (pager = pglist->tqh_first; pager; pager = pager->pg_list.tqe_next)
241 		if (pager->pg_handle == handle)
242 			return(pager);
243 	return(NULL);
244 }
245 
246 /*
247  * This routine gains a reference to the object.
248  * Explicit deallocation is necessary.
249  */
250 int
251 pager_cache(object, should_cache)
252 	vm_object_t	object;
253 	boolean_t	should_cache;
254 {
255 	if (object == NULL)
256 		return(KERN_INVALID_ARGUMENT);
257 
258 	vm_object_cache_lock();
259 	vm_object_lock(object);
260 	if (should_cache)
261 		object->flags |= OBJ_CANPERSIST;
262 	else
263 		object->flags &= ~OBJ_CANPERSIST;
264 	vm_object_unlock(object);
265 	vm_object_cache_unlock();
266 
267 	vm_object_deallocate(object);
268 
269 	return(KERN_SUCCESS);
270 }
271