xref: /csrg-svn/sys/vm/vm_pager.c (revision 48386)
1 /*
2  * Copyright (c) 1985, 1986 Avadis Tevanian, Jr., Michael Wayne Young
3  * Copyright (c) 1987 Carnegie-Mellon University
4  * Copyright (c) 1991 Regents of the University of California.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to Berkeley by
8  * The Mach Operating System project at Carnegie-Mellon University.
9  *
10  * The CMU software License Agreement specifies the terms and conditions
11  * for use and redistribution.
12  *
13  *	@(#)vm_pager.c	7.2 (Berkeley) 04/20/91
14  */
15 
16 /*
17  *	Paging space routine stubs.  Emulates a matchmaker-like interface
18  *	for builtin pagers.
19  */
20 
21 #include "param.h"
22 #include "malloc.h"
23 
24 #include "vm.h"
25 #include "vm_page.h"
26 #include "vm_kern.h"
27 
28 #ifdef hp300
29 #include "../hp300/hp300/pte.h"			/* XXX XXX XXX */
30 #endif
31 
32 #include "swappager.h"
33 
34 #if NSWAPPAGER > 0
35 extern struct pagerops swappagerops;
36 #else
37 #define	swappagerops	NULL
38 #endif
39 #include "vnodepager.h"
40 #if NVNODEPAGER > 0
41 extern struct pagerops vnodepagerops;
42 #else
43 #define	vnodepagerops	NULL
44 #endif
45 #include "devpager.h"
46 #if NDEVPAGER > 0
47 extern struct pagerops devicepagerops;
48 #else
49 #define	devicepagerops	NULL
50 #endif
51 
/*
 * Table of all known pagers, indexed by pager type (PG_SWAP,
 * PG_VNODE, PG_DEV).  Entries for pagers not configured into this
 * kernel are the NULL stubs #defined above.
 */
struct pagerops *pagertab[] = {
	&swappagerops,		/* PG_SWAP */
	&vnodepagerops,		/* PG_VNODE */
	&devicepagerops,	/* PG_DEV */
};
int npagers = sizeof (pagertab) / sizeof (pagertab[0]);

/*
 * Pager used when PG_DFLT is requested.  vm_pager_init() panics if
 * this is still NULL after all pagers have initialized, so presumably
 * one pager's pgo_init routine sets it -- confirm in the pager sources.
 */
struct pagerops *dfltpagerops = NULL;	/* default pager */

/*
 * Kernel address space for mapping pages.
 * Used by pagers where KVAs are needed for IO.
 */
#define PAGER_MAP_SIZE	(256 * PAGE_SIZE)
vm_map_t pager_map;
67 
68 void
69 vm_pager_init()
70 {
71 	vm_offset_t whocares1, whocares2;
72 	struct pagerops **pgops;
73 
74 	/*
75 	 * Allocate a kernel submap for tracking get/put page mappings
76 	 */
77 	pager_map = kmem_suballoc(kernel_map, &whocares1, &whocares2,
78 				  PAGER_MAP_SIZE, FALSE);
79 	/*
80 	 * Initialize known pagers
81 	 */
82 	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
83 		(*(*pgops)->pgo_init)();
84 	if (dfltpagerops == NULL)
85 		panic("no default pager");
86 }
87 
88 /*
89  * Allocate an instance of a pager of the given type.
90  */
91 vm_pager_t
92 vm_pager_allocate(type, handle, size, prot)
93 	int type;
94 	caddr_t handle;
95 	vm_size_t size;
96 	vm_prot_t prot;
97 {
98 	vm_pager_t pager;
99 	struct pagerops *ops;
100 
101 	ops = (type == PG_DFLT) ? dfltpagerops : pagertab[type];
102 	return((*ops->pgo_alloc)(handle, size, prot));
103 }
104 
105 void
106 vm_pager_deallocate(pager)
107 	vm_pager_t	pager;
108 {
109 	if (pager == NULL)
110 		panic("vm_pager_deallocate: null pager");
111 
112 	VM_PAGER_DEALLOC(pager);
113 }
114 
115 vm_pager_get(pager, m, sync)
116 	vm_pager_t	pager;
117 	vm_page_t	m;
118 	boolean_t	sync;
119 {
120 	extern boolean_t vm_page_zero_fill();
121 
122 	if (pager == NULL)
123 		return(vm_page_zero_fill(m) ? VM_PAGER_OK : VM_PAGER_FAIL);
124 	return(VM_PAGER_GET(pager, m, sync));
125 }
126 
127 vm_pager_put(pager, m, sync)
128 	vm_pager_t	pager;
129 	vm_page_t	m;
130 	boolean_t	sync;
131 {
132 	if (pager == NULL)
133 		panic("vm_pager_put: null pager");
134 	return(VM_PAGER_PUT(pager, m, sync));
135 }
136 
137 boolean_t
138 vm_pager_has_page(pager, offset)
139 	vm_pager_t	pager;
140 	vm_offset_t	offset;
141 {
142 	if (pager == NULL)
143 		panic("vm_pager_has_page");
144 	return(VM_PAGER_HASPAGE(pager, offset));
145 }
146 
147 /*
148  * Called by pageout daemon before going back to sleep.
 * Gives pagers a chance to clean up any completed async paging operations.
150  */
151 void
152 vm_pager_sync()
153 {
154 	struct pagerops **pgops;
155 
156 	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
157 		(*(*pgops)->pgo_putpage)(NULL, NULL, FALSE);
158 }
159 
/*
 * Map the given page into kernel virtual address space so a pager
 * can do I/O on it.  Returns the KVA; the caller must release it
 * with vm_pager_unmap_page().  kmem_alloc_wait suggests this may
 * sleep until pager_map space is available.
 */
vm_offset_t
vm_pager_map_page(m)
	vm_page_t	m;
{
	vm_offset_t kva;

	kva = kmem_alloc_wait(pager_map, PAGE_SIZE);
#ifdef hp300
	/*
	 * XXX: cannot use pmap_enter as the mapping would be
	 * removed by a pmap_remove_all().
	 * Instead install the PTE by hand: the page's physical
	 * address, cache-inhibited (PG_CI) and valid (PG_V), then
	 * purge the stale translation from the TLB (TBIS).
	 */
	*(int *)kvtopte(kva) = VM_PAGE_TO_PHYS(m) | PG_CI | PG_V;
	TBIS(kva);
#else
	pmap_enter(vm_map_pmap(pager_map), kva, VM_PAGE_TO_PHYS(m),
		   VM_PROT_DEFAULT, TRUE);
#endif
	return(kva);
}
180 
/*
 * Release a KVA obtained from vm_pager_map_page(), waking anyone
 * sleeping for pager_map space.
 */
void
vm_pager_unmap_page(kva)
	vm_offset_t	kva;
{
#ifdef hp300
	/* invalidate the hand-installed PTE and flush its TLB entry */
	*(int *)kvtopte(kva) = PG_NV;
	TBIS(kva);
#endif
	kmem_free_wakeup(pager_map, kva, PAGE_SIZE);
}
191 
192 vm_pager_t
193 vm_pager_lookup(list, handle)
194 	register queue_head_t *list;
195 	caddr_t handle;
196 {
197 	register vm_pager_t pager;
198 
199 	pager = (vm_pager_t) queue_first(list);
200 	while (!queue_end(list, (queue_entry_t)pager)) {
201 		if (pager->pg_handle == handle)
202 			return(pager);
203 		pager = (vm_pager_t) queue_next(&pager->pg_list);
204 	}
205 	return(NULL);
206 }
207 
/*
 * Set whether a pager-backed object may persist in the object cache
 * after its last reference is dropped.
 * This routine gains a reference to the object.
 * Explicit deallocation is necessary.
 * Returns KERN_SUCCESS, or KERN_INVALID_ARGUMENT for a NULL object.
 */
pager_cache(object, should_cache)
	vm_object_t	object;
	boolean_t	should_cache;
{
	if (object == NULL)
		return(KERN_INVALID_ARGUMENT);

	/* lock order: object cache lock before the individual object lock */
	vm_object_cache_lock();
	vm_object_lock(object);
	object->can_persist = should_cache;
	vm_object_unlock(object);
	vm_object_cache_unlock();

	/* release the reference noted in the header comment */
	vm_object_deallocate(object);

	return(KERN_SUCCESS);
}
229