xref: /csrg-svn/sys/vm/vm_pager.c (revision 45748)
1*45748Smckusick /*
2*45748Smckusick  * Copyright (c) 1985, 1986 Avadis Tevanian, Jr., Michael Wayne Young
3*45748Smckusick  * Copyright (c) 1987 Carnegie-Mellon University
4*45748Smckusick  * Copyright (c) 1991 Regents of the University of California.
5*45748Smckusick  * All rights reserved.
6*45748Smckusick  *
7*45748Smckusick  * This code is derived from software contributed to Berkeley by
8*45748Smckusick  * The Mach Operating System project at Carnegie-Mellon University.
9*45748Smckusick  *
10*45748Smckusick  * The CMU software License Agreement specifies the terms and conditions
11*45748Smckusick  * for use and redistribution.
12*45748Smckusick  *
13*45748Smckusick  *	@(#)vm_pager.c	7.1 (Berkeley) 12/05/90
14*45748Smckusick  */
15*45748Smckusick 
16*45748Smckusick /*
17*45748Smckusick  *	Paging space routine stubs.  Emulates a matchmaker-like interface
18*45748Smckusick  *	for builtin pagers.
19*45748Smckusick  */
20*45748Smckusick 
21*45748Smckusick #include "param.h"
22*45748Smckusick #include "queue.h"
23*45748Smckusick #include "malloc.h"
24*45748Smckusick 
25*45748Smckusick #include "../vm/vm_param.h"
26*45748Smckusick #include "../vm/vm_pager.h"
27*45748Smckusick #include "../vm/vm_page.h"
28*45748Smckusick #include "../vm/vm_prot.h"
29*45748Smckusick #include "../vm/vm_map.h"
30*45748Smckusick #include "../vm/vm_kern.h"
31*45748Smckusick 
32*45748Smckusick #include "../vm/pmap.h"
33*45748Smckusick 
/*
 * Per-pager configuration.  Each generated "xxxpager.h" header defines
 * NXXXPAGER (the config count for that pager).  When a pager is
 * configured in, its ops vector is declared extern; when it is
 * configured out, its name is #defined to PAGER_OPS_NULL so that the
 * corresponding slot in pagertab below still exists and keeps the
 * PG_* indices stable.
 */
#include "swappager.h"
#if NSWAPPAGER > 0
extern struct pagerops swappagerops;
#else
#define	swappagerops	PAGER_OPS_NULL
#endif
#include "vnodepager.h"
#if NVNODEPAGER > 0
extern struct pagerops vnodepagerops;
#else
#define	vnodepagerops	PAGER_OPS_NULL
#endif
#include "devpager.h"
#if NDEVPAGER > 0
extern struct pagerops devicepagerops;
#else
#define	devicepagerops	PAGER_OPS_NULL
#endif
52*45748Smckusick 
/*
 * Table of pager operations vectors, indexed by pager type (PG_*).
 * vm_pager_allocate() and vm_pager_init() index/iterate this table.
 *
 * NOTE(review): when a pager is configured out, the entry expands to
 * &PAGER_OPS_NULL (see the #defines above) -- verify PAGER_OPS_NULL
 * names an addressable object in vm_pager.h, otherwise a configured-out
 * pager would not compile, and vm_pager_init()'s unconditional
 * pgo_init call would need a null check.
 */
struct pagerops *pagertab[] = {
	&swappagerops,		/* PG_SWAP */
	&vnodepagerops,		/* PG_VNODE */
	&devicepagerops,	/* PG_DEV */
};
/* number of entries in pagertab */
int npagers = sizeof (pagertab) / sizeof (pagertab[0]);

/* default pager; a pager's init routine is expected to set this */
struct pagerops *dfltpagerops = PAGER_OPS_NULL;	/* default pager */
61*45748Smckusick 
/*
 * Kernel address space for mapping pages.
 * Used by pagers where KVAs are needed for IO.
 * The submap is carved out of kernel_map in vm_pager_init() and
 * individual pages are mapped/unmapped by vm_pager_map_page() and
 * vm_pager_unmap_page() below.
 */
#define PAGER_MAP_SIZE	(256 * PAGE_SIZE)
vm_map_t pager_map;
68*45748Smckusick 
69*45748Smckusick void
70*45748Smckusick vm_pager_init()
71*45748Smckusick {
72*45748Smckusick 	vm_offset_t whocares1, whocares2;
73*45748Smckusick 	struct pagerops **pgops;
74*45748Smckusick 
75*45748Smckusick 	/*
76*45748Smckusick 	 * Allocate a kernel submap for tracking get/put page mappings
77*45748Smckusick 	 */
78*45748Smckusick 	pager_map = kmem_suballoc(kernel_map, &whocares1, &whocares2,
79*45748Smckusick 				  PAGER_MAP_SIZE, FALSE);
80*45748Smckusick 	/*
81*45748Smckusick 	 * Initialize known pagers
82*45748Smckusick 	 */
83*45748Smckusick 	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
84*45748Smckusick 		(*(*pgops)->pgo_init)();
85*45748Smckusick 	if (dfltpagerops == PAGER_OPS_NULL)
86*45748Smckusick 		panic("no default pager");
87*45748Smckusick }
88*45748Smckusick 
89*45748Smckusick /*
90*45748Smckusick  * Allocate an instance of a pager of the given type.
91*45748Smckusick  */
92*45748Smckusick vm_pager_t
93*45748Smckusick vm_pager_allocate(type, handle, size, prot)
94*45748Smckusick 	int type;
95*45748Smckusick 	caddr_t handle;
96*45748Smckusick 	vm_size_t size;
97*45748Smckusick 	vm_prot_t prot;
98*45748Smckusick {
99*45748Smckusick 	vm_pager_t pager;
100*45748Smckusick 	struct pagerops *ops;
101*45748Smckusick 
102*45748Smckusick 	ops = (type == PG_DFLT) ? dfltpagerops : pagertab[type];
103*45748Smckusick 	return((*ops->pgo_alloc)(handle, size, prot));
104*45748Smckusick }
105*45748Smckusick 
106*45748Smckusick void
107*45748Smckusick vm_pager_deallocate(pager)
108*45748Smckusick 	vm_pager_t	pager;
109*45748Smckusick {
110*45748Smckusick 	if (pager == vm_pager_null)
111*45748Smckusick 		panic("vm_pager_deallocate: null pager");
112*45748Smckusick 
113*45748Smckusick 	VM_PAGER_DEALLOC(pager);
114*45748Smckusick }
115*45748Smckusick 
116*45748Smckusick vm_pager_get(pager, m, sync)
117*45748Smckusick 	vm_pager_t	pager;
118*45748Smckusick 	vm_page_t	m;
119*45748Smckusick 	boolean_t	sync;
120*45748Smckusick {
121*45748Smckusick 	extern boolean_t vm_page_zero_fill();
122*45748Smckusick 
123*45748Smckusick 	if (pager == vm_pager_null)
124*45748Smckusick 		return(vm_page_zero_fill(m) ? VM_PAGER_OK : VM_PAGER_FAIL);
125*45748Smckusick 	return(VM_PAGER_GET(pager, m, sync));
126*45748Smckusick }
127*45748Smckusick 
128*45748Smckusick vm_pager_put(pager, m, sync)
129*45748Smckusick 	vm_pager_t	pager;
130*45748Smckusick 	vm_page_t	m;
131*45748Smckusick 	boolean_t	sync;
132*45748Smckusick {
133*45748Smckusick 	if (pager == vm_pager_null)
134*45748Smckusick 		panic("vm_pager_put: null pager");
135*45748Smckusick 	return(VM_PAGER_PUT(pager, m, sync));
136*45748Smckusick }
137*45748Smckusick 
138*45748Smckusick boolean_t
139*45748Smckusick vm_pager_has_page(pager, offset)
140*45748Smckusick 	vm_pager_t	pager;
141*45748Smckusick 	vm_offset_t	offset;
142*45748Smckusick {
143*45748Smckusick 	if (pager == vm_pager_null)
144*45748Smckusick 		panic("vm_pager_has_page");
145*45748Smckusick 	return(VM_PAGER_HASPAGE(pager, offset));
146*45748Smckusick }
147*45748Smckusick 
148*45748Smckusick /*
149*45748Smckusick  * Called by pageout daemon before going back to sleep.
150*45748Smckusick  * Gives pagers a chance to clean up any completed async pageing operations.
151*45748Smckusick  */
152*45748Smckusick void
153*45748Smckusick vm_pager_sync()
154*45748Smckusick {
155*45748Smckusick 	struct pagerops **pgops;
156*45748Smckusick 
157*45748Smckusick 	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
158*45748Smckusick 		(*(*pgops)->pgo_putpage)(VM_PAGER_NULL, VM_PAGE_NULL, FALSE);
159*45748Smckusick }
160*45748Smckusick 
/*
 * vm_pager_map_page:
 *
 *	Map the given physical page at a kernel virtual address so a
 *	pager can do IO on it.  May sleep in kmem_alloc_wait() until
 *	pager_map space is available.  The caller must release the
 *	mapping with vm_pager_unmap_page().
 */
vm_offset_t
vm_pager_map_page(m)
	vm_page_t	m;
{
	vm_offset_t kva;

	kva = kmem_alloc_wait(pager_map, PAGE_SIZE);
#if 1
	/*
	 * XXX: cannot use pmap_enter as the mapping would be
	 * removed by a pmap_remove_all().
	 */
	/*
	 * NOTE(review): builds the pte by hand (valid + cache-inhibit,
	 * presumably) and flushes the TLB entry with TBIS.  PG_CI/PG_V
	 * and TBIS look machine-dependent (hp300-style MMU) -- confirm
	 * this "#if 1" path is not compiled on other ports.
	 */
	*(int *)kvtopte(kva) = VM_PAGE_TO_PHYS(m) | PG_CI | PG_V;
	TBIS(kva);
#else
	pmap_enter(vm_map_pmap(pager_map), kva, VM_PAGE_TO_PHYS(m),
		   VM_PROT_DEFAULT, TRUE);
#endif
	return(kva);
}
181*45748Smckusick 
/*
 * vm_pager_unmap_page:
 *
 *	Undo a vm_pager_map_page() mapping: invalidate the pte, flush
 *	the TLB entry, and return the KVA to pager_map (waking anyone
 *	sleeping for pager_map space).
 *
 *	NOTE(review): the direct pte write mirrors the machine-dependent
 *	"#if 1" path in vm_pager_map_page() above -- the two must stay
 *	in sync.
 */
void
vm_pager_unmap_page(kva)
	vm_offset_t	kva;
{
#if 1
	*(int *)kvtopte(kva) = PG_NV;
	TBIS(kva);
#endif
	kmem_free_wakeup(pager_map, kva, PAGE_SIZE);
}
192*45748Smckusick 
193*45748Smckusick vm_pager_t
194*45748Smckusick vm_pager_lookup(list, handle)
195*45748Smckusick 	register queue_head_t *list;
196*45748Smckusick 	caddr_t handle;
197*45748Smckusick {
198*45748Smckusick 	register vm_pager_t pager;
199*45748Smckusick 
200*45748Smckusick 	pager = (vm_pager_t) queue_first(list);
201*45748Smckusick 	while (!queue_end(list, (queue_entry_t)pager)) {
202*45748Smckusick 		if (pager->pg_handle == handle)
203*45748Smckusick 			return(pager);
204*45748Smckusick 		pager = (vm_pager_t) queue_next(&pager->pg_list);
205*45748Smckusick 	}
206*45748Smckusick 	return(VM_PAGER_NULL);
207*45748Smckusick }
208*45748Smckusick 
/*
 * This routine gains a reference to the object.
 * Explicit deallocation is necessary.
 *
 * Sets whether the object may persist in the object cache after its
 * last reference is dropped, then drops one reference via
 * vm_object_deallocate().  Returns KERN_INVALID_ARGUMENT for a null
 * object, KERN_SUCCESS otherwise.
 *
 * NOTE(review): the header sentence above ("gains a reference") reads
 * oddly against the vm_object_deallocate() call below, which releases
 * one -- presumably the caller's reference is consumed here; confirm
 * against callers.
 *
 * Lock order as coded: cache lock before object lock, released in
 * reverse order.
 */
pager_cache(object, should_cache)
	vm_object_t	object;
	boolean_t	should_cache;
{
	if (object == VM_OBJECT_NULL)
		return(KERN_INVALID_ARGUMENT);

	vm_object_cache_lock();
	vm_object_lock(object);
	object->can_persist = should_cache;
	vm_object_unlock(object);
	vm_object_cache_unlock();

	vm_object_deallocate(object);

	return(KERN_SUCCESS);
}
230