/*	$NetBSD: uvm_pager.h,v 1.15 2000/05/19 03:45:04 thorpej Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.h,v 1.1.2.14 1998/01/13 19:00:50 chuck Exp
 */

#ifndef _UVM_UVM_PAGER_H_
#define _UVM_UVM_PAGER_H_

/*
 * uvm_pager.h
 */

/*
 * async pager i/o descriptor structure
 */

TAILQ_HEAD(uvm_aiohead, uvm_aiodesc);

struct uvm_aiodesc {
	void (*aiodone) __P((struct uvm_aiodesc *));
						/* aio done function */
	vaddr_t kva;				/* KVA of mapped page(s) */
	int npages;				/* # of pages in I/O req */
	void *pd_ptr;				/* pager-dependent pointer */
	TAILQ_ENTRY(uvm_aiodesc) aioq;		/* linked list of aio's */
};
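
/*
 * Example (illustrative sketch, not part of this API): a pager starting
 * async I/O might obtain a descriptor through uvm_pagermapin() (declared
 * below) and fill in its completion state before handing the request to
 * the device.  The names "foo_aiodone" and "foo_softc" are hypothetical:
 *
 *	struct uvm_aiodesc *aio;
 *	vaddr_t kva;
 *
 *	kva = uvm_pagermapin(pps, npages, &aio, UVMPAGER_MAPIN_WAITOK);
 *	if (kva == 0)
 *		goto fail;		(mapping or allocation failed)
 *	aio->aiodone = foo_aiodone;	(called when the I/O completes)
 *	aio->kva = kva;
 *	aio->npages = npages;
 *	aio->pd_ptr = foo_softc;	(pager-private state)
 */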

/*
 * pager ops
 */

struct uvm_pagerops {
	void		(*pgo_init) __P((void));/* init pager */
	void		(*pgo_reference)	/* add reference to obj */
			 __P((struct uvm_object *));
	void		(*pgo_detach)		/* drop reference to obj */
			 __P((struct uvm_object *));
	int		(*pgo_fault)		/* special nonstd fault fn */
			 __P((struct uvm_faultinfo *, vaddr_t,
				 vm_page_t *, int, int, vm_fault_t,
				 vm_prot_t, int));
	boolean_t	(*pgo_flush)		/* flush pages out of obj */
			 __P((struct uvm_object *, voff_t, voff_t, int));
	int		(*pgo_get)		/* get/read page */
			 __P((struct uvm_object *, voff_t,
				 vm_page_t *, int *, int, vm_prot_t, int, int));
	int		(*pgo_asyncget)		/* start async get */
			 __P((struct uvm_object *, voff_t, int));
	int		(*pgo_put)		/* put/write page */
			 __P((struct uvm_object *, vm_page_t *,
				 int, boolean_t));
	void		(*pgo_cluster)		/* return range of cluster */
			 __P((struct uvm_object *, voff_t, voff_t *,
				voff_t *));
	struct vm_page **	(*pgo_mk_pcluster)	/* make "put" cluster */
			 __P((struct uvm_object *, struct vm_page **,
				 int *, struct vm_page *, int, voff_t,
				 voff_t));
	void		(*pgo_aiodone)		/* async iodone */
			 __P((struct uvm_aiodesc *));
	boolean_t	(*pgo_releasepg)	/* release page */
			 __P((struct vm_page *, struct vm_page **));
};
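
/*
 * Example (sketch): a pager exports its methods by defining a
 * uvm_pagerops table and pointing uvm_object->pgops at it; entries the
 * pager does not implement are left NULL.  The "foo_*" names below are
 * hypothetical; uvm_mk_pcluster (declared below) is the generic cluster
 * builder a pager can reuse for pgo_mk_pcluster:
 *
 *	struct uvm_pagerops foo_pager_ops = {
 *		foo_init,		(pgo_init)
 *		foo_reference,		(pgo_reference)
 *		foo_detach,		(pgo_detach)
 *		NULL,			(pgo_fault: no special fault fn)
 *		foo_flush,		(pgo_flush)
 *		foo_get,		(pgo_get)
 *		NULL,			(pgo_asyncget)
 *		foo_put,		(pgo_put)
 *		foo_cluster,		(pgo_cluster)
 *		uvm_mk_pcluster,	(pgo_mk_pcluster)
 *		foo_aiodone,		(pgo_aiodone)
 *		foo_releasepg,		(pgo_releasepg)
 *	};
 *
 *	uobj->pgops = &foo_pager_ops;
 */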

/* pager flags [mostly for flush] */

#define PGO_CLEANIT	0x001	/* write dirty pages to backing store */
#define PGO_SYNCIO	0x002	/* if PGO_CLEANIT: use sync I/O? */
/*
 * obviously, if neither PGO_DEACTIVATE nor PGO_FREE is set, the pages
 * stay where they are.
 */
#define PGO_DEACTIVATE	0x004	/* deactivate flushed pages */
#define PGO_FREE	0x008	/* free flushed pages */

#define PGO_ALLPAGES	0x010	/* flush whole object/get all pages */
#define PGO_DOACTCLUST	0x020	/* flag to mk_pcluster to include active */
#define PGO_LOCKED	0x040	/* fault data structures are locked [get] */
#define PGO_PDFREECLUST	0x080	/* daemon's free cluster flag [uvm_pager_put] */
#define PGO_REALLOCSWAP	0x100	/* reallocate swap area [pager_dropcluster] */
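
/*
 * Example (sketch): a typical flush of a range of an object, with the
 * object locked by the caller as usual.  PGO_CLEANIT|PGO_SYNCIO writes
 * any dirty pages in [start, stop) synchronously, and PGO_FREE frees
 * them afterwards; "start" and "stop" are placeholder names:
 *
 *	simple_lock(&uobj->vmobjlock);
 *	(void) (*uobj->pgops->pgo_flush)(uobj, start, stop,
 *	    PGO_CLEANIT | PGO_SYNCIO | PGO_FREE);
 *	simple_unlock(&uobj->vmobjlock);
 */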

/* page we are not interested in getting */
#define PGO_DONTCARE ((struct vm_page *) -1)	/* [get only] */
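
/*
 * Example (sketch): a pgo_get caller marks the slots of its page array
 * it does not want with PGO_DONTCARE and the ones it wants with NULL;
 * here only the center page (index 1) of a three-page window starting
 * at "offset" is requested:
 *
 *	struct vm_page *pps[3];
 *	int npages = 3, result;
 *
 *	pps[0] = PGO_DONTCARE;
 *	pps[1] = NULL;			(the page we actually want)
 *	pps[2] = PGO_DONTCARE;
 *	result = (*uobj->pgops->pgo_get)(uobj, offset, pps, &npages, 1,
 *	    VM_PROT_READ, UVM_ADV_NORMAL, PGO_LOCKED);
 */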

#ifdef _KERNEL

/*
 * handle inline options
 */

#ifdef UVM_PAGER_INLINE
#define PAGER_INLINE static __inline
#else
#define PAGER_INLINE /* nothing */
#endif /* UVM_PAGER_INLINE */

/*
 * prototypes
 */

void		uvm_pager_dropcluster __P((struct uvm_object *,
					struct vm_page *, struct vm_page **,
					int *, int));
void		uvm_pager_init __P((void));
int		uvm_pager_put __P((struct uvm_object *, struct vm_page *,
				   struct vm_page ***, int *, int,
				   voff_t, voff_t));

PAGER_INLINE struct vm_page *uvm_pageratop __P((vaddr_t));

vaddr_t		uvm_pagermapin __P((struct vm_page **, int,
				    struct uvm_aiodesc **, int));
void		uvm_pagermapout __P((vaddr_t, int));
struct vm_page **uvm_mk_pcluster __P((struct uvm_object *, struct vm_page **,
				       int *, struct vm_page *, int,
				       voff_t, voff_t));

/* Flags to uvm_pagermapin() */
#define	UVMPAGER_MAPIN_WAITOK	0x01	/* it's okay to wait */
#define	UVMPAGER_MAPIN_READ	0x02	/* host <- device */
#define	UVMPAGER_MAPIN_WRITE	0x00	/* device <- host (pseudo flag) */
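
/*
 * Example (sketch): mapping pages into pager KVA around a synchronous
 * pageout.  No async descriptor is requested (NULL aiodesc pointer),
 * and a return of zero means the mapping failed:
 *
 *	vaddr_t kva;
 *
 *	kva = uvm_pagermapin(pps, npages, NULL,
 *	    UVMPAGER_MAPIN_WAITOK | UVMPAGER_MAPIN_WRITE);
 *	if (kva == 0)
 *		...fail or retry...
 *	(do device I/O from [kva, kva + npages * PAGE_SIZE))
 *	uvm_pagermapout(kva, npages);
 */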

#endif /* _KERNEL */

#endif /* _UVM_UVM_PAGER_H_ */