/*	$OpenBSD: uvm_pager.h,v 1.11 2001/08/12 21:36:48 mickey Exp $	*/
/*	$NetBSD: uvm_pager.h,v 1.15 2000/05/19 03:45:04 thorpej Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.h,v 1.1.2.14 1998/01/13 19:00:50 chuck Exp
 */

#ifndef _UVM_UVM_PAGER_H_
#define _UVM_UVM_PAGER_H_

/*
 * uvm_pager.h
 */

/*
 * async pager i/o descriptor structure
 */

TAILQ_HEAD(uvm_aiohead, uvm_aiodesc);

struct uvm_aiodesc {
	void (*aiodone) __P((struct uvm_aiodesc *));
						/* aio done function */
	vaddr_t kva;				/* KVA of mapped page(s) */
	int npages;				/* # of pages in I/O req */
	void *pd_ptr;				/* pager-dependent pointer */
	TAILQ_ENTRY(uvm_aiodesc) aioq;		/* linked list of aio's */
};

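/*
 * Illustrative sketch (not part of this interface): for an async I/O
 * request, a pager would typically fill in one of these descriptors,
 * start the transfer, and arrange for the aiodone hook to run at
 * completion.  The "foo_" names below are hypothetical.
 *
 *	aio->aiodone = foo_aiodone;
 *	aio->kva = kva;
 *	aio->npages = npages;
 *	aio->pd_ptr = foo_softc;
 *	(start device I/O; foo_aiodone(aio) is called when it finishes)
 */
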
/*
 * pager ops
 */

struct uvm_pagerops {
	void		(*pgo_init) __P((void));/* init pager */
	void		(*pgo_reference)	/* add reference to obj */
			 __P((struct uvm_object *));
	void			(*pgo_detach)	/* drop reference to obj */
			 __P((struct uvm_object *));
	int			(*pgo_fault)	/* special nonstd fault fn */
			 __P((struct uvm_faultinfo *, vaddr_t,
				 vm_page_t *, int, int, vm_fault_t,
				 vm_prot_t, int));
	boolean_t		(*pgo_flush)	/* flush pages out of obj */
			 __P((struct uvm_object *, voff_t, voff_t, int));
	int			(*pgo_get)	/* get/read page */
			 __P((struct uvm_object *, voff_t,
				 vm_page_t *, int *, int, vm_prot_t, int, int));
	int			(*pgo_asyncget)	/* start async get */
			 __P((struct uvm_object *, voff_t, int));
	int			(*pgo_put)	/* put/write page */
			 __P((struct uvm_object *, vm_page_t *,
				 int, boolean_t));
	void			(*pgo_cluster)	/* return range of cluster */
			__P((struct uvm_object *, voff_t, voff_t *,
				voff_t *));
	struct vm_page **	(*pgo_mk_pcluster)	/* make "put" cluster */
			 __P((struct uvm_object *, struct vm_page **,
				 int *, struct vm_page *, int, voff_t,
				 voff_t));
	void			(*pgo_aiodone)		/* async iodone */
			 __P((struct uvm_aiodesc *));
	boolean_t		(*pgo_releasepg)	/* release page */
			 __P((struct vm_page *, struct vm_page **));
};

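/*
 * Illustrative sketch (not part of this interface): a pager typically
 * exports its operations as a statically initialized uvm_pagerops table,
 * leaving unsupported entries NULL.  The "foo_" names are hypothetical;
 * positional initialization follows the member order above.
 *
 *	struct uvm_pagerops foo_pagerops = {
 *		foo_init,
 *		foo_reference,
 *		foo_detach,
 *		NULL,			(no special fault handler)
 *		foo_flush,
 *		foo_get,
 *		NULL,			(no async get)
 *		foo_put,
 *		foo_cluster,
 *		uvm_mk_pcluster,	(generic "put" clusterer)
 *		foo_aiodone,
 *		foo_releasepg,
 *	};
 */
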
/* pager flags [mostly for flush] */

#define PGO_CLEANIT	0x001	/* write dirty pages to backing store */
#define PGO_SYNCIO	0x002	/* if PGO_CLEANIT: use sync I/O? */
/*
 * if neither PGO_DEACTIVATE nor PGO_FREE is set, the pages
 * stay where they are.
 */
#define PGO_DEACTIVATE	0x004	/* deactivate flushed pages */
#define PGO_FREE	0x008	/* free flushed pages */

#define PGO_ALLPAGES	0x010	/* flush whole object/get all pages */
#define PGO_DOACTCLUST	0x020	/* flag to mk_pcluster to include active */
#define PGO_LOCKED	0x040	/* fault data structures are locked [get] */
#define PGO_PDFREECLUST	0x080	/* daemon's free cluster flag [uvm_pager_put] */
#define PGO_REALLOCSWAP	0x100	/* reallocate swap area [pager_dropcluster] */

/* page we are not interested in getting */
#define PGO_DONTCARE ((struct vm_page *) -1)	/* [get only] */

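/*
 * Illustrative sketch (not part of this interface): callers combine the
 * flags above when flushing; e.g. a hypothetical "clean and free every
 * page in the object" request on a locked object "uobj" might look like:
 *
 *	(void) uobj->pgops->pgo_flush(uobj, 0, 0,
 *	    PGO_CLEANIT|PGO_FREE|PGO_ALLPAGES);
 */
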
#ifdef _KERNEL

/*
 * get/put return values
 * OK	   operation was successful
 * BAD	   specified data was out of the accepted range
 * FAIL	   specified data was in range, but doesn't exist
 * PEND	   operation was initiated but not completed
 * ERROR   error while accessing data that is in range and exists
 * AGAIN   temporary resource shortage prevented operation from happening
 * UNLOCK  unlock the map and try again
 * REFAULT [uvm_fault internal use only!] unable to relock data structures,
 *         thus the mapping needs to be reverified before we can proceed
 */
#define	VM_PAGER_OK		0
#define	VM_PAGER_BAD		1
#define	VM_PAGER_FAIL		2
#define	VM_PAGER_PEND		3
#define	VM_PAGER_ERROR		4
#define VM_PAGER_AGAIN		5
#define VM_PAGER_UNLOCK		6
#define VM_PAGER_REFAULT	7

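/*
 * Illustrative sketch (not part of this interface): callers of pgo_get
 * typically dispatch on these codes; the variable names are hypothetical.
 *
 *	result = uobj->pgops->pgo_get(uobj, offset, pages, &npages,
 *	    centeridx, access_type, advice, flags);
 *	if (result == VM_PAGER_PEND)
 *		(async I/O started; completion arrives via the aiodone hook)
 *	else if (result != VM_PAGER_OK)
 *		(handle the error, retry, or refault as appropriate)
 */
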
/*
 * handle inline options
 */

#ifdef UVM_PAGER_INLINE
#define PAGER_INLINE static __inline
#else
#define PAGER_INLINE /* nothing */
#endif /* UVM_PAGER_INLINE */

/*
 * prototypes
 */

void		uvm_pager_dropcluster __P((struct uvm_object *,
					struct vm_page *, struct vm_page **,
					int *, int));
void		uvm_pager_init __P((void));
int		uvm_pager_put __P((struct uvm_object *, struct vm_page *,
				   struct vm_page ***, int *, int,
				   voff_t, voff_t));

PAGER_INLINE struct vm_page *uvm_pageratop __P((vaddr_t));

vaddr_t		uvm_pagermapin __P((struct vm_page **, int,
				    struct uvm_aiodesc **, int));
void		uvm_pagermapout __P((vaddr_t, int));
struct vm_page **uvm_mk_pcluster __P((struct uvm_object *, struct vm_page **,
				       int *, struct vm_page *, int,
				       voff_t, voff_t));

/* Flags to uvm_pagermapin() */
#define	UVMPAGER_MAPIN_WAITOK	0x01	/* it's okay to wait */
#define	UVMPAGER_MAPIN_READ	0x02	/* device -> host */
#define	UVMPAGER_MAPIN_WRITE	0x00	/* host -> device (pseudo flag) */

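/*
 * Illustrative sketch (not part of this interface): pages handed to a
 * pager for I/O are mapped into kernel virtual space and later unmapped
 * in pairs; e.g. a hypothetical synchronous write path might do:
 *
 *	kva = uvm_pagermapin(pps, npages, NULL,
 *	    UVMPAGER_MAPIN_WAITOK|UVMPAGER_MAPIN_WRITE);
 *	(do device I/O on the mapping at kva)
 *	uvm_pagermapout(kva, npages);
 */
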
#endif /* _KERNEL */

#endif /* _UVM_UVM_PAGER_H_ */