/*	$NetBSD: uvm_page.h,v 1.60 2010/07/29 10:54:51 hannken Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_page.h   7.3 (Berkeley) 4/21/91
 * from: Id: uvm_page.h,v 1.1.2.6 1998/02/04 02:31:42 chuck Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#ifndef _UVM_UVM_PAGE_H_
#define _UVM_UVM_PAGE_H_

/*
 * uvm_page.h
 */

/*
 *	Resident memory system definitions.
 */

/*
 *	Management of resident (logical) pages.
 *
 *	A small structure is kept for each resident
 *	page, indexed by page number.  Each structure
 *	is an element of several lists:
 *
 *		A red-black tree rooted with the containing
 *		object is used to quickly perform object+
 *		offset lookups.
 *
 *		A list of all pages for a given object,
 *		so they can be quickly deactivated at
 *		time of deallocation.
 *
 *		An ordered list of pages due for pageout.
 *
 *	In addition, the structure contains the object
 *	and offset to which this page belongs (for pageout),
 *	and sundry status bits.
 *
 *	Fields in this structure are locked either by the lock on the
 *	object that the page belongs to (O) or by the lock on the page
 *	queues (P) [or both].
 */

/*
 * locking note: the mach version of this data structure had bit
 * fields for the flags, and the bit fields were divided into two
 * items (depending on who locked what).  at some point in BSD the bit
 * fields were dropped and all the flags were lumped into one short.
 * that is fine for a single threaded uniprocessor OS, but bad if you
 * want to actually make use of locking.  so, we've separated things
 * back out again.
 *
 * note the page structure has no lock of its own.
 * (a brief illustrative sketch of the split follows the structure
 * definition below.)
 */

#include <uvm/uvm_extern.h>
#include <uvm/uvm_pglist.h>

#include <sys/rb.h>

struct vm_page {
	struct rb_node		rb_node;	/* tree of pages in obj (O) */

	union {
		TAILQ_ENTRY(vm_page) queue;
		LIST_ENTRY(vm_page) list;
	} pageq;				/* queue info for FIFO
						 * queue or free list (P) */
	union {
		TAILQ_ENTRY(vm_page) queue;
		LIST_ENTRY(vm_page) list;
	} listq;				/* pages in same object (O) */

	struct vm_anon		*uanon;		/* anon (O,P) */
	struct uvm_object	*uobject;	/* object (O,P) */
	voff_t			offset;		/* offset into object (O,P) */
	uint16_t		flags;		/* object flags [O] */
	uint16_t		loan_count;	/* number of active loans
						 * to read: [O or P]
						 * to modify: [O _and_ P] */
	uint16_t		wire_count;	/* wired down map refs [P] */
	uint16_t		pqflags;	/* page queue flags [P] */
	paddr_t			phys_addr;	/* physical address of page */

#ifdef __HAVE_VM_PAGE_MD
	struct vm_page_md	mdpage;		/* pmap-specific data */
#endif

#if defined(UVM_PAGE_TRKOWN)
	/* debugging fields to track page ownership */
	pid_t			owner;		/* proc that set PG_BUSY */
	lwpid_t			lowner;		/* lwp that set PG_BUSY */
	const char		*owner_tag;	/* why it was set busy */
#endif
};
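
/*
 * Illustrative sketch (not part of this header, never compiled): how the
 * [O]/[P] split above plays out for a page owned by a uvm_object.  It
 * assumes this era's global uvm_pageqlock and an object's embedded
 * vmobjlock mutex; the function name is hypothetical.
 */
#if 0
static void
example_mark_readonly_and_dequeue(struct vm_page *pg)
{

	/* pg->flags is [O]: take the owning object's lock to modify it. */
	mutex_enter(&pg->uobject->vmobjlock);
	pg->flags |= PG_RDONLY;
	mutex_exit(&pg->uobject->vmobjlock);

	/* pg->pqflags is [P]: take the page queue lock instead. */
	mutex_enter(&uvm_pageqlock);
	uvm_pagedequeue(pg);		/* manipulates the paging queues */
	mutex_exit(&uvm_pageqlock);
}
#endif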

/*
 * These are the flags defined for vm_page.
 */

/*
 * locking rules:
 *   PG_ ==> locked by object lock
 *   PQ_ ==> locked by page queue lock
 *   PQ_FREE is locked by the free queue lock and is mutually exclusive
 *   with all other PQ_ flags
 *
 * PG_ZERO is used to indicate that a page has been pre-zero'd.  This flag
 * is only set when the page is on no queues, and is cleared when the page
 * is placed on the free list.
 */

#define	PG_BUSY		0x0001		/* page is locked */
#define	PG_WANTED	0x0002		/* someone is waiting for page */
#define	PG_TABLED	0x0004		/* page is in VP table */
#define	PG_CLEAN	0x0008		/* page has not been modified */
#define	PG_PAGEOUT	0x0010		/* page to be freed for pagedaemon */
#define	PG_RELEASED	0x0020		/* page to be freed when unbusied */
#define	PG_FAKE		0x0040		/* page is not yet initialized */
#define	PG_RDONLY	0x0080		/* page must be mapped read-only */
#define	PG_ZERO		0x0100		/* page is pre-zero'd */
#define	PG_MARKER	0x0200		/* dummy marker page */

#define	PG_PAGER1	0x1000		/* pager-specific flag */

#define	UVM_PGFLAGBITS \
	"\20\1BUSY\2WANTED\3TABLED\4CLEAN\5PAGEOUT\6RELEASED\7FAKE\10RDONLY" \
	"\11ZERO\12MARKER\15PAGER1"

#define	PQ_FREE		0x0001		/* page is on free list */
#define	PQ_ANON		0x0002		/* page is part of an anon, rather
					   than an uvm_object */
#define	PQ_AOBJ		0x0004		/* page is part of an anonymous
					   uvm_object */
#define	PQ_SWAPBACKED	(PQ_ANON|PQ_AOBJ)
#define	PQ_READAHEAD	0x0008		/* read-ahead but has not been "hit" yet */

#define	PQ_PRIVATE1	0x0100
#define	PQ_PRIVATE2	0x0200
#define	PQ_PRIVATE3	0x0400
#define	PQ_PRIVATE4	0x0800
#define	PQ_PRIVATE5	0x1000
#define	PQ_PRIVATE6	0x2000
#define	PQ_PRIVATE7	0x4000
#define	PQ_PRIVATE8	0x8000

#define	UVM_PQFLAGBITS \
	"\20\1FREE\2ANON\3AOBJ\4READAHEAD" \
	"\11PRIVATE1\12PRIVATE2\13PRIVATE3\14PRIVATE4" \
	"\15PRIVATE5\16PRIVATE6\17PRIVATE7\20PRIVATE8"
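
/*
 * Illustrative sketch (not part of this header, never compiled): the
 * "\20..." bit strings above are meant to be handed to snprintb(9),
 * together with the flag word, to produce a human-readable flag dump for
 * debug output.  The function name and buffer size are arbitrary; only
 * snprintb() and printf() from <sys/systm.h> are real interfaces here.
 */
#if 0
#include <sys/systm.h>

static void
example_print_page_flags(const struct vm_page *pg)
{
	char bits[128];

	snprintb(bits, sizeof(bits), UVM_PGFLAGBITS, pg->flags);
	printf("page %p flags %s\n", pg, bits);
	snprintb(bits, sizeof(bits), UVM_PQFLAGBITS, pg->pqflags);
	printf("page %p pqflags %s\n", pg, bits);
}
#endif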

/*
 * physical memory layout structure
 *
 * MD vmparam.h must #define:
 *   VM_PHYSSEG_MAX = max number of physical memory segments we support
 *		   (if this is "1" then we revert to a "contig" case)
 *   VM_PHYSSEG_STRAT: memory sort/search options (for VM_PHYSSEG_MAX > 1)
 *	- VM_PSTRAT_RANDOM:   linear search (random order)
 *	- VM_PSTRAT_BSEARCH:  binary search (sorted by address)
 *	- VM_PSTRAT_BIGFIRST: linear search (sorted by largest segment first)
 *	- others?
 *   (an illustrative vmparam.h fragment follows the VM_PSTRAT_* constants
 *   below)
 *   XXXCDC: eventually we should purge all left-over global variables...
 */
#define	VM_PSTRAT_RANDOM	1
#define	VM_PSTRAT_BSEARCH	2
#define	VM_PSTRAT_BIGFIRST	3
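
/*
 * Illustrative sketch (not part of this header, never compiled): what an
 * MD <machine/vmparam.h> is expected to provide.  The values shown are
 * hypothetical; the real numbers and strategy are machine-dependent.
 */
#if 0
#define	VM_PHYSSEG_MAX		16			/* up to 16 RAM segments */
#define	VM_PHYSSEG_STRAT	VM_PSTRAT_BSEARCH	/* segments sorted by address */
#endif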

/*
 * vm_physseg: describes one segment of physical memory
 */
struct vm_physseg {
	paddr_t	start;			/* PF# of first page in segment */
	paddr_t	end;			/* (PF# of last page in segment) + 1 */
	paddr_t	avail_start;		/* PF# of first free page in segment */
	paddr_t	avail_end;		/* (PF# of last free page in segment) + 1 */
	int	free_list;		/* which free list they belong on */
	struct	vm_page *pgs;		/* vm_page structures (from start) */
	struct	vm_page *lastpg;	/* vm_page structure for end */
#ifdef __HAVE_PMAP_PHYSSEG
	struct	pmap_physseg pmseg;	/* pmap specific (MD) data */
#endif
};

#ifdef _KERNEL

/*
 * globals
 */

extern bool vm_page_zero_enable;

/*
 * physical memory config is stored in vm_physmem.
 */

extern struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];
extern int vm_nphysseg;
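
/*
 * Illustrative sketch (not part of this header, never compiled): walking
 * the static segment table to total the managed page frames.  The
 * function name is hypothetical; vm_physmem[] and vm_nphysseg are the
 * real globals declared above.
 */
#if 0
static psize_t
example_count_managed_pages(void)
{
	psize_t npages = 0;
	int lcv;

	for (lcv = 0; lcv < vm_nphysseg; lcv++)
		npages += vm_physmem[lcv].end - vm_physmem[lcv].start;
	return npages;
}
#endif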

/*
 * prototypes: the following prototypes define the interface to pages
 */

void uvm_page_init(vaddr_t *, vaddr_t *);
#if defined(UVM_PAGE_TRKOWN)
void uvm_page_own(struct vm_page *, const char *);
#endif
#if !defined(PMAP_STEAL_MEMORY)
bool uvm_page_physget(paddr_t *);
#endif
void uvm_page_recolor(int);
void uvm_pageidlezero(void);

void uvm_pageactivate(struct vm_page *);
vaddr_t uvm_pageboot_alloc(vsize_t);
void uvm_pagecopy(struct vm_page *, struct vm_page *);
void uvm_pagedeactivate(struct vm_page *);
void uvm_pagedequeue(struct vm_page *);
void uvm_pageenqueue(struct vm_page *);
void uvm_pagefree(struct vm_page *);
void uvm_page_unbusy(struct vm_page **, int);
struct vm_page *uvm_pagelookup(struct uvm_object *, voff_t);
void uvm_pageunwire(struct vm_page *);
void uvm_pagewait(struct vm_page *, int);
void uvm_pagewake(struct vm_page *);
void uvm_pagewire(struct vm_page *);
void uvm_pagezero(struct vm_page *);
bool uvm_pageismanaged(paddr_t);

int uvm_page_lookup_freelist(struct vm_page *);

static struct vm_page *PHYS_TO_VM_PAGE(paddr_t);
static int vm_physseg_find(paddr_t, int *);

/*
 * macros
 */

#define UVM_PAGE_TREE_PENALTY	4	/* XXX: a guess */

#define VM_PAGE_TO_PHYS(entry)	((entry)->phys_addr)

/*
 * Compute the page color bucket for a given page.
 */
#define	VM_PGCOLOR_BUCKET(pg) \
	(atop(VM_PAGE_TO_PHYS((pg))) & uvmexp.colormask)
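
/*
 * Worked example with hypothetical numbers: with 4 page colors
 * (uvmexp.colormask == 3) and 4KB pages, a page at physical address
 * 0x12345000 is frame 0x12345, so it lands in bucket 0x12345 & 3 == 1.
 */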

/*
 * when VM_PHYSSEG_MAX is 1, we can simplify these functions
 */

#if VM_PHYSSEG_MAX == 1
static inline int vm_physseg_find_contig(struct vm_physseg *, int, paddr_t, int *);
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
static inline int vm_physseg_find_bsearch(struct vm_physseg *, int, paddr_t, int *);
#else
static inline int vm_physseg_find_linear(struct vm_physseg *, int, paddr_t, int *);
#endif

/*
 * vm_physseg_find: find vm_physseg structure that belongs to a PA
 */
static inline int
vm_physseg_find(paddr_t pframe, int *offp)
{

#if VM_PHYSSEG_MAX == 1
	return vm_physseg_find_contig(vm_physmem, vm_nphysseg, pframe, offp);
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	return vm_physseg_find_bsearch(vm_physmem, vm_nphysseg, pframe, offp);
#else
	return vm_physseg_find_linear(vm_physmem, vm_nphysseg, pframe, offp);
#endif
}

#if VM_PHYSSEG_MAX == 1
static inline int
vm_physseg_find_contig(struct vm_physseg *segs, int nsegs, paddr_t pframe, int *offp)
{

	/* 'contig' case */
	if (pframe >= segs[0].start && pframe < segs[0].end) {
		if (offp)
			*offp = pframe - segs[0].start;
		return(0);
	}
	return(-1);
}

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)

static inline int
vm_physseg_find_bsearch(struct vm_physseg *segs, int nsegs, paddr_t pframe, int *offp)
{
	/* binary search for it */
	u_int	start, len, try;

	/*
	 * if try is too large (thus target is less than try) we reduce
	 * the length to trunc(len/2) [i.e. everything smaller than "try"]
	 *
	 * if the try is too small (thus target is greater than try) then
	 * we set the new start to be (try + 1).   this means we need to
	 * reduce the length to (round(len/2) - 1).
	 *
	 * note "adjust" below which takes advantage of the fact that
	 *  (round(len/2) - 1) == trunc((len - 1) / 2)
	 * for any value of len we may have
	 */
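	/*
	 * worked example, len == 5 (entries 0..4): try == 2.  if "try" is
	 * too big, entries 0..1 remain (len becomes 5/2 == 2); if "try" is
	 * too small, entries 3..4 remain (start becomes 3 and len becomes
	 * (5-1)/2 == 2 via the decrement plus the halving).  either way the
	 * remaining window is exactly the untried entries.
	 */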

	for (start = 0, len = nsegs ; len != 0 ; len = len / 2) {
		try = start + (len / 2);	/* try in the middle */

		/* start past our try? */
		if (pframe >= segs[try].start) {
			/* was try correct? */
			if (pframe < segs[try].end) {
				if (offp)
					*offp = pframe - segs[try].start;
				return(try);            /* got it */
			}
			start = try + 1;	/* next time, start here */
			len--;			/* "adjust" */
		} else {
			/*
			 * pframe before try, just reduce length of
			 * region, done in "for" loop
			 */
		}
	}
	return(-1);
}

#else

static inline int
vm_physseg_find_linear(struct vm_physseg *segs, int nsegs, paddr_t pframe, int *offp)
{
	/* linear search for it */
	int	lcv;

	for (lcv = 0; lcv < nsegs; lcv++) {
		if (pframe >= segs[lcv].start &&
		    pframe < segs[lcv].end) {
			if (offp)
				*offp = pframe - segs[lcv].start;
			return(lcv);		   /* got it */
		}
	}
	return(-1);
}
#endif


/*
 * PHYS_TO_VM_PAGE: find vm_page for a PA.   used by MI code to get vm_pages
 * back from an I/O mapping (ugh!).   used in some MD code as well.
 */
static inline struct vm_page *
PHYS_TO_VM_PAGE(paddr_t pa)
{
	paddr_t pf = atop(pa);
	int	off;
	int	psi;

	psi = vm_physseg_find(pf, &off);
	if (psi != -1)
		return(&vm_physmem[psi].pgs[off]);
	return(NULL);
}
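
/*
 * Illustrative sketch (not part of this header, never compiled): typical
 * use of PHYS_TO_VM_PAGE is to recover the vm_page behind a physical
 * address handed back by the pmap.  The wrapper name is hypothetical;
 * pmap_extract(9) and PHYS_TO_VM_PAGE are the real interfaces here.
 */
#if 0
static struct vm_page *
example_page_behind_mapping(struct pmap *pmap, vaddr_t va)
{
	paddr_t pa;

	if (!pmap_extract(pmap, va, &pa))
		return NULL;			/* nothing mapped at va */
	return PHYS_TO_VM_PAGE(pa);		/* NULL if pa is unmanaged */
}
#endif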

#define VM_PAGE_IS_FREE(entry)  ((entry)->pqflags & PQ_FREE)
#define	VM_FREE_PAGE_TO_CPU(pg)	((struct uvm_cpu *)((uintptr_t)pg->offset))

#ifdef DEBUG
void uvm_pagezerocheck(struct vm_page *);
#endif /* DEBUG */

#endif /* _KERNEL */

#endif /* _UVM_UVM_PAGE_H_ */