/*	$NetBSD: uvm_page.h,v 1.105 2020/06/14 21:41:42 ad Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_page.h   7.3 (Berkeley) 4/21/91
 * from: Id: uvm_page.h,v 1.1.2.6 1998/02/04 02:31:42 chuck Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#ifndef _UVM_UVM_PAGE_H_
#define _UVM_UVM_PAGE_H_

#ifdef _KERNEL_OPT
#include "opt_uvm_page_trkown.h"
#endif

#include <sys/rwlock.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_pglist.h>

/*
 * Management of resident (logical) pages.
 *
 * Each resident page has a vm_page structure, indexed by page number.
 * There are several lists in the structure:
 *
 * - A red-black tree rooted with the containing object is used to
 *   quickly perform object+offset lookups.
 * - A list of all pages for a given object, so that they can all be
 *   deactivated quickly when the object is deallocated.
 * - An ordered list of pages due for pageout.
 *
 * In addition, the structure contains the object and offset to which
 * this page belongs (for pageout) and sundry status bits.
 *
 * Note that the page structure has no lock of its own.  The page is
 * generally protected by its owner's lock (UVM object or amap/anon).
 * It should be noted that UVM has to serialize pmap(9) operations on
 * the managed pages, e.g. for pmap_enter() calls.  Hence, the lock
 * order is as follows:
 *
 *	[vmpage-owner-lock] ->
 *		any pmap locks (e.g. PV hash lock)
 *
 * Since the kernel is always self-consistent, no serialization is
 * required for unmanaged mappings, e.g. for pmap_kenter_pa() calls.
 *
 * Field markings and the corresponding locks:
 *
 * f:	free page queue lock, uvm_fpageqlock
 * o:	page owner (uvm_object::vmobjlock, vm_amap::am_lock, vm_anon::an_lock)
 * i:	vm_page::interlock
 *        => flags set and cleared only with o&i held can
 *           safely be tested for with only o held.
 * o,i:	o|i for read, o&i for write (depends on context, e.g. whether
 *	the page could be loaned)
 *	  => see uvm_loan.c
 * w:	wired page queue or uvm_pglistalloc:
 *	  => wired page queue: o&i to change, stable from wire to unwire
 *		XXX What about concurrent or nested wire?
 *	  => uvm_pglistalloc: owned by caller
 * ?:	locked by pmap or assumed page owner's lock
 * p:	locked by pagedaemon policy module (pdpolicy)
 * c:	cpu private
 * s:	stable, does not change
 *
 * UVM and pmap(9) may use uvm_page_owner_locked_p() to assert whether the
 * page owner's lock is acquired.
 *
 * A page can have one of four identities:
 *
 * o free
 *   => pageq.list is entry on global free page queue
 *   => uanon is unused (or (void *)0xdeadbeef for DEBUG)
 *   => uobject is unused (or (void *)0xdeadbeef for DEBUG)
 *   => PG_FREE is set in flags
 * o owned by a uvm_object
 *   => pageq.queue is entry on wired page queue, if any
 *   => uanon is NULL or the vm_anon to which it has been O->A loaned
 *   => uobject is owner
 * o owned by a vm_anon
 *   => pageq is unused (XXX correct?)
 *   => uanon is owner
 *   => uobject is NULL
 *   => PG_ANON is set in flags
 * o allocated by uvm_pglistalloc
 *   => pageq.queue is entry on resulting pglist, owned by caller
 *   => uanon is unused
 *   => uobject is unused
 *
 * The following transitions are allowed:
 *
 * - uvm_pagealloc: free -> owned by a uvm_object/vm_anon
 * - uvm_pagefree: owned by a uvm_object/vm_anon -> free
 * - uvm_pglistalloc: free -> allocated by uvm_pglistalloc
 * - uvm_pglistfree: allocated by uvm_pglistalloc -> free
 *
 * On the ordering of fields:
 *
 * The fields most heavily used during fault processing are clustered
 * together at the start of the structure to reduce cache misses.
 * XXX This entire thing should be shrunk to fit in one cache line.
 */
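
/*
 * For example (a minimal sketch, assuming a uvm_object owner "uobj"),
 * a write to an "o,i" field takes the owner's lock and then the page
 * interlock, per the lock order above:
 *
 *	rw_enter(uobj->vmobjlock, RW_WRITER);	(owner's lock, "o")
 *	uvm_pagelock(pg);			(page interlock, "i")
 *	... modify "o,i" fields, e.g. pg->uanon ...
 *	uvm_pageunlock(pg);
 *	rw_exit(uobj->vmobjlock);
 *
 * With only the owner's lock held, the same fields may be read but not
 * written.
 */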

struct vm_page {
	/* _LP64: first cache line */
	union {
		TAILQ_ENTRY(vm_page) queue;	/* w: wired page queue
						 * or uvm_pglistalloc output */
		LIST_ENTRY(vm_page) list;	/* f: global free page queue */
	} pageq;
	uint32_t		pqflags;	/* i: pagedaemon flags */
	uint32_t		flags;		/* o: object flags */
	paddr_t			phys_addr;	/* o: physical address of pg */
	uint32_t		loan_count;	/* o,i: num. active loans */
	uint32_t		wire_count;	/* o,i: wired down map refs */
	struct vm_anon		*uanon;		/* o,i: anon */
	struct uvm_object	*uobject;	/* o,i: object */
	voff_t			offset;		/* o: offset into object */

	/* _LP64: second cache line */
	kmutex_t		interlock;	/* s: lock on identity */
	TAILQ_ENTRY(vm_page)	pdqueue;	/* p: pagedaemon queue */

#ifdef __HAVE_VM_PAGE_MD
	struct vm_page_md	mdpage;		/* ?: pmap-specific data */
#endif

#if defined(UVM_PAGE_TRKOWN)
	/* debugging fields to track page ownership */
	pid_t			owner;		/* proc that set PG_BUSY */
	lwpid_t			lowner;		/* lwp that set PG_BUSY */
	const char		*owner_tag;	/* why it was set busy */
#endif
};

/*
 * Overview of UVM page flags, stored in pg->flags.
 *
 * Locking notes:
 *
 * PG_, struct vm_page::flags	=> locked by owner
 * PG_AOBJ			=> additionally locked by vm_page::interlock
 * PG_ANON			=> additionally locked by vm_page::interlock
 * PG_FREE			=> additionally locked by uvm_fpageqlock
 *				   for uvm_pglistalloc()
 *
 * Flag descriptions:
 *
 * PG_CLEAN:
 *	Page is known clean.
 *	The contents of the page are consistent with its backing store.
 *
 * PG_DIRTY:
 *	Page is known dirty.
 *	To avoid losing data, the contents of the page should be written
 *	back to the backing store before freeing the page.
 *
 * PG_BUSY:
 *	Page is long-term locked, usually because I/O (a transfer between
 *	the page memory and the backing store) is in progress.  An LWP
 *	attempting to access the page shall set PQ_WANTED and wait.
 *	PG_BUSY may only be set with a write lock held on the object.
 *
 * PG_PAGEOUT:
 *	Indicates that the page is being paged-out in preparation for
 *	being freed.
 *
 * PG_RELEASED:
 *	Indicates that the page, which is currently PG_BUSY, should be freed
 *	after the release of the long-term lock.  It is the responsibility
 *	of the owning LWP (i.e. the one which set PG_BUSY) to do so.
 *
 * PG_FAKE:
 *	Page has been allocated, but not yet initialised.  The flag is used
 *	to avoid overwriting valid data, e.g. to prevent a read from the
 *	backing store when the in-core data is newer.
 *
 * PG_RDONLY:
 *	Indicates that the page must be mapped read-only.
 *
 * PG_MARKER:
 *	Dummy marker page, generally used for list traversal.
 */

/*
 * if you want to renumber PG_CLEAN and PG_DIRTY, check __CTASSERTs in
 * uvm_page_status.c first.
 */

#define	PG_CLEAN	0x00000001	/* page is known clean */
#define	PG_DIRTY	0x00000002	/* page is known dirty */
#define	PG_BUSY		0x00000004	/* page is locked */
#define	PG_PAGEOUT	0x00000010	/* page to be freed for pagedaemon */
#define	PG_RELEASED	0x00000020	/* page to be freed when unbusied */
#define	PG_FAKE		0x00000040	/* page is not yet initialized */
#define	PG_RDONLY	0x00000080	/* page must be mapped read-only */
#define	PG_TABLED	0x00000200	/* page is tabled in object */
#define	PG_AOBJ		0x00000400	/* page is part of an anonymous
					   uvm_object */
#define	PG_ANON		0x00000800	/* page is part of an anon, rather
					   than a uvm_object */
#define	PG_FILE		0x00001000	/* file backed (non-anonymous) */
#define	PG_READAHEAD	0x00002000	/* read-ahead but not "hit" yet */
#define	PG_FREE		0x00004000	/* page is on free list */
#define	PG_MARKER	0x00008000	/* dummy marker page */
#define	PG_PAGER1	0x00010000	/* pager-specific flag */

#define	PG_STAT		(PG_ANON|PG_AOBJ|PG_FILE)
#define	PG_SWAPBACKED	(PG_ANON|PG_AOBJ)

#define	UVM_PGFLAGBITS \
	"\20\1CLEAN\2DIRTY\3BUSY" \
	"\5PAGEOUT\6RELEASED\7FAKE\10RDONLY" \
	"\11ZERO\12TABLED\13AOBJ\14ANON" \
	"\15FILE\16READAHEAD\17FREE\20MARKER" \
	"\21PAGER1"

/*
 * Flags stored in pg->pqflags, which is protected by pg->interlock.
 *
 * PQ_PRIVATE:
 *	... is for uvmpdpol to do whatever it wants with.
 *
 * PQ_INTENT_SET:
 *	Indicates that the intent set on the page has not yet been realized.
 *
 * PQ_INTENT_QUEUED:
 *	Indicates that the page is, or will soon be, on a per-CPU queue for
 *	the intent to be realized.
 *
 * PQ_WANTED:
 *	Indicates that the page, which is currently PG_BUSY, is wanted by
 *	some other LWP.  The page owner (i.e. the LWP which set PG_BUSY) is
 *	responsible for clearing both flags and waking up any waiters once
 *	it has released the long-term lock (PG_BUSY).
 */

#define	PQ_INTENT_A		0x00000000	/* intend activation */
#define	PQ_INTENT_I		0x00000001	/* intend deactivation */
#define	PQ_INTENT_E		0x00000002	/* intend enqueue */
#define	PQ_INTENT_D		0x00000003	/* intend dequeue */
#define	PQ_INTENT_MASK		0x00000003	/* mask of intended state */
#define	PQ_INTENT_SET		0x00000004	/* not realized yet */
#define	PQ_INTENT_QUEUED	0x00000008	/* queued for processing */
#define	PQ_PRIVATE		0x00000ff0	/* private for pdpolicy */
#define	PQ_WANTED		0x00001000	/* someone is waiting for page */

#define	UVM_PQFLAGBITS \
	"\20\1INTENT_0\2INTENT_1\3INTENT_SET\4INTENT_QUEUED" \
	"\5PRIVATE1\6PRIVATE2\7PRIVATE3\10PRIVATE4" \
	"\11PRIVATE5\12PRIVATE6\13PRIVATE7\14PRIVATE8" \
	"\15WANTED"

/*
 * physical memory layout structure
 *
 * MD vmparam.h must #define:
 *   VM_PHYSSEG_MAX = max number of physical memory segments we support
 *		   (if this is "1" then we revert to a "contig" case)
 *   VM_PHYSSEG_STRAT: memory sort/search options (for VM_PHYSSEG_MAX > 1)
 *	- VM_PSTRAT_RANDOM:   linear search (random order)
 *	- VM_PSTRAT_BSEARCH:  binary search (sorted by address)
 *	- VM_PSTRAT_BIGFIRST: linear search (sorted by largest segment first)
 *	- others?
 *   XXXCDC: eventually we should purge all left-over global variables...
 */
#define VM_PSTRAT_RANDOM	1
#define VM_PSTRAT_BSEARCH	2
#define VM_PSTRAT_BIGFIRST	3
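
/*
 * For example, an MD <machine/vmparam.h> might contain (the values here
 * are purely illustrative):
 *
 *	#define VM_PHYSSEG_MAX		32
 *	#define VM_PHYSSEG_STRAT	VM_PSTRAT_BSEARCH
 */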

#ifdef _KERNEL

/*
 * prototypes: the following prototypes define the interface to pages
 */

void uvm_page_init(vaddr_t *, vaddr_t *);
#if defined(UVM_PAGE_TRKOWN)
void uvm_page_own(struct vm_page *, const char *);
#endif
#if !defined(PMAP_STEAL_MEMORY)
bool uvm_page_physget(paddr_t *);
#endif
void uvm_page_recolor(int);
void uvm_page_rebucket(void);

void uvm_pageactivate(struct vm_page *);
vaddr_t uvm_pageboot_alloc(vsize_t);
void uvm_pagecopy(struct vm_page *, struct vm_page *);
void uvm_pagedeactivate(struct vm_page *);
void uvm_pagedequeue(struct vm_page *);
void uvm_pageenqueue(struct vm_page *);
void uvm_pagefree(struct vm_page *);
void uvm_pagelock(struct vm_page *);
void uvm_pagelock2(struct vm_page *, struct vm_page *);
void uvm_pageunlock(struct vm_page *);
void uvm_pageunlock2(struct vm_page *, struct vm_page *);
void uvm_page_unbusy(struct vm_page **, int);
struct vm_page *uvm_pagelookup(struct uvm_object *, voff_t);
void uvm_pageunwire(struct vm_page *);
void uvm_pagewire(struct vm_page *);
void uvm_pagezero(struct vm_page *);
bool uvm_pageismanaged(paddr_t);
bool uvm_page_owner_locked_p(struct vm_page *, bool);
void uvm_pgfl_lock(void);
void uvm_pgfl_unlock(void);
unsigned int uvm_pagegetdirty(struct vm_page *);
void uvm_pagemarkdirty(struct vm_page *, unsigned int);
bool uvm_pagecheckdirty(struct vm_page *, bool);
bool uvm_pagereadonly_p(struct vm_page *);
bool uvm_page_locked_p(struct vm_page *);
void uvm_pagewakeup(struct vm_page *);
bool uvm_pagewanted_p(struct vm_page *);
void uvm_pagewait(struct vm_page *, krwlock_t *, const char *);

int uvm_page_lookup_freelist(struct vm_page *);

struct vm_page *uvm_phys_to_vm_page(paddr_t);
paddr_t uvm_vm_page_to_phys(const struct vm_page *);

#if defined(PMAP_DIRECT)
extern bool ubc_direct;
int uvm_direct_process(struct vm_page **, u_int, voff_t, vsize_t,
	    int (*)(void *, size_t, void *), void *);
#endif
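
/*
 * A minimal sketch of a uvm_direct_process() callback, assuming
 * PMAP_DIRECT and hypothetical names: the callback receives a
 * direct-mapped virtual address and length for each chunk, e.g. to
 * zero the pages "pgs" covering "len" bytes starting at offset "off":
 *
 *	static int
 *	zero_cb(void *va, size_t len, void *arg)
 *	{
 *		memset(va, 0, len);
 *		return 0;
 *	}
 *
 *	error = uvm_direct_process(pgs, npages, off, len, zero_cb, NULL);
 */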

/*
 * page dirtiness status for uvm_pagegetdirty and uvm_pagemarkdirty
 *
 * UNKNOWN means that we need to consult the pmap to determine whether
 * the page is dirty.
 * Basically, UVM_PAGE_STATUS_CLEAN implies that the page has no writable
 * mapping.
 *
 * if you want to renumber these, check __CTASSERTs in
 * uvm_page_status.c first.
 */

#define	UVM_PAGE_STATUS_UNKNOWN	0
#define	UVM_PAGE_STATUS_CLEAN	1
#define	UVM_PAGE_STATUS_DIRTY	2
#define	UVM_PAGE_NUM_STATUS	3
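
/*
 * A minimal sketch of resolving an UNKNOWN status, assuming a uvm_object
 * owner "uobj": uvm_pagecheckdirty() (declared above) consults the pmap
 * and updates the cached status; "false" here states that the caller has
 * not write-protected the page:
 *
 *	rw_enter(uobj->vmobjlock, RW_WRITER);
 *	if (uvm_pagecheckdirty(pg, false)) {
 *		... the page was dirty; arrange for write-back ...
 *	}
 *	rw_exit(uobj->vmobjlock);
 */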

/*
 * macros
 */

#define VM_PAGE_TO_PHYS(entry)	uvm_vm_page_to_phys(entry)

#ifdef __HAVE_VM_PAGE_MD
#define	VM_PAGE_TO_MD(pg)	(&(pg)->mdpage)
#endif

/*
 * Compute the page color for a given page.
 */
#define	VM_PGCOLOR(pg) \
	(atop(VM_PAGE_TO_PHYS((pg))) & uvmexp.colormask)
#define	PHYS_TO_VM_PAGE(pa)	uvm_phys_to_vm_page(pa)
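
/*
 * For example (a minimal sketch), translating a managed physical address
 * back to its vm_page and computing its color:
 *
 *	if (uvm_pageismanaged(pa)) {
 *		struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
 *		unsigned color = VM_PGCOLOR(pg);
 *		...
 *	}
 */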

/*
 * VM_PAGE_IS_FREE() can't tell whether the page is on the global free
 * list or in a per-CPU cache.  If you need to be certain, pause caching.
 */
#define VM_PAGE_IS_FREE(entry)  ((entry)->flags & PG_FREE)

/*
 * Use the lower 10 bits of pg->phys_addr to cache some locators for
 * the page.  This implies that the smallest possible page size is 1kB, and
 * that nobody should use pg->phys_addr directly (use VM_PAGE_TO_PHYS()).
 *
 * - 5 bits for the freelist index, because uvm_page_lookup_freelist()
 *   traverses an rbtree and therefore features prominently in traces
 *   captured during performance tests.  It would probably be more useful
 *   to cache the physseg index here, because the freelist can be inferred
 *   from the physseg, but that requires changes to allocation for
 *   UVM_HOTPLUG, so for now we'll go with the freelist.
 *
 * - 5 bits for "bucket", a way for us to categorise pages further as
 *   needed (e.g. NUMA node).
 *
 * None of this is set in stone; it can be adjusted as needed.
 */

#define	UVM_PHYSADDR_FREELIST	__BITS(0,4)
#define	UVM_PHYSADDR_BUCKET	__BITS(5,9)

static inline unsigned
uvm_page_get_freelist(struct vm_page *pg)
{
	unsigned fl = __SHIFTOUT(pg->phys_addr, UVM_PHYSADDR_FREELIST);
	KASSERT(fl == (unsigned)uvm_page_lookup_freelist(pg));
	return fl;
}

static inline unsigned
uvm_page_get_bucket(struct vm_page *pg)
{
	return __SHIFTOUT(pg->phys_addr, UVM_PHYSADDR_BUCKET);
}

static inline void
uvm_page_set_freelist(struct vm_page *pg, unsigned fl)
{
	KASSERT(fl < 32);
	pg->phys_addr &= ~UVM_PHYSADDR_FREELIST;
	pg->phys_addr |= __SHIFTIN(fl, UVM_PHYSADDR_FREELIST);
}

static inline void
uvm_page_set_bucket(struct vm_page *pg, unsigned b)
{
	KASSERT(b < 32);
	pg->phys_addr &= ~UVM_PHYSADDR_BUCKET;
	pg->phys_addr |= __SHIFTIN(b, UVM_PHYSADDR_BUCKET);
}

#ifdef DEBUG
void uvm_pagezerocheck(struct vm_page *);
#endif /* DEBUG */

#endif /* _KERNEL */

#endif /* _UVM_UVM_PAGE_H_ */