/*	$OpenBSD: uvm_pmemrange.h,v 1.19 2024/11/07 11:12:46 mpi Exp $	*/

/*
 * Copyright (c) 2009 Ariane van der Steldt <ariane@stack.nl>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * uvm_pmemrange.h: describe and manage free physical memory.
 */

#ifndef _UVM_UVM_PMEMRANGE_H_
#define _UVM_UVM_PMEMRANGE_H_

RBT_HEAD(uvm_pmr_addr, vm_page);
RBT_HEAD(uvm_pmr_size, vm_page);

/*
 * Page types available:
 * - DIRTY: this page may contain random data.
 * - ZERO: this page has been zeroed.
 */
#define UVM_PMR_MEMTYPE_DIRTY	0
#define UVM_PMR_MEMTYPE_ZERO	1
#define UVM_PMR_MEMTYPE_MAX	2
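
/*
 * The memtype constants double as indices into the per-type free-page
 * trees and lists kept in struct uvm_pmemrange below.  A minimal,
 * illustrative sketch of how a caller that prefers zeroed pages might
 * pick which tree to search first ("want_zero" and "pmr" are
 * hypothetical names, not part of this header):
 *
 *	int memt = want_zero ? UVM_PMR_MEMTYPE_ZERO : UVM_PMR_MEMTYPE_DIRTY;
 *	struct uvm_pmr_size *tree = &pmr->size[memt];
 */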

/*
 * An address range of memory.
 */
struct uvm_pmemrange {
	struct	uvm_pmr_addr addr;	/* Free page chunks, sorted by addr. */
	struct	uvm_pmr_size size[UVM_PMR_MEMTYPE_MAX];
					/* Free page chunks, sorted by size. */
	TAILQ_HEAD(, vm_page) single[UVM_PMR_MEMTYPE_MAX];
					/* single page regions (uses pageq) */

	paddr_t	low;			/* Start of address range (pgno). */
	paddr_t	high;			/* End +1 (pgno). */
	int	use;			/* Use counter. */
	psize_t	nsegs;			/* Current range count. */

	TAILQ_ENTRY(uvm_pmemrange) pmr_use;
					/* pmr, sorted by use */
	RBT_ENTRY(uvm_pmemrange) pmr_addr;
					/* pmr, sorted by address */
};
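
/*
 * Note that a range covers the half-open interval of page numbers
 * [low, high).  A minimal membership test, as a sketch (illustrative
 * only, not an existing kernel function):
 *
 *	static inline int
 *	pmr_contains(const struct uvm_pmemrange *pmr, paddr_t pgno)
 *	{
 *		return (pgno >= pmr->low && pgno < pmr->high);
 *	}
 */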

/*
 * Description of a failing memory allocation.
 *
 * Every descriptor corresponds to a request for the page daemon to release
 * pages in a given memory range.  There is one global descriptor for nowait
 * allocations; all others live on the stacks of processes waiting for
 * physical pages.
 *
 * There are multiple ways physical pages can become available:
 * [1] unmanaged pages are released by shrinkers (bufbackoff(), drmbackoff()...)
 * [2] the page daemon drops them (we notice because they are freed)
 * [3] a process frees memory or exits
 *
 * The buffer cache and the page daemon may decide that they cannot make
 * pages available in the requested range.  In that case, the FAIL bit
 * will be set.
 * XXX There's a possibility that a page is no longer on the queues but
 * XXX has not yet been freed, or that a page was busy.
 * XXX Also, wired pages are not considered for paging, so they could
 * XXX cause a failure that may be recoverable.
 *
 * Locks used to protect struct members in this file:
 *	F	uvm_lock_fpageq
 *	I	immutable after creation
 */
struct uvm_pmalloc {
	TAILQ_ENTRY(uvm_pmalloc) pmq;			/* [F] next request */
	struct uvm_constraint_range pm_constraint;	/* [I] memory range */
	psize_t	pm_size;				/* [I] # pages */
	int	pm_flags;				/* [F] state flags */
};
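
/*
 * A freed page can satisfy a waiting request when its physical address
 * falls inside the request's constraint range.  A sketch of the check,
 * assuming "paddr" is the address of a freed page (illustrative only;
 * the real logic lives in uvm_wakeup_pla()):
 *
 *	if (paddr >= pma->pm_constraint.ucr_low &&
 *	    paddr < pma->pm_constraint.ucr_high)
 *		pma->pm_flags |= UVM_PMA_FREED;		(with fpageq held)
 */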

/*
 * Indicate to the page daemon that a nowait allocation failed and that it
 * should recover at least some memory in the most restricted region
 * (assumed to be dma_constraint).
 */
extern struct uvm_pmalloc nowait_pma;			/* [F] */

/*
 * uvm_pmalloc flags.
 */
#define UVM_PMA_LINKED	0x01	/* uvm_pmalloc is on list */
#define UVM_PMA_BUSY	0x02	/* entry is busy with fpageq unlocked */
#define UVM_PMA_FAIL	0x10	/* page daemon cannot free pages */
#define UVM_PMA_FREED	0x20	/* at least one page in the range was freed */
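
/*
 * Sketch of a waiter's flag lifecycle, roughly mirroring uvm_wait_pla()
 * (simplified and illustrative; locking and error handling omitted):
 *
 *	pma.pm_flags = UVM_PMA_LINKED;	enqueue on the allocs list below
 *	sleep on fpageq until the page daemon reports progress
 *	if (pma.pm_flags & UVM_PMA_FAIL)
 *		return ENOMEM;		daemon could not free pages
 *	retry the allocation if UVM_PMA_FREED is set
 */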

RBT_HEAD(uvm_pmemrange_addr, uvm_pmemrange);
TAILQ_HEAD(uvm_pmemrange_use, uvm_pmemrange);

/*
 * pmr control structure. Contained in uvm.pmr_control.
 */
struct uvm_pmr_control {
	struct	uvm_pmemrange_addr addr;
	struct	uvm_pmemrange_use use;

	/* Only changed while fpageq is locked. */
	TAILQ_HEAD(, uvm_pmalloc) allocs;
};
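
/*
 * Ranges are reachable both through the address tree and through the
 * use-sorted list, so allocators can walk them in preference order.
 * A minimal sketch of such a walk (illustrative only):
 *
 *	struct uvm_pmemrange *pmr;
 *
 *	TAILQ_FOREACH(pmr, &uvm.pmr_control.use, pmr_use) {
 *		try to allocate from pmr
 *	}
 */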

void	uvm_pmr_freepages(struct vm_page *, psize_t);
void	uvm_pmr_freepageq(struct pglist *);
int	uvm_pmr_getpages(psize_t, paddr_t, paddr_t, paddr_t, paddr_t,
	    int, int, struct pglist *);
void	uvm_pmr_init(void);
int	uvm_wait_pla(paddr_t, paddr_t, paddr_t, int);
void	uvm_wakeup_pla(paddr_t, psize_t);
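
/*
 * A sketch of a constrained allocation through uvm_pmr_getpages(),
 * assuming the parameter order (count, start, end, align, boundary,
 * maxseg, flags, result) with page-number units, as used by
 * uvm_pglistalloc() (illustrative only):
 *
 *	struct pglist pgl;
 *
 *	TAILQ_INIT(&pgl);
 *	if (uvm_pmr_getpages(4, 0, atop(16 * 1024 * 1024), 1, 0, 1,
 *	    UVM_PLA_WAITOK, &pgl) == 0) {
 *		four contiguous pages below 16MB are now on pgl
 *	}
 */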

#if defined(DDB) || defined(DEBUG)
int	uvm_pmr_isfree(struct vm_page *pg);
#endif

/*
 * Internal tree logic.
 */

int	uvm_pmr_addr_cmp(const struct vm_page *, const struct vm_page *);
int	uvm_pmr_size_cmp(const struct vm_page *, const struct vm_page *);

RBT_PROTOTYPE(uvm_pmr_addr, vm_page, objt, uvm_pmr_addr_cmp);
RBT_PROTOTYPE(uvm_pmr_size, vm_page, objt, uvm_pmr_size_cmp);
RBT_PROTOTYPE(uvm_pmemrange_addr, uvm_pmemrange, pmr_addr,
    uvm_pmemrange_addr_cmp);
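
/*
 * A sketch of what the address comparator is expected to do: order
 * free pages by physical address (the authoritative version lives in
 * uvm_pmemrange.c):
 *
 *	int
 *	uvm_pmr_addr_cmp(const struct vm_page *a, const struct vm_page *b)
 *	{
 *		return (a->phys_addr < b->phys_addr ? -1 :
 *		    a->phys_addr > b->phys_addr);
 *	}
 */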

struct vm_page		*uvm_pmr_insert_addr(struct uvm_pmemrange *,
			    struct vm_page *, int);
void			 uvm_pmr_insert_size(struct uvm_pmemrange *,
			    struct vm_page *);
struct vm_page		*uvm_pmr_insert(struct uvm_pmemrange *,
			    struct vm_page *, int);
void			 uvm_pmr_remove_addr(struct uvm_pmemrange *,
			    struct vm_page *);
void			 uvm_pmr_remove_size(struct uvm_pmemrange *,
			    struct vm_page *);
void			 uvm_pmr_remove(struct uvm_pmemrange *,
			    struct vm_page *);
struct vm_page		*uvm_pmr_extract_range(struct uvm_pmemrange *,
			    struct vm_page *, paddr_t, paddr_t,
			    struct pglist *);
struct vm_page		*uvm_pmr_cache_get(int);
void			 uvm_pmr_cache_put(struct vm_page *);
unsigned int		 uvm_pmr_cache_drain(void);
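
/*
 * uvm_pmr_cache_get() and uvm_pmr_cache_put() front the allocator with
 * a small cache of single free pages.  A minimal usage sketch, assuming
 * the int argument takes UVM_PLA_* flags (illustrative only):
 *
 *	struct vm_page *pg = uvm_pmr_cache_get(UVM_PLA_NOWAIT);
 *	if (pg != NULL) {
 *		use the page, then release it
 *		uvm_pmr_cache_put(pg);
 *	}
 */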

#endif /* _UVM_UVM_PMEMRANGE_H_ */