xref: /openbsd-src/sys/uvm/uvm_anon.c (revision 46035553bfdd96e63c94e32da0210227ec2e3cf1)
1 /*	$OpenBSD: uvm_anon.c,v 1.50 2020/11/24 13:49:09 mpi Exp $	*/
2 /*	$NetBSD: uvm_anon.c,v 1.10 2000/11/25 06:27:59 chs Exp $	*/
3 
4 /*
5  * Copyright (c) 1997 Charles D. Cranor and Washington University.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 /*
30  * uvm_anon.c: uvm anon ops
31  */
32 
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/malloc.h>
36 #include <sys/pool.h>
37 #include <sys/kernel.h>
38 #include <sys/atomic.h>
39 
40 #include <uvm/uvm.h>
41 #include <uvm/uvm_swap.h>
42 
/* pool backing all vm_anon allocations; set up by uvm_anon_init() */
struct pool uvm_anon_pool;
44 
/*
 * uvm_anon_init: initialize the vm_anon pool.
 *
 * => must run before any anon is allocated via uvm_analloc()
 */
void
uvm_anon_init(void)
{
	pool_init(&uvm_anon_pool, sizeof(struct vm_anon), 0, IPL_NONE,
	    PR_WAITOK, "anonpl", NULL);
	/* cap the pool's idle reserve at 1/16 of currently-free pages */
	pool_sethiwat(&uvm_anon_pool, uvmexp.free / 16);
}
55 
56 /*
57  * allocate an anon
58  */
59 struct vm_anon *
60 uvm_analloc(void)
61 {
62 	struct vm_anon *anon;
63 
64 	anon = pool_get(&uvm_anon_pool, PR_NOWAIT);
65 	if (anon) {
66 		anon->an_ref = 1;
67 		anon->an_page = NULL;
68 		anon->an_swslot = 0;
69 	}
70 	return(anon);
71 }
72 
/*
 * uvm_anfree_list: free a single anon structure
 *
 * => caller must remove anon from its amap before calling (if it was in
 *	an amap).
 * => we may lock the pageq's.
 * => if pgl is non-NULL, the anon's resident page (if any) is removed
 *	from the paging queues and inserted on pgl; the caller then owns
 *	the pages on the list and must free them itself.
 */
void
uvm_anfree_list(struct vm_anon *anon, struct pglist *pgl)
{
	struct vm_page *pg;

	/* get the anon's resident page, if any */
	pg = anon->an_page;

	/*
	 * if we have a resident page, we must dispose of it before freeing
	 * the anon.
	 */
	if (pg) {
		/*
		 * if page is busy then we just mark it as released (who ever
		 * has it busy must check for this when they wake up). if the
		 * page is not busy then we can free it now.
		 *
		 * note: we return without freeing the anon or its swap
		 * resources here; the busy-page owner finishes the teardown
		 * when it sees PG_RELEASED.
		 */
		if ((pg->pg_flags & PG_BUSY) != 0) {
			/* tell them to dump it when done */
			atomic_setbits_int(&pg->pg_flags, PG_RELEASED);
			return;
		}
		/* revoke all mappings of the page before disposing of it */
		pmap_page_protect(pg, PROT_NONE);
		if (pgl != NULL) {
			/*
			 * clean page, and put it on the pglist
			 * for later freeing.
			 */
			uvm_lock_pageq();
			uvm_pageclean(pg);
			uvm_unlock_pageq();
			TAILQ_INSERT_HEAD(pgl, pg, pageq);
		} else {
			uvm_lock_pageq();	/* lock out pagedaemon */
			uvm_pagefree(pg);	/* bye bye */
			uvm_unlock_pageq();	/* free the daemon */
		}
	}
	/* no resident page but a swap slot: drop the swap-only accounting */
	if (pg == NULL && anon->an_swslot != 0) {
		/* this page is no longer only in swap. */
		KASSERT(uvmexp.swpgonly > 0);
		uvmexp.swpgonly--;
	}

	/* free any swap resources. */
	uvm_anon_dropswap(anon);

	/*
	 * now that we've stripped the data areas from the anon, free the anon
	 * itself!
	 */
	KASSERT(anon->an_page == NULL);
	KASSERT(anon->an_swslot == 0);

	pool_put(&uvm_anon_pool, anon);
}
137 
/*
 * uvm_anfree: free a single anon, releasing its page (if any)
 * immediately rather than collecting it on a pglist.
 * convenience wrapper around uvm_anfree_list().
 */
void
uvm_anfree(struct vm_anon *anon)
{
	uvm_anfree_list(anon, NULL);
}
143 
/*
 * uvm_anwait: wait for memory to become available to allocate an anon.
 *
 * => blocks (PR_WAITOK) until one anon can be allocated, then returns
 *	it to the pool immediately; only the waiting side effect matters.
 */
void
uvm_anwait(void)
{
	struct vm_anon *anon;

	/* XXX: Want something like pool_wait()? */
	anon = pool_get(&uvm_anon_pool, PR_WAITOK);
	pool_put(&uvm_anon_pool, anon);
}
156 
157 /*
158  * uvm_anon_dropswap:  release any swap resources from this anon.
159  */
160 void
161 uvm_anon_dropswap(struct vm_anon *anon)
162 {
163 
164 	if (anon->an_swslot == 0)
165 		return;
166 
167 	uvm_swap_free(anon->an_swslot, 1);
168 	anon->an_swslot = 0;
169 }
170 
/*
 * uvm_anon_pagein: fetch an anon's page from swap into memory.
 *
 * => returns TRUE if pagein was aborted due to lack of memory.
 *	(NOTE(review): as written, every return path below yields FALSE;
 *	the TRUE case is never generated here -- confirm against callers.)
 */

boolean_t
uvm_anon_pagein(struct vm_anon *anon)
{
	struct vm_page *pg;
	int rv;

	/* fault the page in; no fault info or amap for a bare anon */
	rv = uvmfault_anonget(NULL, NULL, anon);

	switch (rv) {
	case VM_PAGER_OK:
		break;
	case VM_PAGER_ERROR:
	case VM_PAGER_REFAULT:
		/*
		 * nothing more to do on errors.
		 * VM_PAGER_REFAULT can only mean that the anon was freed,
		 * so again there's nothing to do.
		 */
		return FALSE;
	default:
#ifdef DIAGNOSTIC
		/* unexpected pager return code: fatal on DIAGNOSTIC kernels */
		panic("anon_pagein: uvmfault_anonget -> %d", rv);
#else
		return FALSE;
#endif
	}

	/*
	 * ok, we've got the page now.
	 * free its swap slot and mark the page dirty (clear PG_CLEAN):
	 * the in-memory copy is now the only copy of the data.
	 */
	pg = anon->an_page;
	uvm_swap_free(anon->an_swslot, 1);
	anon->an_swslot = 0;
	atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);

	/* deactivate the page (to put it on a page queue) */
	pmap_clear_reference(pg);
	pmap_page_protect(pg, PROT_NONE);
	uvm_lock_pageq();
	uvm_pagedeactivate(pg);
	uvm_unlock_pageq();

	return FALSE;
}
222