xref: /openbsd-src/sys/uvm/uvm_anon.c (revision d13be5d47e4149db2549a9828e244d59dbc43f15)
/*	$OpenBSD: uvm_anon.c,v 1.35 2011/07/03 18:34:14 oga Exp $	*/
/*	$NetBSD: uvm_anon.c,v 1.10 2000/11/25 06:27:59 chs Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_anon.c: uvm anon ops
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/kernel.h>

#include <uvm/uvm.h>
#include <uvm/uvm_swap.h>

struct pool uvm_anon_pool;

/*
 * uvm_anon_init: set up anon management (initialize the anon pool)
 */
void
uvm_anon_init(void)
{
	pool_init(&uvm_anon_pool, sizeof(struct vm_anon), 0, 0, 0, "anonpl",
	    &pool_allocator_nointr);
	pool_sethiwat(&uvm_anon_pool, uvmexp.free / 16);
}

/*
 * uvm_analloc: allocate a new anon.
 *
 * => the new anon is returned locked, with a reference count of one,
 *	or NULL if the pool is exhausted.
 */
struct vm_anon *
uvm_analloc(void)
{
	struct vm_anon *anon;

	anon = pool_get(&uvm_anon_pool, PR_NOWAIT);
	if (anon) {
		simple_lock_init(&anon->an_lock);
		anon->an_ref = 1;
		anon->an_page = NULL;
		anon->an_swslot = 0;
		simple_lock(&anon->an_lock);
	}
	return(anon);
}

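/*
 * Example (editor's sketch, not part of the original source): a caller
 * typically pairs uvm_analloc() with uvm_pagealloc() and, if the page
 * allocation fails, drops its reference and frees the anon again with
 * uvm_anfree() as documented below.  The helper name
 * example_anon_with_page() is hypothetical, and locking and amap
 * bookkeeping are simplified; the block is compiled out.
 */
#if 0
static struct vm_anon *
example_anon_with_page(void)
{
	struct vm_anon *anon;
	struct vm_page *pg;

	anon = uvm_analloc();			/* locked, an_ref == 1 */
	if (anon == NULL)
		return (NULL);			/* anon pool exhausted */

	pg = uvm_pagealloc(NULL, 0, anon, 0);	/* page owned by the anon */
	if (pg == NULL) {
		/* back out: unlock, drop our reference, free the anon */
		simple_unlock(&anon->an_lock);
		anon->an_ref--;
		uvm_anfree(anon);
		return (NULL);
	}
	return (anon);				/* still locked, page attached */
}
#endif
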
/*
 * uvm_anfree: free a single anon structure
 *
 * => caller must remove anon from its amap before calling (if it was in
 *	an amap).
 * => anon must be unlocked and have a zero reference count.
 * => we may lock the pageq's.
 */
void
uvm_anfree(struct vm_anon *anon)
{
	struct vm_page *pg;

	/*
	 * get page
	 */

	pg = anon->an_page;

	/*
	 * if there is a resident page and it is loaned, then anon may not
	 * own it.   call out to uvm_anon_lockloanpg() to ensure the real
	 * owner of the page has been identified and locked.
	 */

	if (pg && pg->loan_count)
		pg = uvm_anon_lockloanpg(anon);

	/*
	 * if we have a resident page, we must dispose of it before freeing
	 * the anon.
	 */

	if (pg) {

		/*
		 * if the page is owned by a uobject (now locked), then we must
		 * kill the loan on the page rather than free it.
		 */

		if (pg->uobject) {
			uvm_lock_pageq();
			KASSERT(pg->loan_count > 0);
			pg->loan_count--;
			pg->uanon = NULL;
			uvm_unlock_pageq();
			simple_unlock(&pg->uobject->vmobjlock);
		} else {

			/*
			 * page has no uobject, so we must be the owner of it.
			 *
			 * if page is busy then we just mark it as released
			 * (whoever has it busy must check for this when they
			 * wake up).    if the page is not busy then we can
			 * free it now.
			 */

			if ((pg->pg_flags & PG_BUSY) != 0) {
				/* tell them to dump it when done */
				atomic_setbits_int(&pg->pg_flags, PG_RELEASED);
				return;
			}
			pmap_page_protect(pg, VM_PROT_NONE);
			uvm_lock_pageq();	/* lock out pagedaemon */
			uvm_pagefree(pg);	/* bye bye */
			uvm_unlock_pageq();	/* free the daemon */
		}
	}
	if (pg == NULL && anon->an_swslot != 0) {
		/* this page is no longer only in swap. */
		simple_lock(&uvm.swap_data_lock);
		KASSERT(uvmexp.swpgonly > 0);
		uvmexp.swpgonly--;
		simple_unlock(&uvm.swap_data_lock);
	}

	/*
	 * free any swap resources.
	 */
	uvm_anon_dropswap(anon);

	/*
	 * now that we've stripped the data areas from the anon, free the anon
	 * itself!
	 */
	KASSERT(anon->an_page == NULL);
	KASSERT(anon->an_swslot == 0);

	pool_put(&uvm_anon_pool, anon);
}

/*
 * uvm_anon_dropswap:  release any swap resources from this anon.
 *
 * => anon must be locked or have a reference count of 0.
 */
void
uvm_anon_dropswap(struct vm_anon *anon)
{

	if (anon->an_swslot == 0)
		return;

	uvm_swap_free(anon->an_swslot, 1);
	anon->an_swslot = 0;
}

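/*
 * Example (editor's sketch, not part of the original source): once an
 * anon's resident page has been modified, any copy left on swap is stale
 * and can be discarded with uvm_anon_dropswap().  The helper name
 * example_dirty_anon_page() is hypothetical and the anon is assumed to be
 * locked by the caller; the block is compiled out.
 */
#if 0
static void
example_dirty_anon_page(struct vm_anon *anon)
{
	struct vm_page *pg = anon->an_page;

	/* mark the resident page dirty (a cleared PG_CLEAN means dirty) ... */
	atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);

	/* ... and release the now-stale swap slot, if any */
	uvm_anon_dropswap(anon);
}
#endif
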
/*
 * uvm_anon_lockloanpg: given a locked anon, lock its resident page
 *
 * => anon is locked by caller
 * => on return: anon is locked
 *		 if there is a resident page:
 *			if it has a uobject, it is locked by us
 *			if it is ownerless, we take over as owner
 *		 we return the resident page (it can change during
 *		 this function)
 * => note that the only time an anon has an ownerless resident page
 *	is if the page was loaned from a uvm_object and the uvm_object
 *	disowned it
 * => this only needs to be called when you want to do an operation
 *	on an anon's resident page and that page has a non-zero loan
 *	count.
 */
struct vm_page *
uvm_anon_lockloanpg(struct vm_anon *anon)
{
	struct vm_page *pg;
	boolean_t locked = FALSE;

	/*
	 * loop while we have a resident page that has a non-zero loan count.
	 * if we successfully get our lock, we will "break" the loop.
	 * note that the test for pg->loan_count is not protected -- this
	 * may produce false positive results.   note that a false positive
	 * result may cause us to do more work than we need to, but it will
	 * not produce an incorrect result.
	 */

	while (((pg = anon->an_page) != NULL) && pg->loan_count != 0) {

		/*
		 * quickly check to see if the page has an object before
		 * bothering to lock the page queues.   this may also produce
		 * a false positive result, but that's ok because we do a real
		 * check after that.
		 *
		 * XXX: quick check -- worth it?   need volatile?
		 */

		if (pg->uobject) {

			uvm_lock_pageq();
			if (pg->uobject) {	/* the "real" check */
				locked =
				    simple_lock_try(&pg->uobject->vmobjlock);
			} else {
				/* object disowned before we got PQ lock */
				locked = TRUE;
			}
			uvm_unlock_pageq();

			/*
			 * if we didn't get a lock (try lock failed), then we
			 * toggle our anon lock and try again
			 */

			if (!locked) {
				simple_unlock(&anon->an_lock);

				/*
				 * someone locking the object has a chance to
				 * lock us right now
				 */

				simple_lock(&anon->an_lock);
				continue;
			}
		}

		/*
		 * if page is un-owned [i.e. the object dropped its ownership],
		 * then we can take over as owner!
		 */

		if (pg->uobject == NULL && (pg->pg_flags & PQ_ANON) == 0) {
			uvm_lock_pageq();
			atomic_setbits_int(&pg->pg_flags, PQ_ANON);
			pg->loan_count--;	/* ... and drop our loan */
			uvm_unlock_pageq();
		}

		/*
		 * we did it!   break the loop
		 */

		break;
	}
	return(pg);
}

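/*
 * Example (editor's sketch, not part of the original source): the calling
 * convention mirrors what uvm_anfree() does above -- with the anon locked,
 * check loan_count and let uvm_anon_lockloanpg() sort out who really owns
 * the resident page before touching it.  example_protect_anon_page() is a
 * hypothetical helper; the block is compiled out.
 */
#if 0
static void
example_protect_anon_page(struct vm_anon *anon)
{
	struct vm_page *pg = anon->an_page;

	if (pg && pg->loan_count)
		pg = uvm_anon_lockloanpg(anon);	/* pg may change */

	if (pg) {
		/* safe to operate on the page now */
		pmap_page_protect(pg, VM_PROT_NONE);

		/* if the page's uobject was locked for us, unlock it */
		if (pg->uobject)
			simple_unlock(&pg->uobject->vmobjlock);
	}
}
#endif
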
/*
 * fetch an anon's page.
 *
 * => anon must be locked, and is unlocked upon return.
 * => returns TRUE if pagein was aborted due to lack of memory.
 */

boolean_t
uvm_anon_pagein(struct vm_anon *anon)
{
	struct vm_page *pg;
	struct uvm_object *uobj;
	int rv;

	/* locked: anon */
	rv = uvmfault_anonget(NULL, NULL, anon);
	/*
	 * if rv == VM_PAGER_OK, anon is still locked, else anon
	 * is unlocked
	 */

	switch (rv) {
	case VM_PAGER_OK:
		break;

	case VM_PAGER_ERROR:
	case VM_PAGER_REFAULT:

		/*
		 * nothing more to do on errors.
		 * VM_PAGER_REFAULT can only mean that the anon was freed,
		 * so again there's nothing to do.
		 */

		return FALSE;

	default:
#ifdef DIAGNOSTIC
		panic("anon_pagein: uvmfault_anonget -> %d", rv);
#else
		return FALSE;
#endif
	}

	/*
	 * ok, we've got the page now.
	 * mark it as dirty, clear its swslot and un-busy it.
	 */

	pg = anon->an_page;
	uobj = pg->uobject;
	uvm_swap_free(anon->an_swslot, 1);
	anon->an_swslot = 0;
	atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);

	/*
	 * deactivate the page (to put it on a page queue)
	 */

	pmap_clear_reference(pg);
	pmap_page_protect(pg, VM_PROT_NONE);
	uvm_lock_pageq();
	uvm_pagedeactivate(pg);
	uvm_unlock_pageq();

	/*
	 * unlock the anon and we're done.
	 */

	simple_unlock(&anon->an_lock);
	if (uobj) {
		simple_unlock(&uobj->vmobjlock);
	}
	return FALSE;
}
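
/*
 * Example (editor's sketch, not part of the original source): a swap
 * shutdown style caller might walk a set of anons and use
 * uvm_anon_pagein() to pull any page whose swap slot lies in a doomed
 * device range back into memory.  example_pagein_range() and the way the
 * anon array is obtained are hypothetical; the block is compiled out.
 */
#if 0
static void
example_pagein_range(struct vm_anon **anons, int nanons, int startslot,
    int endslot)
{
	struct vm_anon *anon;
	int i;

	for (i = 0; i < nanons; i++) {
		anon = anons[i];
		if (anon == NULL)
			continue;

		simple_lock(&anon->an_lock);
		if (anon->an_swslot < startslot ||
		    anon->an_swslot >= endslot) {
			simple_unlock(&anon->an_lock);
			continue;
		}

		/* uvm_anon_pagein() unlocks the anon before returning */
		if (uvm_anon_pagein(anon))
			break;		/* out of memory; give up for now */
	}
}
#endif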
359