xref: /netbsd-src/sys/uvm/uvm_anon.c (revision 3b01aba77a7a698587faaae455bbfe740923c1f5)
/*	$NetBSD: uvm_anon.c,v 1.17 2001/05/25 04:06:12 chs Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_anon.c: uvm anon ops
 */

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/kernel.h>

#include <uvm/uvm.h>
#include <uvm/uvm_swap.h>
/*
 * anonblock_list: global list of anon blocks,
 * locked by swap_syscall_lock (since we never remove
 * anything from this list and we only add to it via swapctl(2)).
 */

struct uvm_anonblock {
	LIST_ENTRY(uvm_anonblock) list;
	int count;
	struct vm_anon *anons;
};
static LIST_HEAD(anonlist, uvm_anonblock) anonblock_list;
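
/*
 * Design note: each successful uvm_anon_add() call below contributes
 * one contiguous array of anons, recorded here as a single
 * uvm_anonblock.  Chaining per-allocation blocks on a list (rather
 * than growing one big array) lets anon_swap_off() visit every anon
 * in the system without the anons having to be virtually contiguous.
 */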


static boolean_t anon_pagein __P((struct vm_anon *));


/*
 * allocate anons
 */
void
uvm_anon_init()
{
	int nanon = uvmexp.free - (uvmexp.free / 16); /* XXXCDC ??? */

	simple_lock_init(&uvm.afreelock);
	LIST_INIT(&anonblock_list);

	/*
	 * Allocate the initial anons.
	 */
	uvm_anon_add(nanon);
}
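
/*
 * Sizing note: the "nanon" computation above reserves anons for about
 * 15/16 of the free pages at boot.  For example, with uvmexp.free ==
 * 16384 it requests 16384 - 16384/16 = 15360 anons.  The XXXCDC
 * marker suggests the ratio is a tuning heuristic rather than a
 * derived constant.
 */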

/*
 * add some more anons to the free pool.  called when we add
 * more swap space.
 *
 * => swap_syscall_lock should be held (protects anonblock_list).
 */
int
uvm_anon_add(count)
	int	count;
{
	struct uvm_anonblock *anonblock;
	struct vm_anon *anon;
	int lcv, needed;

	simple_lock(&uvm.afreelock);
	uvmexp.nanonneeded += count;
	needed = uvmexp.nanonneeded - uvmexp.nanon;
	simple_unlock(&uvm.afreelock);

	if (needed <= 0) {
		return 0;
	}
	anon = (void *)uvm_km_alloc(kernel_map, sizeof(*anon) * needed);
	if (anon == NULL) {
		simple_lock(&uvm.afreelock);
		uvmexp.nanonneeded -= count;
		simple_unlock(&uvm.afreelock);
		return ENOMEM;
	}
	MALLOC(anonblock, void *, sizeof(*anonblock), M_UVMAMAP, M_WAITOK);

	anonblock->count = needed;
	anonblock->anons = anon;
	LIST_INSERT_HEAD(&anonblock_list, anonblock, list);
	memset(anon, 0, sizeof(*anon) * needed);

	simple_lock(&uvm.afreelock);
	uvmexp.nanon += needed;
	uvmexp.nfreeanon += needed;
	for (lcv = 0; lcv < needed; lcv++) {
		/* thread each new anon onto the front of the free list */
		anon[lcv].u.an_nxt = uvm.afree;
		uvm.afree = &anon[lcv];
		simple_lock_init(&uvm.afree->an_lock);
	}
	simple_unlock(&uvm.afreelock);
	return 0;
}
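
/*
 * Illustrative caller (a sketch only -- the real call site is the
 * swap code, which must hold swap_syscall_lock): when npages of new
 * swap space appear, grow the anon pool to match:
 *
 *	error = uvm_anon_add(npages);
 *	if (error)
 *		... back out the swap addition (ENOMEM) ...
 */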

/*
 * remove anons from the free pool.
 */
void
uvm_anon_remove(count)
	int count;
{
	/*
	 * we never actually free any anons, to avoid allocation overhead.
	 * XXX someday we might want to try to free anons.
	 */

	simple_lock(&uvm.afreelock);
	uvmexp.nanonneeded -= count;
	simple_unlock(&uvm.afreelock);
}

/*
 * allocate an anon
 *
 * => new anon is returned locked!
 */
struct vm_anon *
uvm_analloc()
{
	struct vm_anon *a;

	simple_lock(&uvm.afreelock);
	a = uvm.afree;
	if (a) {
		uvm.afree = a->u.an_nxt;
		uvmexp.nfreeanon--;
		a->an_ref = 1;
		a->an_swslot = 0;
		a->u.an_page = NULL;		/* so we can free quickly */
		LOCK_ASSERT(simple_lock_held(&a->an_lock) == 0);
		simple_lock(&a->an_lock);
	}
	simple_unlock(&uvm.afreelock);
	return(a);
}
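
/*
 * Typical use (a rough sketch, not a real call site): a zero-fill
 * fault wants a fresh, locked anon with a page attached:
 *
 *	anon = uvm_analloc();
 *	if (anon == NULL)
 *		... out of anons, fail or wait ...
 *	pg = uvm_pagealloc(NULL, 0, anon, 0);
 *	... on success, pg is now owned by the locked anon ...
 */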

/*
 * uvm_anfree: free a single anon structure
 *
 * => caller must remove anon from its amap before calling (if it was in
 *	an amap).
 * => anon must be unlocked and have a zero reference count.
 * => we may lock the pageq's.
 */
void
uvm_anfree(anon)
	struct vm_anon *anon;
{
	struct vm_page *pg;
	UVMHIST_FUNC("uvm_anfree"); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"(anon=0x%x)", anon, 0,0,0);

	KASSERT(anon->an_ref == 0);
	LOCK_ASSERT(simple_lock_held(&anon->an_lock) == 0);

	/*
	 * get page
	 */

	pg = anon->u.an_page;

	/*
	 * if there is a resident page and it is loaned, then anon may not
	 * own it.   call out to uvm_anon_lockloanpg() to ensure the real
	 * owner of the page has been identified and locked.
	 */

	if (pg && pg->loan_count)
		pg = uvm_anon_lockloanpg(anon);

	/*
	 * if we have a resident page, we must dispose of it before freeing
	 * the anon.
	 */

	if (pg) {

		/*
		 * if the page is owned by a uobject (now locked), then we must
		 * kill the loan on the page rather than free it.
		 */

		if (pg->uobject) {
			uvm_lock_pageq();
			KASSERT(pg->loan_count > 0);
			pg->loan_count--;
			pg->uanon = NULL;
			uvm_unlock_pageq();
			simple_unlock(&pg->uobject->vmobjlock);
		} else {

			/*
			 * page has no uobject, so we must be the owner of it.
			 *
			 * if the page is busy then we just mark it as
			 * released (whoever has it busy must check for this
			 * when they wake up).   if the page is not busy then
			 * we can free it now.
			 */

			if ((pg->flags & PG_BUSY) != 0) {
				/* tell them to dump it when done */
				pg->flags |= PG_RELEASED;
				UVMHIST_LOG(maphist,
				    "  anon 0x%x, page 0x%x: BUSY (released!)",
				    anon, pg, 0, 0);
				return;
			}
			pmap_page_protect(pg, VM_PROT_NONE);
			uvm_lock_pageq();	/* lock out pagedaemon */
			uvm_pagefree(pg);	/* bye bye */
			uvm_unlock_pageq();	/* free the daemon */
			UVMHIST_LOG(maphist,"anon 0x%x, page 0x%x: freed now!",
			    anon, pg, 0, 0);
		}
	}

	/*
	 * free any swap resources.
	 */
	uvm_anon_dropswap(anon);

	/*
	 * now that we've stripped the data areas from the anon, free the anon
	 * itself!
	 */
	simple_lock(&uvm.afreelock);
	anon->u.an_nxt = uvm.afree;
	uvm.afree = anon;
	uvmexp.nfreeanon++;
	simple_unlock(&uvm.afreelock);
	UVMHIST_LOG(maphist,"<- done!",0,0,0,0);
}
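
/*
 * Callers typically reach uvm_anfree() by dropping the final
 * reference, in the style of the amap code (sketch):
 *
 *	simple_lock(&anon->an_lock);
 *	refs = --anon->an_ref;
 *	simple_unlock(&anon->an_lock);
 *	if (refs == 0)
 *		uvm_anfree(anon);	-- unlocked, ref now zero
 */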

/*
 * uvm_anon_dropswap:  release any swap resources from this anon.
 *
 * => anon must be locked or have a reference count of 0.
 */
void
uvm_anon_dropswap(anon)
	struct vm_anon *anon;
{
	UVMHIST_FUNC("uvm_anon_dropswap"); UVMHIST_CALLED(maphist);

	if (anon->an_swslot == 0)
		return;

	UVMHIST_LOG(maphist,"freeing swap for anon %p, paged to swslot 0x%x",
		    anon, anon->an_swslot, 0, 0);
	uvm_swap_free(anon->an_swslot, 1);
	anon->an_swslot = 0;

	if (anon->u.an_page == NULL) {
		/* this page is no longer only in swap. */
		simple_lock(&uvm.swap_data_lock);
		uvmexp.swpgonly--;
		simple_unlock(&uvm.swap_data_lock);
	}
}
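
/*
 * Accounting note: uvmexp.swpgonly counts pages whose only copy lives
 * in swap.  An anon with no resident page loses that last copy when
 * its swap slot is freed above, hence the decrement.
 */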

/*
 * uvm_anon_lockloanpg: given a locked anon, lock its resident page
 *
 * => anon is locked by caller
 * => on return: anon is locked
 *		 if there is a resident page:
 *			if it has a uobject, it is locked by us
 *			if it is ownerless, we take over as owner
 *		 we return the resident page (it can change during
 *		 this function)
 * => note that the only time an anon has an ownerless resident page
 *	is if the page was loaned from a uvm_object and the uvm_object
 *	disowned it
 * => this only needs to be called when you want to do an operation
 *	on an anon's resident page and that page has a non-zero loan
 *	count.
 */
struct vm_page *
uvm_anon_lockloanpg(anon)
	struct vm_anon *anon;
{
	struct vm_page *pg;
	boolean_t locked = FALSE;

	LOCK_ASSERT(simple_lock_held(&anon->an_lock));

	/*
	 * loop while we have a resident page that has a non-zero loan count.
	 * if we successfully get our lock, we will "break" the loop.
	 * note that the test for pg->loan_count is not protected -- this
	 * may produce false positive results.   note that a false positive
	 * result may cause us to do more work than we need to, but it will
	 * not produce an incorrect result.
	 */

	while (((pg = anon->u.an_page) != NULL) && pg->loan_count != 0) {

		/*
		 * quickly check to see if the page has an object before
		 * bothering to lock the page queues.   this may also produce
		 * a false positive result, but that's ok because we do a real
		 * check after that.
		 *
		 * XXX: quick check -- worth it?   need volatile?
		 */

		if (pg->uobject) {

			uvm_lock_pageq();
			if (pg->uobject) {	/* the "real" check */
				locked =
				    simple_lock_try(&pg->uobject->vmobjlock);
			} else {
				/* object disowned before we got PQ lock */
				locked = TRUE;
			}
			uvm_unlock_pageq();

			/*
			 * if we didn't get a lock (try lock failed), then we
			 * toggle our anon lock and try again
			 */

			if (!locked) {
				simple_unlock(&anon->an_lock);

				/*
				 * someone locking the object has a chance to
				 * lock us right now
				 */

				simple_lock(&anon->an_lock);
				continue;
			}
		}

		/*
		 * if page is un-owned [i.e. the object dropped its ownership],
		 * then we can take over as owner!
		 */

		if (pg->uobject == NULL && (pg->pqflags & PQ_ANON) == 0) {
			uvm_lock_pageq();
			pg->pqflags |= PQ_ANON;		/* take ownership... */
			pg->loan_count--;	/* ... and drop our loan */
			uvm_unlock_pageq();
		}

		/*
		 * we did it!   break the loop
		 */

		break;
	}
	return(pg);
}
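
/*
 * Why the try-lock dance above: another thread may already hold the
 * object's vmobjlock and be waiting for this anon, so blocking on the
 * object lock while holding the anon lock could deadlock.  Instead we
 * try-lock the object and, on failure, briefly release and retake the
 * anon lock so the other thread can make progress before we retry.
 */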

/*
 * page in every anon that is paged out to a range of swslots.
 *
 * swap_syscall_lock should be held (protects anonblock_list).
 */

boolean_t
anon_swap_off(startslot, endslot)
	int startslot, endslot;
{
	struct uvm_anonblock *anonblock;

	for (anonblock = LIST_FIRST(&anonblock_list);
	     anonblock != NULL;
	     anonblock = LIST_NEXT(anonblock, list)) {
		int i;

		/*
		 * loop thru all the anons in the anonblock,
		 * paging in where needed.
		 */

		for (i = 0; i < anonblock->count; i++) {
			struct vm_anon *anon = &anonblock->anons[i];
			int slot;

			/*
			 * lock anon to work on it.
			 */

			simple_lock(&anon->an_lock);

			/*
			 * is this anon's swap slot in range?
			 */

			slot = anon->an_swslot;
			if (slot >= startslot && slot < endslot) {
				boolean_t rv;

				/*
				 * yup, page it in.
				 */

				/* locked: anon */
				rv = anon_pagein(anon);
				/* unlocked: anon */

				if (rv) {
					return rv;
				}
			} else {

				/*
				 * nope, unlock and proceed.
				 */

				simple_unlock(&anon->an_lock);
			}
		}
	}
	return FALSE;
}
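
/*
 * Hypothetical swapoff-side caller (a sketch; the real call site and
 * field names live in uvm_swap.c): page in everything on the device
 * being removed, and abort if memory runs out:
 *
 *	if (anon_swap_off(sdp->swd_drumoffset,
 *	    sdp->swd_drumoffset + sdp->swd_drumsize))
 *		... fail the swapctl(2) request with ENOMEM ...
 */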

/*
 * fetch an anon's page.
 *
 * => anon must be locked, and is unlocked upon return.
 * => returns TRUE if pagein was aborted due to lack of memory.
 */

static boolean_t
anon_pagein(anon)
	struct vm_anon *anon;
{
	struct vm_page *pg;
	struct uvm_object *uobj;
	int rv;

	/* locked: anon */
	LOCK_ASSERT(simple_lock_held(&anon->an_lock));

	rv = uvmfault_anonget(NULL, NULL, anon);

	/*
	 * if rv == 0, anon is still locked, else anon
	 * is unlocked
	 */

	switch (rv) {
	case 0:
		break;

	case EIO:
	case ERESTART:

		/*
		 * nothing more to do on errors.
		 * ERESTART can only mean that the anon was freed,
		 * so again there's nothing to do.
		 */

		return FALSE;
	}

	/*
	 * ok, we've got the page now.
	 * mark it as dirty, clear its swslot and un-busy it.
	 */

	pg = anon->u.an_page;
	uobj = pg->uobject;
	uvm_swap_free(anon->an_swslot, 1);
	anon->an_swslot = 0;
	pg->flags &= ~(PG_CLEAN);

	/*
	 * deactivate the page (to put it on a page queue)
	 */

	pmap_clear_reference(pg);
	uvm_lock_pageq();
	uvm_pagedeactivate(pg);
	uvm_unlock_pageq();

	/*
	 * unlock the anon and we're done.
	 */

	simple_unlock(&anon->an_lock);
	if (uobj) {
		simple_unlock(&uobj->vmobjlock);
	}
	return FALSE;
}
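
/*
 * Note: as the code stands in this revision, anon_pagein() returns
 * FALSE on every path; uvmfault_anonget() absorbs transient errors
 * (EIO, ERESTART) itself, and the TRUE return documented above is
 * reserved for an out-of-memory abort propagated up through
 * anon_swap_off().
 */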