xref: /netbsd-src/sys/uvm/uvm_aobj.c (revision 5aefcfdc06931dd97e76246d2fe0302f7b3fe094)
1 /*	$NetBSD: uvm_aobj.c,v 1.37 2000/11/25 06:27:59 chs Exp $	*/
2 
3 /*
4  * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
5  *                    Washington University.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *      This product includes software developed by Charles D. Cranor and
19  *      Washington University.
20  * 4. The name of the author may not be used to endorse or promote products
21  *    derived from this software without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  * from: Id: uvm_aobj.c,v 1.1.2.5 1998/02/06 05:14:38 chs Exp
35  */
36 /*
37  * uvm_aobj.c: anonymous memory uvm_object pager
38  *
39  * author: Chuck Silvers <chuq@chuq.com>
40  * started: Jan-1998
41  *
42  * - design mostly from Chuck Cranor
43  */
44 
45 
46 
47 #include "opt_uvmhist.h"
48 
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/proc.h>
52 #include <sys/malloc.h>
53 #include <sys/kernel.h>
54 #include <sys/pool.h>
56 
57 #include <uvm/uvm.h>
58 
59 /*
60  * an aobj manages anonymous-memory backed uvm_objects.   in addition
61  * to keeping the list of resident pages, it also keeps a list of
62  * allocated swap blocks.  depending on the size of the aobj this list
63  * of allocated swap blocks is either stored in an array (small objects)
64  * or in a hash table (large objects).
65  */
66 
67 /*
68  * local structures
69  */
70 
71 /*
72  * for hash tables, we break the address space of the aobj into blocks
73  * of UAO_SWHASH_CLUSTER_SIZE pages.   we require the cluster size to
74  * be a power of two.
75  */
76 
77 #define UAO_SWHASH_CLUSTER_SHIFT 4
78 #define UAO_SWHASH_CLUSTER_SIZE (1 << UAO_SWHASH_CLUSTER_SHIFT)
79 
80 /* get the "tag" for this page index */
81 #define UAO_SWHASH_ELT_TAG(PAGEIDX) \
82 	((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT)
83 
84 /* given an ELT and a page index, find the swap slot */
85 #define UAO_SWHASH_ELT_PAGESLOT(ELT, PAGEIDX) \
86 	((ELT)->slots[(PAGEIDX) & (UAO_SWHASH_CLUSTER_SIZE - 1)])
87 
88 /* given an ELT, return its pageidx base */
89 #define UAO_SWHASH_ELT_PAGEIDX_BASE(ELT) \
90 	((ELT)->tag << UAO_SWHASH_CLUSTER_SHIFT)
91 
92 /*
93  * the swhash hash function
94  */
95 #define UAO_SWHASH_HASH(AOBJ, PAGEIDX) \
96 	(&(AOBJ)->u_swhash[(((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT) \
97 			    & (AOBJ)->u_swhashmask)])
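
/*
 * worked example (illustrative only): with UAO_SWHASH_CLUSTER_SHIFT
 * of 4, page index 0x25 gives
 *
 *	tag    = UAO_SWHASH_ELT_TAG(0x25)      = 0x25 >> 4 = 0x2
 *	slot   = slots[0x25 & 0xf]             = slots[5]
 *	bucket = &u_swhash[0x2 & u_swhashmask]
 *
 * i.e. pages 0x20..0x2f of an aobj share the single uao_swhash_elt
 * whose tag is 2.
 */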
98 
99 /*
100  * the swhash threshold determines if we will use an array or a
101  * hash table to store the list of allocated swap blocks.
102  */
103 
104 #define UAO_SWHASH_THRESHOLD (UAO_SWHASH_CLUSTER_SIZE * 4)
105 #define UAO_USES_SWHASH(AOBJ) \
106 	((AOBJ)->u_pages > UAO_SWHASH_THRESHOLD)	/* use hash? */
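
/*
 * example: with the defaults above the threshold is 16 * 4 = 64 pages,
 * so an aobj of more than 64 pages (256KB with 4KB pages) keeps its
 * swap slots in the hash table, while smaller aobjs use the flat
 * u_swslots array.
 */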
107 
108 /*
109  * the number of buckets in a swhash, with an upper bound
110  */
111 #define UAO_SWHASH_MAXBUCKETS 256
112 #define UAO_SWHASH_BUCKETS(AOBJ) \
113 	(min((AOBJ)->u_pages >> UAO_SWHASH_CLUSTER_SHIFT, \
114 	     UAO_SWHASH_MAXBUCKETS))
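
/*
 * example: a 1024-page aobj asks for 1024 >> 4 = 64 buckets, while
 * anything of 4096 pages or more is capped at 256.  hashinit()
 * (called from uao_create() below) sizes the table to a power of two
 * and hands back the matching mask in u_swhashmask.
 */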
115 
116 
117 /*
118  * uao_swhash_elt: when a hash table is being used, this structure defines
119  * the format of an entry in the bucket list.
120  */
121 
122 struct uao_swhash_elt {
123 	LIST_ENTRY(uao_swhash_elt) list;	/* the hash list */
124 	voff_t tag;				/* our 'tag' */
125 	int count;				/* our number of active slots */
126 	int slots[UAO_SWHASH_CLUSTER_SIZE];	/* the slots */
127 };
128 
129 /*
130  * uao_swhash: the swap hash table structure
131  */
132 
133 LIST_HEAD(uao_swhash, uao_swhash_elt);
134 
135 /*
136  * uao_swhash_elt_pool: pool of uao_swhash_elt structures
137  */
138 
139 struct pool uao_swhash_elt_pool;
140 
141 /*
142  * uvm_aobj: the actual anon-backed uvm_object
143  *
144  * => the uvm_object is at the top of the structure, which allows
145  *   (struct uvm_aobj *) == (struct uvm_object *)
146  * => only one of u_swslots and u_swhash is used in any given aobj
147  */
148 
149 struct uvm_aobj {
150 	struct uvm_object u_obj; /* has: lock, pgops, memq, #pages, #refs */
151 	int u_pages;		 /* number of pages in entire object */
152 	int u_flags;		 /* the flags (see uvm_aobj.h) */
153 	int *u_swslots;		 /* array of offset->swapslot mappings */
154 				 /*
155 				  * hashtable of offset->swapslot mappings
156 				  * (u_swhash is an array of bucket heads)
157 				  */
158 	struct uao_swhash *u_swhash;
159 	u_long u_swhashmask;		/* mask for hashtable */
160 	LIST_ENTRY(uvm_aobj) u_list;	/* global list of aobjs */
161 };
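
/*
 * sketch of what the layout note above buys us: because u_obj is the
 * first member, a uvm_aobj pointer and a pointer to its embedded
 * uvm_object refer to the same address, e.g. (illustrative only):
 *
 *	struct uvm_aobj *aobj;
 *	struct uvm_object *uobj = &aobj->u_obj;	   (same address as aobj)
 *	aobj = (struct uvm_aobj *)uobj;		   (safe to convert back)
 *
 * this back-conversion is done at the top of uao_set_swslot(),
 * uao_detach_locked(), uao_flush() and uao_get() below.
 */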
162 
163 /*
164  * uvm_aobj_pool: pool of uvm_aobj structures
165  */
166 
167 struct pool uvm_aobj_pool;
168 
169 /*
170  * local functions
171  */
172 
173 static struct uao_swhash_elt	*uao_find_swhash_elt __P((struct uvm_aobj *,
174 							  int, boolean_t));
175 static int			 uao_find_swslot __P((struct uvm_aobj *, int));
176 static boolean_t		 uao_flush __P((struct uvm_object *,
177 						voff_t, voff_t, int));
178 static void			 uao_free __P((struct uvm_aobj *));
179 static int			 uao_get __P((struct uvm_object *, voff_t,
180 					      vm_page_t *, int *, int,
181 					      vm_prot_t, int, int));
182 static boolean_t		 uao_releasepg __P((struct vm_page *,
183 						    struct vm_page **));
184 static boolean_t		 uao_pagein __P((struct uvm_aobj *, int, int));
185 static boolean_t		 uao_pagein_page __P((struct uvm_aobj *, int));
186 
187 /*
188  * aobj_pager
189  *
190  * note that some functions (e.g. put) are handled elsewhere
191  */
192 
193 struct uvm_pagerops aobj_pager = {
194 	NULL,			/* init */
195 	uao_reference,		/* reference */
196 	uao_detach,		/* detach */
197 	NULL,			/* fault */
198 	uao_flush,		/* flush */
199 	uao_get,		/* get */
200 	NULL,			/* put (done by pagedaemon) */
201 	NULL,			/* cluster */
202 	NULL,			/* mk_pcluster */
203 	uao_releasepg		/* releasepg */
204 };
205 
206 /*
207  * uao_list: global list of active aobjs, locked by uao_list_lock
208  */
209 
210 static LIST_HEAD(aobjlist, uvm_aobj) uao_list;
211 static simple_lock_data_t uao_list_lock;
212 
213 
214 /*
215  * functions
216  */
217 
218 /*
219  * hash table/array related functions
220  */
221 
222 /*
223  * uao_find_swhash_elt: find (or create) a hash table entry for a page
224  * offset.
225  *
226  * => the object should be locked by the caller
227  */
228 
229 static struct uao_swhash_elt *
230 uao_find_swhash_elt(aobj, pageidx, create)
231 	struct uvm_aobj *aobj;
232 	int pageidx;
233 	boolean_t create;
234 {
235 	struct uao_swhash *swhash;
236 	struct uao_swhash_elt *elt;
237 	voff_t page_tag;
238 
239 	swhash = UAO_SWHASH_HASH(aobj, pageidx); /* first hash to get bucket */
240 	page_tag = UAO_SWHASH_ELT_TAG(pageidx);	/* tag to search for */
241 
242 	/*
243 	 * now search the bucket for the requested tag
244 	 */
245 	LIST_FOREACH(elt, swhash, list) {
246 		if (elt->tag == page_tag)
247 			return(elt);
248 	}
249 
250 	/* fail now if we are not allowed to create a new entry in the bucket */
251 	if (!create)
252 		return NULL;
253 
254 
255 	/*
256 	 * allocate a new entry for the bucket and init/insert it in
257 	 */
258 	elt = pool_get(&uao_swhash_elt_pool, PR_WAITOK);
259 	LIST_INSERT_HEAD(swhash, elt, list);
260 	elt->tag = page_tag;
261 	elt->count = 0;
262 	memset(elt->slots, 0, sizeof(elt->slots));
263 
264 	return(elt);
265 }
266 
267 /*
268  * uao_find_swslot: find the swap slot number for an aobj/pageidx
269  *
270  * => object must be locked by caller
271  */
272 __inline static int
273 uao_find_swslot(aobj, pageidx)
274 	struct uvm_aobj *aobj;
275 	int pageidx;
276 {
277 
278 	/*
279 	 * if noswap flag is set, then we never return a slot
280 	 */
281 
282 	if (aobj->u_flags & UAO_FLAG_NOSWAP)
283 		return(0);
284 
285 	/*
286 	 * if hashing, look in hash table.
287 	 */
288 
289 	if (UAO_USES_SWHASH(aobj)) {
290 		struct uao_swhash_elt *elt =
291 		    uao_find_swhash_elt(aobj, pageidx, FALSE);
292 
293 		if (elt)
294 			return(UAO_SWHASH_ELT_PAGESLOT(elt, pageidx));
295 		else
296 			return(0);
297 	}
298 
299 	/*
300 	 * otherwise, look in the array
301 	 */
302 	return(aobj->u_swslots[pageidx]);
303 }
304 
305 /*
306  * uao_set_swslot: set the swap slot for a page in an aobj.
307  *
308  * => setting a slot to zero frees the slot
309  * => object must be locked by caller
310  */
311 int
312 uao_set_swslot(uobj, pageidx, slot)
313 	struct uvm_object *uobj;
314 	int pageidx, slot;
315 {
316 	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
317 	int oldslot;
318 	UVMHIST_FUNC("uao_set_swslot"); UVMHIST_CALLED(pdhist);
319 	UVMHIST_LOG(pdhist, "aobj %p pageidx %d slot %d",
320 	    aobj, pageidx, slot, 0);
321 
322 	/*
323 	 * if noswap flag is set, then we can't set a slot
324 	 */
325 
326 	if (aobj->u_flags & UAO_FLAG_NOSWAP) {
327 
328 		if (slot == 0)
329 			return(0);		/* a clear is ok */
330 
331 		/* but a set is not */
332 		printf("uao_set_swslot: uobj = %p\n", uobj);
333 	    panic("uao_set_swslot: attempt to set a slot on a NOSWAP object");
334 	}
335 
336 	/*
337 	 * are we using a hash table?  if so, add it in the hash.
338 	 */
339 
340 	if (UAO_USES_SWHASH(aobj)) {
341 		/*
342 		 * Avoid allocating an entry just to free it again if
343 		 * the page had no swap slot in the first place, and
344 		 * we are freeing.
345 		 */
346 		struct uao_swhash_elt *elt =
347 		    uao_find_swhash_elt(aobj, pageidx, slot ? TRUE : FALSE);
348 		if (elt == NULL) {
349 #ifdef DIAGNOSTIC
350 			if (slot)
351 				panic("uao_set_swslot: didn't create elt");
352 #endif
353 			return (0);
354 		}
355 
356 		oldslot = UAO_SWHASH_ELT_PAGESLOT(elt, pageidx);
357 		UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) = slot;
358 
359 		/*
360 		 * now adjust the elt's reference counter and free it if we've
361 		 * dropped it to zero.
362 		 */
363 
364 		/* an allocation? */
365 		if (slot) {
366 			if (oldslot == 0)
367 				elt->count++;
368 		} else {		/* freeing slot ... */
369 			if (oldslot)	/* to be safe */
370 				elt->count--;
371 
372 			if (elt->count == 0) {
373 				LIST_REMOVE(elt, list);
374 				pool_put(&uao_swhash_elt_pool, elt);
375 			}
376 		}
377 	} else {
378 		/* we are using an array */
379 		oldslot = aobj->u_swslots[pageidx];
380 		aobj->u_swslots[pageidx] = slot;
381 	}
382 	return (oldslot);
383 }
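
/*
 * usage note: the old slot is returned so the caller can release the
 * swap space the page used to occupy.  the common fetch-and-clear
 * pattern (this is exactly what uao_dropswap() below does):
 *
 *	slot = uao_set_swslot(uobj, pageidx, 0);
 *	if (slot)
 *		uvm_swap_free(slot, 1);
 */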
384 
385 /*
386  * end of hash/array functions
387  */
388 
389 /*
390  * uao_free: free all resources held by an aobj, and then free the aobj
391  *
392  * => the aobj should be dead
393  */
394 static void
395 uao_free(aobj)
396 	struct uvm_aobj *aobj;
397 {
398 
399 	simple_unlock(&aobj->u_obj.vmobjlock);
400 
401 	if (UAO_USES_SWHASH(aobj)) {
402 		int i, hashbuckets = aobj->u_swhashmask + 1;
403 
404 		/*
405 		 * free the swslots from each hash bucket,
406 		 * then the hash bucket, and finally the hash table itself.
407 		 */
408 		for (i = 0; i < hashbuckets; i++) {
409 			struct uao_swhash_elt *elt, *next;
410 
411 			for (elt = LIST_FIRST(&aobj->u_swhash[i]);
412 			     elt != NULL;
413 			     elt = next) {
414 				int j;
415 
416 				for (j = 0; j < UAO_SWHASH_CLUSTER_SIZE; j++) {
417 					int slot = elt->slots[j];
418 
419 					if (slot == 0) {
420 						continue;
421 					}
422 					uvm_swap_free(slot, 1);
423 
424 					/*
425 					 * this page is no longer
426 					 * only in swap.
427 					 */
428 					simple_lock(&uvm.swap_data_lock);
429 					uvmexp.swpgonly--;
430 					simple_unlock(&uvm.swap_data_lock);
431 				}
432 
433 				next = LIST_NEXT(elt, list);
434 				pool_put(&uao_swhash_elt_pool, elt);
435 			}
436 		}
437 		free(aobj->u_swhash, M_UVMAOBJ);
438 	} else {
439 		int i;
440 
441 		/*
442 		 * free the array
443 		 */
444 
445 		for (i = 0; i < aobj->u_pages; i++) {
446 			int slot = aobj->u_swslots[i];
447 
448 			if (slot) {
449 				uvm_swap_free(slot, 1);
450 
451 				/* this page is no longer only in swap. */
452 				simple_lock(&uvm.swap_data_lock);
453 				uvmexp.swpgonly--;
454 				simple_unlock(&uvm.swap_data_lock);
455 			}
456 		}
457 		free(aobj->u_swslots, M_UVMAOBJ);
458 	}
459 
460 	/*
461 	 * finally free the aobj itself
462 	 */
463 	pool_put(&uvm_aobj_pool, aobj);
464 }
465 
466 /*
467  * pager functions
468  */
469 
470 /*
471  * uao_create: create an aobj of the given size and return its uvm_object.
472  *
473  * => for normal use, flags are always zero
474  * => for the kernel object, the flags are:
475  *	UAO_FLAG_KERNOBJ - allocate the kernel object (can only happen once)
476  *	UAO_FLAG_KERNSWAP - enable swapping of kernel object (can only happen once)
477  */
478 struct uvm_object *
479 uao_create(size, flags)
480 	vsize_t size;
481 	int flags;
482 {
483 	static struct uvm_aobj kernel_object_store; /* home of kernel_object */
484 	static int kobj_alloced = 0;			/* not allocated yet */
485 	int pages = round_page(size) >> PAGE_SHIFT;
486 	struct uvm_aobj *aobj;
487 
488 	/*
489 	 * malloc a new aobj unless we are asked for the kernel object
490 	 */
491 	if (flags & UAO_FLAG_KERNOBJ) {		/* want kernel object? */
492 		if (kobj_alloced)
493 			panic("uao_create: kernel object already allocated");
494 
495 		aobj = &kernel_object_store;
496 		aobj->u_pages = pages;
497 		aobj->u_flags = UAO_FLAG_NOSWAP;	/* no swap to start */
498 		/* we are special, we never die */
499 		aobj->u_obj.uo_refs = UVM_OBJ_KERN;
500 		kobj_alloced = UAO_FLAG_KERNOBJ;
501 	} else if (flags & UAO_FLAG_KERNSWAP) {
502 		aobj = &kernel_object_store;
503 		if (kobj_alloced != UAO_FLAG_KERNOBJ)
504 		    panic("uao_create: asked to enable swap on kernel object");
505 		kobj_alloced = UAO_FLAG_KERNSWAP;
506 	} else {	/* normal object */
507 		aobj = pool_get(&uvm_aobj_pool, PR_WAITOK);
508 		aobj->u_pages = pages;
509 		aobj->u_flags = 0;		/* normal object */
510 		aobj->u_obj.uo_refs = 1;	/* start with 1 reference */
511 	}
512 
513 	/*
514  	 * allocate hash/array if necessary
515  	 *
516  	 * note: in the KERNSWAP case there is no need to worry about locking,
517  	 * since we are still booting and should be the only thread around.
518  	 */
519 	if (flags == 0 || (flags & UAO_FLAG_KERNSWAP) != 0) {
520 		int mflags = (flags & UAO_FLAG_KERNSWAP) != 0 ?
521 		    M_NOWAIT : M_WAITOK;
522 
523 		/* allocate hash table or array depending on object size */
524 		if (UAO_USES_SWHASH(aobj)) {
525 			aobj->u_swhash = hashinit(UAO_SWHASH_BUCKETS(aobj),
526 			    HASH_LIST, M_UVMAOBJ, mflags, &aobj->u_swhashmask);
527 			if (aobj->u_swhash == NULL)
528 				panic("uao_create: hashinit swhash failed");
529 		} else {
530 			aobj->u_swslots = malloc(pages * sizeof(int),
531 			    M_UVMAOBJ, mflags);
532 			if (aobj->u_swslots == NULL)
533 				panic("uao_create: malloc swslots failed");
534 			memset(aobj->u_swslots, 0, pages * sizeof(int));
535 		}
536 
537 		if (flags) {
538 			aobj->u_flags &= ~UAO_FLAG_NOSWAP; /* clear noswap */
539 			return(&aobj->u_obj);
540 			/* done! */
541 		}
542 	}
543 
544 	/*
545  	 * init aobj fields
546  	 */
547 	simple_lock_init(&aobj->u_obj.vmobjlock);
548 	aobj->u_obj.pgops = &aobj_pager;
549 	TAILQ_INIT(&aobj->u_obj.memq);
550 	aobj->u_obj.uo_npages = 0;
551 
552 	/*
553  	 * now that aobj is ready, add it to the global list
554  	 */
555 	simple_lock(&uao_list_lock);
556 	LIST_INSERT_HEAD(&uao_list, aobj, u_list);
557 	simple_unlock(&uao_list_lock);
558 
559 	/*
560  	 * done!
561  	 */
562 	return(&aobj->u_obj);
563 }
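
/*
 * usage sketch (illustrative): a normal caller creates an aobj with
 * flags of zero and drops its reference with uao_detach() when done:
 *
 *	struct uvm_object *uobj;
 *
 *	uobj = uao_create(size, 0);	(returns with one reference held)
 *	... map it, fault pages in, etc ...
 *	uao_detach(uobj);		(drops the ref, may free the aobj)
 *
 * the System V shared memory code, for one, backs its segments this
 * way; the kernel-object flags above are only used during bootstrap.
 */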
564 
565 
566 
567 /*
568  * uao_init: set up aobj pager subsystem
569  *
570  * => called at boot time from uvm_pager_init()
571  */
572 void
573 uao_init()
574 {
575 	static int uao_initialized;
576 
577 	if (uao_initialized)
578 		return;
579 	uao_initialized = TRUE;
580 
581 	LIST_INIT(&uao_list);
582 	simple_lock_init(&uao_list_lock);
583 
584 	/*
585 	 * NOTE: Pages for this pool must not come from a pageable
586 	 * kernel map!
587 	 */
588 	pool_init(&uao_swhash_elt_pool, sizeof(struct uao_swhash_elt),
589 	    0, 0, 0, "uaoeltpl", 0, NULL, NULL, M_UVMAOBJ);
590 
591 	pool_init(&uvm_aobj_pool, sizeof(struct uvm_aobj), 0, 0, 0,
592 	    "aobjpl", 0,
593 	    pool_page_alloc_nointr, pool_page_free_nointr, M_UVMAOBJ);
594 }
595 
596 /*
597  * uao_reference: add a ref to an aobj
598  *
599  * => aobj must be unlocked
600  * => just lock it and call the locked version
601  */
602 void
603 uao_reference(uobj)
604 	struct uvm_object *uobj;
605 {
606 	simple_lock(&uobj->vmobjlock);
607 	uao_reference_locked(uobj);
608 	simple_unlock(&uobj->vmobjlock);
609 }
610 
611 /*
612  * uao_reference_locked: add a ref to an aobj that is already locked
613  *
614  * => aobj must be locked
615  * this needs to be separate from the normal routine
616  * since sometimes we need to add a reference to an aobj when
617  * it's already locked.
618  */
619 void
620 uao_reference_locked(uobj)
621 	struct uvm_object *uobj;
622 {
623 	UVMHIST_FUNC("uao_reference"); UVMHIST_CALLED(maphist);
624 
625 	/*
626  	 * kernel_object already has plenty of references, leave it alone.
627  	 */
628 
629 	if (UVM_OBJ_IS_KERN_OBJECT(uobj))
630 		return;
631 
632 	uobj->uo_refs++;		/* bump! */
633 	UVMHIST_LOG(maphist, "<- done (uobj=0x%x, ref = %d)",
634 		    uobj, uobj->uo_refs,0,0);
635 }
636 
637 
638 /*
639  * uao_detach: drop a reference to an aobj
640  *
641  * => aobj must be unlocked
642  * => just lock it and call the locked version
643  */
644 void
645 uao_detach(uobj)
646 	struct uvm_object *uobj;
647 {
648 	simple_lock(&uobj->vmobjlock);
649 	uao_detach_locked(uobj);
650 }
651 
652 
653 /*
654  * uao_detach_locked: drop a reference to an aobj
655  *
656  * => aobj must be locked, and is unlocked (or freed) upon return.
657  * this needs to be separate from the normal routine
658  * since sometimes we need to detach from an aobj when
659  * it's already locked.
660  */
661 void
662 uao_detach_locked(uobj)
663 	struct uvm_object *uobj;
664 {
665 	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
666 	struct vm_page *pg, *pgnext;
667 	boolean_t busybody;
668 	UVMHIST_FUNC("uao_detach"); UVMHIST_CALLED(maphist);
669 
670 	/*
671  	 * detaching from kernel_object is a noop.
672  	 */
673 	if (UVM_OBJ_IS_KERN_OBJECT(uobj)) {
674 		simple_unlock(&uobj->vmobjlock);
675 		return;
676 	}
677 
678 	UVMHIST_LOG(maphist,"  (uobj=0x%x)  ref=%d", uobj,uobj->uo_refs,0,0);
679 	uobj->uo_refs--;				/* drop ref! */
680 	if (uobj->uo_refs) {				/* still more refs? */
681 		simple_unlock(&uobj->vmobjlock);
682 		UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0);
683 		return;
684 	}
685 
686 	/*
687  	 * remove the aobj from the global list.
688  	 */
689 	simple_lock(&uao_list_lock);
690 	LIST_REMOVE(aobj, u_list);
691 	simple_unlock(&uao_list_lock);
692 
693 	/*
694  	 * free all the pages that aren't PG_BUSY,
695 	 * mark for release any that are.
696  	 */
697 	busybody = FALSE;
698 	for (pg = TAILQ_FIRST(&uobj->memq); pg != NULL; pg = pgnext) {
699 		/* fetch the next page now, before pg can be freed below */
700 		pgnext = TAILQ_NEXT(pg, listq);
701 		if (pg->flags & PG_BUSY) {
702 			pg->flags |= PG_RELEASED;
703 			busybody = TRUE;
704 			continue;
705 		}
706 
707 		/* zap the mappings, free the swap slot, free the page */
708 		pmap_page_protect(pg, VM_PROT_NONE);
709 		uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
710 		uvm_lock_pageq();
711 		uvm_pagefree(pg);
712 		uvm_unlock_pageq();
713 	}
714 
715 	/*
716  	 * if we found any busy pages, we're done for now.
717  	 * mark the aobj for death, releasepg will finish up for us.
718  	 */
719 	if (busybody) {
720 		aobj->u_flags |= UAO_FLAG_KILLME;
721 		simple_unlock(&aobj->u_obj.vmobjlock);
722 		return;
723 	}
724 
725 	/*
726  	 * finally, free the rest.
727  	 */
728 	uao_free(aobj);
729 }
730 
731 /*
732  * uao_flush: "flush" pages out of a uvm object
733  *
734  * => object should be locked by caller.  we may _unlock_ the object
735  *	if (and only if) we need to clean a page (PGO_CLEANIT).
736  *	XXXJRT Currently, however, we don't.  In the case of cleaning
737  *	XXXJRT a page, we simply just deactivate it.  Should probably
738  *	XXXJRT handle this better, in the future (although "flushing"
739  *	XXXJRT anonymous memory isn't terribly important).
740  * => if PGO_CLEANIT is not set, then we will neither unlock the object
741  *	nor block.
742  * => if PGO_ALLPAGE is set, then all pages in the object are valid targets
743  *	for flushing.
744  * => NOTE: we rely on the fact that the object's memq is a TAILQ and
745  *	that new pages are inserted on the tail end of the list.  thus,
746  *	we can make a complete pass through the object in one go by starting
747  *	at the head and working towards the tail (new pages are put in
748  *	front of us).
749  * => NOTE: we are allowed to lock the page queues, so the caller
750  *	must not be holding the lock on them [e.g. pagedaemon had
751  *	better not call us with the queues locked]
752  * => we return TRUE unless we encountered some sort of I/O error
753  *	XXXJRT currently never happens, as we never directly initiate
754  *	XXXJRT I/O
755  *
756  * comment on "cleaning" object and PG_BUSY pages:
757  *	this routine is holding the lock on the object.  the only time
758  *	that it can run into a PG_BUSY page that it does not own is if
759  *	some other process has started I/O on the page (e.g. either
760  *	a pagein or a pageout).  if the PG_BUSY page is being paged
761  *	in, then it can not be dirty (!PG_CLEAN) because no one has
762  *	had a chance to modify it yet.  if the PG_BUSY page is being
763  *	paged out then it means that someone else has already started
764  *	cleaning the page for us (how nice!).  in this case, if we
765  *	have syncio specified, then after we make our pass through the
766  *	object we need to wait for the other PG_BUSY pages to clear
767  *	off (i.e. we need to do an iosync).  also note that once a
768  *	page is PG_BUSY it must stay in its object until it is un-busied.
769  *	XXXJRT We never actually do this, as we are "flushing" anonymous
770  *	XXXJRT memory, which doesn't have persistent backing store.
771  *
772  * note on page traversal:
773  *	we can traverse the pages in an object either by going down the
774  *	linked list in "uobj->memq", or we can go over the address range
775  *	by page doing hash table lookups for each address.  depending
776  *	on how many pages are in the object it may be cheaper to do one
777  *	or the other.  we set "by_list" to true if we are using memq.
778  *	if the cost of a hash lookup was equal to the cost of the list
779  *	traversal we could compare the number of pages in the start->stop
780  *	range to the total number of pages in the object.  however, it
781  *	seems that a hash table lookup is more expensive than the linked
782  *	list traversal, so we multiply the number of pages in the
783  *	start->stop range by a penalty which we define below.
784  */
785 
786 #define	UAO_HASH_PENALTY 4	/* XXX: a guess */
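
/*
 * example: flushing a 16-page range of an object with 100 resident
 * pages compares 16 * 4 = 64 against 100; 100 > 64, so we do 16 hash
 * lookups rather than walk all 100 pages on memq.  with only 50 pages
 * resident (50 <= 64) we would walk memq instead.
 */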
787 
788 boolean_t
789 uao_flush(uobj, start, stop, flags)
790 	struct uvm_object *uobj;
791 	voff_t start, stop;
792 	int flags;
793 {
794 	struct uvm_aobj *aobj = (struct uvm_aobj *) uobj;
795 	struct vm_page *pp, *ppnext;
796 	boolean_t retval, by_list;
797 	voff_t curoff;
798 	UVMHIST_FUNC("uao_flush"); UVMHIST_CALLED(maphist);
799 
800 	curoff = 0;	/* XXX: shut up gcc */
801 
802 	retval = TRUE;	/* default to success */
803 
804 	if (flags & PGO_ALLPAGES) {
805 		start = 0;
806 		stop = (voff_t)aobj->u_pages << PAGE_SHIFT;
807 		by_list = TRUE;		/* always go by the list */
808 	} else {
809 		start = trunc_page(start);
810 		stop = round_page(stop);
811 		if (stop > ((voff_t)aobj->u_pages << PAGE_SHIFT)) {
812 			printf("uao_flush: strange, got an out of range "
813 			    "flush (fixed)\n");
814 			stop = (voff_t)aobj->u_pages << PAGE_SHIFT;
815 		}
816 		by_list = (uobj->uo_npages <=
817 		    ((stop - start) >> PAGE_SHIFT) * UAO_HASH_PENALTY);
818 	}
819 
820 	UVMHIST_LOG(maphist,
821 	    " flush start=0x%lx, stop=0x%x, by_list=%d, flags=0x%x",
822 	    start, stop, by_list, flags);
823 
824 	/*
825 	 * Don't need to do any work here if we're not freeing
826 	 * or deactivating pages.
827 	 */
828 	if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) {
829 		UVMHIST_LOG(maphist,
830 		    "<- done (no work to do)",0,0,0,0);
831 		return (retval);
832 	}
833 
834 	/*
835 	 * now do it.  note: we must update ppnext in the body of the loop or
836 	 * we will get stuck.  we need to use ppnext because we may free "pp"
837 	 * before doing the next loop iteration.
838 	 */
839 
840 	if (by_list) {
841 		pp = uobj->memq.tqh_first;
842 	} else {
843 		curoff = start;
844 		pp = uvm_pagelookup(uobj, curoff);
845 	}
846 
847 	ppnext = NULL;	/* XXX: shut up gcc */
848 	uvm_lock_pageq();	/* page queues locked */
849 
850 	/* locked: both page queues and uobj */
851 	for ( ; (by_list && pp != NULL) ||
852 	    (!by_list && curoff < stop) ; pp = ppnext) {
853 		if (by_list) {
854 			ppnext = TAILQ_NEXT(pp, listq);
855 
856 			/* range check */
857 			if (pp->offset < start || pp->offset >= stop)
858 				continue;
859 		} else {
860 			curoff += PAGE_SIZE;
861 			if (curoff < stop)
862 				ppnext = uvm_pagelookup(uobj, curoff);
863 
864 			/* null check */
865 			if (pp == NULL)
866 				continue;
867 		}
868 
869 		switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {
870 		/*
871 		 * XXX In these first 3 cases, we always just
872 		 * XXX deactivate the page.  We may want to
873 		 * XXX handle the different cases more specifically
874 		 * XXX in the future.
875 		 */
876 		case PGO_CLEANIT|PGO_FREE:
877 		case PGO_CLEANIT|PGO_DEACTIVATE:
878 		case PGO_DEACTIVATE:
879  deactivate_it:
880 			/* skip the page if it's loaned or wired */
881 			if (pp->loan_count != 0 ||
882 			    pp->wire_count != 0)
883 				continue;
884 
885 			/* zap all mappings for the page. */
886 			pmap_page_protect(pp, VM_PROT_NONE);
887 
888 			/* ...and deactivate the page. */
889 			uvm_pagedeactivate(pp);
890 
891 			continue;
892 
893 		case PGO_FREE:
894 			/*
895 			 * If there are multiple references to
896 			 * the object, just deactivate the page.
897 			 */
898 			if (uobj->uo_refs > 1)
899 				goto deactivate_it;
900 
901 			/* XXX skip the page if it's loaned or wired */
902 			if (pp->loan_count != 0 ||
903 			    pp->wire_count != 0)
904 				continue;
905 
906 			/*
907 			 * mark the page as released if it's busy.
908 			 */
909 			if (pp->flags & PG_BUSY) {
910 				pp->flags |= PG_RELEASED;
911 				continue;
912 			}
913 
914 			/* zap all mappings for the page. */
915 			pmap_page_protect(pp, VM_PROT_NONE);
916 
917 			uao_dropswap(uobj, pp->offset >> PAGE_SHIFT);
918 			uvm_pagefree(pp);
919 
920 			continue;
921 
922 		default:
923 			panic("uao_flush: weird flags");
924 		}
925 #ifdef DIAGNOSTIC
926 		panic("uao_flush: unreachable code");
927 #endif
928 	}
929 
930 	uvm_unlock_pageq();
931 
932 	UVMHIST_LOG(maphist,
933 	    "<- done, rv=%d",retval,0,0,0);
934 	return (retval);
935 }
936 
937 /*
938  * uao_get: fetch me a page
939  *
940  * we have three cases:
941  * 1: page is resident     -> just return the page.
942  * 2: page is zero-fill    -> allocate a new page and zero it.
943  * 3: page is swapped out  -> fetch the page from swap.
944  *
945  * cases 1 and 2 can be handled with PGO_LOCKED, case 3 cannot.
946  * so, if the "center" page hits case 3 (or any page, with PGO_ALLPAGES),
947  * then we will need to return VM_PAGER_UNLOCK.
948  *
949  * => prefer map unlocked (not required)
950  * => object must be locked!  we will _unlock_ it before starting any I/O.
951  * => flags: PGO_ALLPAGES: get all of the pages
952  *           PGO_LOCKED: fault data structures are locked
953  * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
954  * => NOTE: caller must check for released pages!!
955  */
956 static int
957 uao_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
958 	struct uvm_object *uobj;
959 	voff_t offset;
960 	struct vm_page **pps;
961 	int *npagesp;
962 	int centeridx, advice, flags;
963 	vm_prot_t access_type;
964 {
965 	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
966 	voff_t current_offset;
967 	vm_page_t ptmp;
968 	int lcv, gotpages, maxpages, swslot, rv, pageidx;
969 	boolean_t done;
970 	UVMHIST_FUNC("uao_get"); UVMHIST_CALLED(pdhist);
971 
972 	UVMHIST_LOG(pdhist, "aobj=%p offset=%d, flags=%d",
973 		    aobj, offset, flags,0);
974 
975 	/*
976  	 * get number of pages
977  	 */
978 	maxpages = *npagesp;
979 
980 	/*
981  	 * step 1: handle the case where fault data structures are locked.
982  	 */
983 
984 	if (flags & PGO_LOCKED) {
985 		/*
986  		 * step 1a: get pages that are already resident.   only do
987 		 * this if the data structures are locked (i.e. the first
988 		 * time through).
989  		 */
990 
991 		done = TRUE;	/* be optimistic */
992 		gotpages = 0;	/* # of pages we got so far */
993 
994 		for (lcv = 0, current_offset = offset ; lcv < maxpages ;
995 		    lcv++, current_offset += PAGE_SIZE) {
996 			/* do we care about this page?  if not, skip it */
997 			if (pps[lcv] == PGO_DONTCARE)
998 				continue;
999 
1000 			ptmp = uvm_pagelookup(uobj, current_offset);
1001 
1002 			/*
1003  			 * if page is new, attempt to allocate the page,
1004 			 * zero-fill'd.
1005  			 */
1006 			if (ptmp == NULL && uao_find_swslot(aobj,
1007 			    current_offset >> PAGE_SHIFT) == 0) {
1008 				ptmp = uvm_pagealloc(uobj, current_offset,
1009 				    NULL, UVM_PGA_ZERO);
1010 				if (ptmp) {
1011 					/* new page */
1012 					ptmp->flags &= ~(PG_BUSY|PG_FAKE);
1013 					ptmp->pqflags |= PQ_AOBJ;
1014 					UVM_PAGE_OWN(ptmp, NULL);
1015 				}
1016 			}
1017 
1018 			/*
1019 			 * to be useful we must get a non-busy, non-released page
1020 			 */
1021 			if (ptmp == NULL ||
1022 			    (ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
1023 				if (lcv == centeridx ||
1024 				    (flags & PGO_ALLPAGES) != 0) {
1025 					done = FALSE; /* need to wait or do I/O */
1026 				}
1027 				continue;
1028 			}
1029 
1030 			/*
1031 			 * useful page: busy/lock it and plug it in our
1032 			 * result array
1033 			 */
1034 			/* caller must un-busy this page */
1035 			ptmp->flags |= PG_BUSY;
1036 			UVM_PAGE_OWN(ptmp, "uao_get1");
1037 			pps[lcv] = ptmp;
1038 			gotpages++;
1039 
1040 		}	/* "for" lcv loop */
1041 
1042 		/*
1043  		 * step 1b: now we've either done everything needed or we need
1044 		 * to unlock and do some waiting or I/O.
1045  		 */
1046 
1047 		UVMHIST_LOG(pdhist, "<- done (done=%d)", done, 0,0,0);
1048 
1049 		*npagesp = gotpages;
1050 		if (done)
1051 			/* bingo! */
1052 			return(VM_PAGER_OK);
1053 		else
1054 			/* EEK!   Need to unlock and I/O */
1055 			return(VM_PAGER_UNLOCK);
1056 	}
1057 
1058 	/*
1059  	 * step 2: get non-resident or busy pages.
1060  	 * object is locked.   data structures are unlocked.
1061  	 */
1062 
1063 	for (lcv = 0, current_offset = offset ; lcv < maxpages ;
1064 	    lcv++, current_offset += PAGE_SIZE) {
1065 
1066 		/*
1067 		 * - skip over pages we've already gotten or don't want
1068 		 * - skip over pages we don't _have_ to get
1069 		 */
1070 
1071 		if (pps[lcv] != NULL ||
1072 		    (lcv != centeridx && (flags & PGO_ALLPAGES) == 0))
1073 			continue;
1074 
1075 		pageidx = current_offset >> PAGE_SHIFT;
1076 
1077 		/*
1078  		 * we have yet to locate the current page (pps[lcv]).   we
1079 		 * first look for a page that is already at the current offset.
1080 		 * if we find a page, we check to see if it is busy or
1081 		 * released.  if that is the case, then we sleep on the page
1082 		 * until it is no longer busy or released and repeat the lookup.
1083 		 * if the page we found is neither busy nor released, then we
1084 		 * busy it (so we own it) and plug it into pps[lcv].   this
1085 		 * 'break's the following while loop and indicates we are
1086 		 * ready to move on to the next page in the "lcv" loop above.
1087  		 *
1088  		 * if we exit the while loop with pps[lcv] still set to NULL,
1089 		 * then it means that we allocated a new busy/fake/clean page
1090 		 * ptmp in the object and we need to do I/O to fill in the data.
1091  		 */
1092 
1093 		/* top of "pps" while loop */
1094 		while (pps[lcv] == NULL) {
1095 			/* look for a resident page */
1096 			ptmp = uvm_pagelookup(uobj, current_offset);
1097 
1098 			/* not resident?   allocate one now (if we can) */
1099 			if (ptmp == NULL) {
1100 
1101 				ptmp = uvm_pagealloc(uobj, current_offset,
1102 				    NULL, 0);
1103 
1104 				/* out of RAM? */
1105 				if (ptmp == NULL) {
1106 					simple_unlock(&uobj->vmobjlock);
1107 					UVMHIST_LOG(pdhist,
1108 					    "sleeping, ptmp == NULL\n",0,0,0,0);
1109 					uvm_wait("uao_getpage");
1110 					simple_lock(&uobj->vmobjlock);
1111 					/* goto top of pps while loop */
1112 					continue;
1113 				}
1114 
1115 				/*
1116 				 * safe with PQ's unlocked: because we just
1117 				 * alloc'd the page
1118 				 */
1119 				ptmp->pqflags |= PQ_AOBJ;
1120 
1121 				/*
1122 				 * got new page ready for I/O.  break pps while
1123 				 * loop.  pps[lcv] is still NULL.
1124 				 */
1125 				break;
1126 			}
1127 
1128 			/* page is there, see if we need to wait on it */
1129 			if ((ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
1130 				ptmp->flags |= PG_WANTED;
1131 				UVMHIST_LOG(pdhist,
1132 				    "sleeping, ptmp->flags 0x%x\n",
1133 				    ptmp->flags,0,0,0);
1134 				UVM_UNLOCK_AND_WAIT(ptmp, &uobj->vmobjlock,
1135 				    FALSE, "uao_get", 0);
1136 				simple_lock(&uobj->vmobjlock);
1137 				continue;	/* goto top of pps while loop */
1138 			}
1139 
1140 			/*
1141  			 * if we get here then the page has become resident and
1142 			 * unbusy between steps 1 and 2.  we busy it now (so we
1143 			 * own it) and set pps[lcv] (so that we exit the while
1144 			 * loop).
1145  			 */
1146 			/* we own it, caller must un-busy */
1147 			ptmp->flags |= PG_BUSY;
1148 			UVM_PAGE_OWN(ptmp, "uao_get2");
1149 			pps[lcv] = ptmp;
1150 		}
1151 
1152 		/*
1153  		 * if we own the valid page at the correct offset, pps[lcv] will
1154  		 * point to it.   nothing more to do except go to the next page.
1155  		 */
1156 		if (pps[lcv])
1157 			continue;			/* next lcv */
1158 
1159 		/*
1160  		 * we have a "fake/busy/clean" page that we just allocated.
1161  		 * do the needed "i/o", either reading from swap or zeroing.
1162  		 */
1163 		swslot = uao_find_swslot(aobj, pageidx);
1164 
1165 		/*
1166  		 * just zero the page if there's nothing in swap.
1167  		 */
1168 		if (swslot == 0) {
1170 			/*
1171 			 * page hasn't existed before, just zero it.
1172 			 */
1173 			uvm_pagezero(ptmp);
1174 		} else {
1175 			UVMHIST_LOG(pdhist, "pagein from swslot %d",
1176 			     swslot, 0,0,0);
1177 
1178 			/*
1179 			 * page in the swapped-out page.
1180 			 * unlock object for i/o, relock when done.
1181 			 */
1182 			simple_unlock(&uobj->vmobjlock);
1183 			rv = uvm_swap_get(ptmp, swslot, PGO_SYNCIO);
1184 			simple_lock(&uobj->vmobjlock);
1185 
1186 			/*
1187 			 * I/O done.  check for errors.
1188 			 */
1189 			if (rv != VM_PAGER_OK) {
1191 				UVMHIST_LOG(pdhist, "<- done (error=%d)",
1192 				    rv,0,0,0);
1193 				if (ptmp->flags & PG_WANTED)
1194 					wakeup(ptmp);
1195 
1196 				/*
1197 				 * remove the swap slot from the aobj
1198 				 * and mark the aobj as having no real slot.
1199 				 * don't free the swap slot, thus preventing
1200 				 * it from being used again.
1201 				 */
1202 				swslot = uao_set_swslot(&aobj->u_obj, pageidx,
1203 							SWSLOT_BAD);
1204 				uvm_swap_markbad(swslot, 1);
1205 
1206 				ptmp->flags &= ~(PG_WANTED|PG_BUSY);
1207 				UVM_PAGE_OWN(ptmp, NULL);
1208 				uvm_lock_pageq();
1209 				uvm_pagefree(ptmp);
1210 				uvm_unlock_pageq();
1211 
1212 				simple_unlock(&uobj->vmobjlock);
1213 				return (rv);
1214 			}
1215 		}
1216 
1217 		/*
1218  		 * we got the page!   clear the fake flag (indicates valid
1219 		 * data now in page) and plug into our result array.   note
1220 		 * that page is still busy.
1221  		 *
1222  		 * it is the caller's job to:
1223  		 * => check if the page is released
1224  		 * => unbusy the page
1225  		 * => activate the page
1226  		 */
1227 
1228 		ptmp->flags &= ~PG_FAKE;		/* data is valid ... */
1229 		pmap_clear_modify(ptmp);		/* ... and clean */
1230 		pps[lcv] = ptmp;
1231 
1232 	}	/* lcv loop */
1233 
1234 	/*
1235  	 * finally, unlock object and return.
1236  	 */
1237 
1238 	simple_unlock(&uobj->vmobjlock);
1239 	UVMHIST_LOG(pdhist, "<- done (OK)",0,0,0,0);
1240 	return(VM_PAGER_OK);
1241 }
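
/*
 * for a compact in-tree example of calling uao_get() see
 * uao_pagein_page() below: it passes flags of 0 (so case 3 I/O is
 * allowed), enters with the object locked, and relocks the object
 * itself after uao_get() returns unlocked.
 */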
1242 
1243 /*
1244  * uao_releasepg: handle released page in an aobj
1245  *
1246  * => "pg" is a PG_BUSY [caller owns it], PG_RELEASED page that we need
1247  *      to dispose of.
1248  * => caller must handle PG_WANTED case
1249  * => called with page's object locked, pageq's unlocked
1250  * => returns TRUE if page's object is still alive, FALSE if we
1251  *      killed the page's object.    if we return TRUE, then we
1252  *      return with the object locked.
1253  * => if (nextpgp != NULL) => we return the next page on the queue, and return
1254  *                              with the page queues locked [for pagedaemon]
1255  * => if (nextpgp == NULL) => we return with page queues unlocked [normal case]
1256  * => we kill the aobj if it is not referenced and we are supposed to
1257  *      kill it ("KILLME").
1258  */
1259 static boolean_t
1260 uao_releasepg(pg, nextpgp)
1261 	struct vm_page *pg;
1262 	struct vm_page **nextpgp;	/* OUT */
1263 {
1264 	struct uvm_aobj *aobj = (struct uvm_aobj *) pg->uobject;
1265 
1266 #ifdef DIAGNOSTIC
1267 	if ((pg->flags & PG_RELEASED) == 0)
1268 		panic("uao_releasepg: page not released!");
1269 #endif
1270 
1271 	/*
1272  	 * dispose of the page [caller handles PG_WANTED] and swap slot.
1273  	 */
1274 	pmap_page_protect(pg, VM_PROT_NONE);
1275 	uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
1276 	uvm_lock_pageq();
1277 	if (nextpgp)
1278 		*nextpgp = TAILQ_NEXT(pg, pageq); /* next page for daemon */
1279 	uvm_pagefree(pg);
1280 	if (!nextpgp)
1281 		uvm_unlock_pageq();		/* keep locked for daemon */
1282 
1283 	/*
1284  	 * if we're not killing the object, we're done.
1285  	 */
1286 	if ((aobj->u_flags & UAO_FLAG_KILLME) == 0)
1287 		return TRUE;
1288 	KASSERT(aobj->u_obj.uo_refs == 0);
1289 
1290 	/*
1291  	 * if there are still pages in the object, we're done for now.
1292  	 */
1293 	if (aobj->u_obj.uo_npages != 0)
1294 		return TRUE;
1295 
1296 #ifdef DIAGNOSTIC
1297 	if (TAILQ_FIRST(&aobj->u_obj.memq))
1298 		panic("uvn_releasepg: pages in object with npages == 0");
1299 #endif
1300 
1301 	/*
1302  	 * finally, free the rest.
1303  	 */
1304 	uao_free(aobj);
1305 
1306 	return FALSE;
1307 }
1308 
1309 
1310 /*
1311  * uao_dropswap:  release any swap resources from this aobj page.
1312  *
1313  * => aobj must be locked or have a reference count of 0.
1314  */
1315 
1316 void
1317 uao_dropswap(uobj, pageidx)
1318 	struct uvm_object *uobj;
1319 	int pageidx;
1320 {
1321 	int slot;
1322 
1323 	slot = uao_set_swslot(uobj, pageidx, 0);
1324 	if (slot) {
1325 		uvm_swap_free(slot, 1);
1326 	}
1327 }
1328 
1329 
1330 /*
1331  * uao_swap_off: page in every aobj page that is paged out to the given range of swslots.
1332  *
1333  * => nothing should be locked.
1334  * => returns TRUE if pagein was aborted due to lack of memory.
1335  */
1336 boolean_t
1337 uao_swap_off(startslot, endslot)
1338 	int startslot, endslot;
1339 {
1340 	struct uvm_aobj *aobj, *nextaobj;
1341 
1342 	/*
1343 	 * walk the list of all aobjs.
1344 	 */
1345 
1346 restart:
1347 	simple_lock(&uao_list_lock);
1348 
1349 	for (aobj = LIST_FIRST(&uao_list);
1350 	     aobj != NULL;
1351 	     aobj = nextaobj) {
1352 		boolean_t rv;
1353 
1354 		/*
1355 		 * try to get the object lock,
1356 		 * start all over if we fail.
1357 		 * most of the time we'll get the aobj lock,
1358 		 * so this should be a rare case.
1359 		 */
1360 		if (!simple_lock_try(&aobj->u_obj.vmobjlock)) {
1361 			simple_unlock(&uao_list_lock);
1362 			goto restart;
1363 		}
1364 
1365 		/*
1366 		 * add a ref to the aobj so it doesn't disappear
1367 		 * while we're working.
1368 		 */
1369 		uao_reference_locked(&aobj->u_obj);
1370 
1371 		/*
1372 		 * now it's safe to unlock the uao list.
1373 		 */
1374 		simple_unlock(&uao_list_lock);
1375 
1376 		/*
1377 		 * page in any pages in the swslot range.
1378 		 * if there's an error, abort and return the error.
1379 		 */
1380 		rv = uao_pagein(aobj, startslot, endslot);
1381 		if (rv) {
1382 			uao_detach_locked(&aobj->u_obj);
1383 			return rv;
1384 		}
1385 
1386 		/*
1387 		 * we're done with this aobj.
1388 		 * relock the list and drop our ref on the aobj.
1389 		 */
1390 		simple_lock(&uao_list_lock);
1391 		nextaobj = LIST_NEXT(aobj, u_list);
1392 		uao_detach_locked(&aobj->u_obj);
1393 	}
1394 
1395 	/*
1396 	 * done with traversal, unlock the list
1397 	 */
1398 	simple_unlock(&uao_list_lock);
1399 	return FALSE;
1400 }
1401 
1402 
1403 /*
1404  * uao_pagein: page in any pages from aobj in the given swslot range.
1405  *
1406  * => aobj must be locked and is returned locked.
1407  * => returns TRUE if pagein was aborted due to lack of memory.
1408  */
1409 static boolean_t
1410 uao_pagein(aobj, startslot, endslot)
1411 	struct uvm_aobj *aobj;
1412 	int startslot, endslot;
1413 {
1414 	boolean_t rv;
1415 
1416 	if (UAO_USES_SWHASH(aobj)) {
1417 		struct uao_swhash_elt *elt;
1418 		int bucket;
1419 
1420 restart:
1421 		for (bucket = aobj->u_swhashmask; bucket >= 0; bucket--) {
1422 			for (elt = LIST_FIRST(&aobj->u_swhash[bucket]);
1423 			     elt != NULL;
1424 			     elt = LIST_NEXT(elt, list)) {
1425 				int i;
1426 
1427 				for (i = 0; i < UAO_SWHASH_CLUSTER_SIZE; i++) {
1428 					int slot = elt->slots[i];
1429 
1430 					/*
1431 					 * if the slot isn't in range, skip it.
1432 					 */
1433 					if (slot < startslot ||
1434 					    slot >= endslot) {
1435 						continue;
1436 					}
1437 
1438 					/*
1439 					 * process the page,
1440 					 * then start over on this object
1441 					 * since the swhash elt
1442 					 * may have been freed.
1443 					 */
1444 					rv = uao_pagein_page(aobj,
1445 					  UAO_SWHASH_ELT_PAGEIDX_BASE(elt) + i);
1446 					if (rv) {
1447 						return rv;
1448 					}
1449 					goto restart;
1450 				}
1451 			}
1452 		}
1453 	} else {
1454 		int i;
1455 
1456 		for (i = 0; i < aobj->u_pages; i++) {
1457 			int slot = aobj->u_swslots[i];
1458 
1459 			/*
1460 			 * if the slot isn't in range, skip it
1461 			 */
1462 			if (slot < startslot || slot >= endslot) {
1463 				continue;
1464 			}
1465 
1466 			/*
1467 			 * process the page.
1468 			 */
1469 			rv = uao_pagein_page(aobj, i);
1470 			if (rv) {
1471 				return rv;
1472 			}
1473 		}
1474 	}
1475 
1476 	return FALSE;
1477 }
1478 
1479 /*
1480  * uao_pagein_page: page in one page from an aobj.  used for swap_off.
1481  * returns TRUE if pagein was aborted due to lack of memory.
1482  *
1483  * => aobj must be locked and is returned locked.
1484  */
1485 static boolean_t
1486 uao_pagein_page(aobj, pageidx)
1487 	struct uvm_aobj *aobj;
1488 	int pageidx;
1489 {
1490 	struct vm_page *pg;
1491 	int rv, slot, npages;
1492 
1493 	pg = NULL;
1494 	npages = 1;
1495 	/* locked: aobj */
1496 	rv = uao_get(&aobj->u_obj, (voff_t)pageidx << PAGE_SHIFT,
1497 		     &pg, &npages, 0, VM_PROT_READ|VM_PROT_WRITE, 0, 0);
1498 	/* unlocked: aobj */
1499 
1500 	/*
1501 	 * relock and finish up.
1502 	 */
1503 	simple_lock(&aobj->u_obj.vmobjlock);
1504 
1505 	switch (rv) {
1506 	case VM_PAGER_OK:
1507 		break;
1508 
1509 	case VM_PAGER_ERROR:
1510 	case VM_PAGER_REFAULT:
1511 		/*
1512 		 * nothing more to do on errors.
1513 		 * VM_PAGER_REFAULT can only mean that the anon was freed,
1514 		 * so again there's nothing to do.
1515 		 */
1516 		return FALSE;
1517 
1518 #ifdef DIAGNOSTIC
1519 	default:
1520 		panic("uao_pagein_page: uao_get -> %d\n", rv);
1521 #endif
1522 	}
1523 
1524 #ifdef DIAGNOSTIC
1525 	/*
1526 	 * this should never happen, since we have a reference on the aobj.
1527 	 */
1528 	if (pg->flags & PG_RELEASED) {
1529 		panic("uao_pagein_page: found PG_RELEASED page?\n");
1530 	}
1531 #endif
1532 
1533 	/*
1534 	 * ok, we've got the page now.
1535 	 * mark it as dirty, clear its swslot and un-busy it.
1536 	 */
1537 	slot = uao_set_swslot(&aobj->u_obj, pageidx, 0);
1538 	uvm_swap_free(slot, 1);
1539 	pg->flags &= ~(PG_BUSY|PG_CLEAN|PG_FAKE);
1540 	UVM_PAGE_OWN(pg, NULL);
1541 
1542 	/*
1543 	 * deactivate the page (to put it on a page queue).
1544 	 */
1545 	pmap_clear_reference(pg);
1546 	pmap_page_protect(pg, VM_PROT_NONE);
1547 	uvm_lock_pageq();
1548 	uvm_pagedeactivate(pg);
1549 	uvm_unlock_pageq();
1550 
1551 	return FALSE;
1552 }
1553