/*	$OpenBSD: uvm_aobj.c,v 1.24 2001/12/19 08:58:07 art Exp $	*/
/*	$NetBSD: uvm_aobj.c,v 1.39 2001/02/18 21:19:08 chs Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
 *                    Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_aobj.c,v 1.1.2.5 1998/02/06 05:14:38 chs Exp
 */
/*
 * uvm_aobj.c: anonymous memory uvm_object pager
 *
 * author: Chuck Silvers <chuq@chuq.com>
 * started: Jan-1998
 *
 * - design mostly from Chuck Cranor
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/pool.h>

#include <uvm/uvm.h>

/*
 * an aobj manages anonymous-memory backed uvm_objects.   in addition
 * to keeping the list of resident pages, it also keeps a list of
 * allocated swap blocks.  depending on the size of the aobj this list
 * of allocated swap blocks is either stored in an array (small objects)
 * or in a hash table (large objects).
 */

/*
 * local structures
 */

/*
 * for hash tables, we break the address space of the aobj into blocks
 * of UAO_SWHASH_CLUSTER_SIZE pages.   we require the cluster size to
 * be a power of two.
 */

#define UAO_SWHASH_CLUSTER_SHIFT 4
#define UAO_SWHASH_CLUSTER_SIZE (1 << UAO_SWHASH_CLUSTER_SHIFT)

/* get the "tag" for this page index */
#define UAO_SWHASH_ELT_TAG(PAGEIDX) \
	((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT)

/* given an ELT and a page index, find the swap slot */
#define UAO_SWHASH_ELT_PAGESLOT(ELT, PAGEIDX) \
	((ELT)->slots[(PAGEIDX) & (UAO_SWHASH_CLUSTER_SIZE - 1)])

/* given an ELT, return its pageidx base */
#define UAO_SWHASH_ELT_PAGEIDX_BASE(ELT) \
	((ELT)->tag << UAO_SWHASH_CLUSTER_SHIFT)

/*
 * the swhash hash function
 */
#define UAO_SWHASH_HASH(AOBJ, PAGEIDX) \
	(&(AOBJ)->u_swhash[(((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT) \
			    & (AOBJ)->u_swhashmask)])
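
/*
 * worked example (illustrative only): with UAO_SWHASH_CLUSTER_SHIFT == 4,
 * page index 0x35 belongs to the cluster with tag 0x3 (0x35 >> 4) and to
 * slot 0x5 (0x35 & 0xf) within that cluster's elt:
 *
 *	UAO_SWHASH_ELT_TAG(0x35)           == 0x3
 *	UAO_SWHASH_ELT_PAGESLOT(elt, 0x35) == elt->slots[0x5]
 *	UAO_SWHASH_ELT_PAGEIDX_BASE(elt)   == 0x30	(for tag 0x3)
 *
 * UAO_SWHASH_HASH() then picks the bucket by masking the same shifted
 * value (the tag) with u_swhashmask.
 */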

/*
 * the swhash threshold determines if we will use an array or a
 * hash table to store the list of allocated swap blocks.
 */

#define UAO_SWHASH_THRESHOLD (UAO_SWHASH_CLUSTER_SIZE * 4)
#define UAO_USES_SWHASH(AOBJ) \
	((AOBJ)->u_pages > UAO_SWHASH_THRESHOLD)	/* use hash? */
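
/*
 * worked numbers: with UAO_SWHASH_CLUSTER_SIZE == 16 the threshold is
 * 64 pages, so an aobj of up to 64 pages keeps its slots in a flat
 * array of 64 ints and anything larger switches to the hash table.
 * (with 4KB pages that crossover is a 256KB object; PAGE_SIZE is
 * machine-dependent, so the byte figure is only illustrative.)
 */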

/*
 * the number of buckets in a swhash, with an upper bound
 */
#define UAO_SWHASH_MAXBUCKETS 256
#define UAO_SWHASH_BUCKETS(AOBJ) \
	(min((AOBJ)->u_pages >> UAO_SWHASH_CLUSTER_SHIFT, \
	     UAO_SWHASH_MAXBUCKETS))
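
/*
 * e.g. a 1024-page aobj asks hashinit() for min(1024 >> 4, 256) == 64
 * buckets; the 256-bucket cap keeps the table small even for huge
 * objects, at the cost of longer chains.
 */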


/*
 * uao_swhash_elt: when a hash table is being used, this structure defines
 * the format of an entry in the bucket list.
 */

struct uao_swhash_elt {
	LIST_ENTRY(uao_swhash_elt) list;	/* the hash list */
	voff_t tag;				/* our 'tag' */
	int count;				/* our number of active slots */
	int slots[UAO_SWHASH_CLUSTER_SIZE];	/* the slots */
};

/*
 * uao_swhash: the swap hash table structure
 */

LIST_HEAD(uao_swhash, uao_swhash_elt);

/*
 * uao_swhash_elt_pool: pool of uao_swhash_elt structures
 */

struct pool uao_swhash_elt_pool;

/*
 * uvm_aobj: the actual anon-backed uvm_object
 *
 * => the uvm_object is at the top of the structure, this allows
 *   (struct uvm_aobj *) == (struct uvm_object *)
 * => only one of u_swslots and u_swhash is used in any given aobj
 */

struct uvm_aobj {
	struct uvm_object u_obj; /* has: lock, pgops, memq, #pages, #refs */
	int u_pages;		 /* number of pages in entire object */
	int u_flags;		 /* the flags (see uvm_aobj.h) */
	int *u_swslots;		 /* array of offset->swapslot mappings */
				 /*
				  * hashtable of offset->swapslot mappings
				  * (u_swhash is an array of bucket heads)
				  */
	struct uao_swhash *u_swhash;
	u_long u_swhashmask;		/* mask for hashtable */
	LIST_ENTRY(uvm_aobj) u_list;	/* global list of aobjs */
};
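
/*
 * because u_obj is the first member, the pager routines below recover
 * the aobj from a generic uvm_object pointer with a plain cast, e.g.
 * (as uao_set_swslot() does):
 *
 *	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
 */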

/*
 * uvm_aobj_pool: pool of uvm_aobj structures
 */

struct pool uvm_aobj_pool;

/*
 * local functions
 */

static struct uao_swhash_elt	*uao_find_swhash_elt __P((struct uvm_aobj *,
							  int, boolean_t));
static int			 uao_find_swslot __P((struct uvm_aobj *, int));
static boolean_t		 uao_flush __P((struct uvm_object *,
						voff_t, voff_t, int));
static void			 uao_free __P((struct uvm_aobj *));
static int			 uao_get __P((struct uvm_object *, voff_t,
					      vm_page_t *, int *, int,
					      vm_prot_t, int, int));
static boolean_t		 uao_releasepg __P((struct vm_page *,
						    struct vm_page **));
static boolean_t		 uao_pagein __P((struct uvm_aobj *, int, int));
static boolean_t		 uao_pagein_page __P((struct uvm_aobj *, int));

/*
 * aobj_pager
 *
 * note that some functions (e.g. put) are handled elsewhere
 */

struct uvm_pagerops aobj_pager = {
	NULL,			/* init */
	uao_reference,		/* reference */
	uao_detach,		/* detach */
	NULL,			/* fault */
	uao_flush,		/* flush */
	uao_get,		/* get */
	NULL,			/* put (done by pagedaemon) */
	NULL,			/* cluster */
	NULL,			/* mk_pcluster */
	uao_releasepg		/* releasepg */
};

/*
 * uao_list: global list of active aobjs, locked by uao_list_lock
 */

static LIST_HEAD(aobjlist, uvm_aobj) uao_list;
static simple_lock_data_t uao_list_lock;


/*
 * functions
 */

/*
 * hash table/array related functions
 */

/*
 * uao_find_swhash_elt: find (or create) a hash table entry for a page
 * offset.
 *
 * => the object should be locked by the caller
 */

static struct uao_swhash_elt *
uao_find_swhash_elt(aobj, pageidx, create)
	struct uvm_aobj *aobj;
	int pageidx;
	boolean_t create;
{
	struct uao_swhash *swhash;
	struct uao_swhash_elt *elt;
	voff_t page_tag;

	swhash = UAO_SWHASH_HASH(aobj, pageidx); /* first hash to get bucket */
	page_tag = UAO_SWHASH_ELT_TAG(pageidx);	/* tag to search for */

	/*
	 * now search the bucket for the requested tag
	 */
	LIST_FOREACH(elt, swhash, list) {
		if (elt->tag == page_tag)
			return(elt);
	}

	/* fail now if we are not allowed to create a new entry in the bucket */
	if (!create)
		return NULL;


	/*
	 * allocate a new entry for the bucket and init/insert it in
	 */
	elt = pool_get(&uao_swhash_elt_pool, PR_WAITOK);
	LIST_INSERT_HEAD(swhash, elt, list);
	elt->tag = page_tag;
	elt->count = 0;
	memset(elt->slots, 0, sizeof(elt->slots));

	return(elt);
}

/*
 * uao_find_swslot: find the swap slot number for an aobj/pageidx
 *
 * => object must be locked by caller
 */
__inline static int
uao_find_swslot(aobj, pageidx)
	struct uvm_aobj *aobj;
	int pageidx;
{

	/*
	 * if noswap flag is set, then we never return a slot
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP)
		return(0);

	/*
	 * if hashing, look in hash table.
	 */

	if (UAO_USES_SWHASH(aobj)) {
		struct uao_swhash_elt *elt =
		    uao_find_swhash_elt(aobj, pageidx, FALSE);

		if (elt)
			return(UAO_SWHASH_ELT_PAGESLOT(elt, pageidx));
		else
			return(0);
	}

	/*
	 * otherwise, look in the array
	 */
	return(aobj->u_swslots[pageidx]);
}

/*
 * uao_set_swslot: set the swap slot for a page in an aobj.
 *
 * => setting a slot to zero frees the slot
 * => object must be locked by caller
 */
int
uao_set_swslot(uobj, pageidx, slot)
	struct uvm_object *uobj;
	int pageidx, slot;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	int oldslot;
	UVMHIST_FUNC("uao_set_swslot"); UVMHIST_CALLED(pdhist);
	UVMHIST_LOG(pdhist, "aobj %p pageidx %d slot %d",
	    aobj, pageidx, slot, 0);

	/*
	 * if noswap flag is set, then we can't set a slot
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP) {

		if (slot == 0)
			return(0);		/* a clear is ok */

		/* but a set is not */
		printf("uao_set_swslot: uobj = %p\n", uobj);
		panic("uao_set_swslot: attempt to set a slot on a NOSWAP object");
	}

	/*
	 * are we using a hash table?  if so, add it in the hash.
	 */

	if (UAO_USES_SWHASH(aobj)) {

		/*
		 * Avoid allocating an entry just to free it again if
		 * the page had no swap slot in the first place, and
		 * we are freeing.
		 */

		struct uao_swhash_elt *elt =
		    uao_find_swhash_elt(aobj, pageidx, slot ? TRUE : FALSE);
		if (elt == NULL) {
			KASSERT(slot == 0);
			return (0);
		}

		oldslot = UAO_SWHASH_ELT_PAGESLOT(elt, pageidx);
		UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) = slot;

		/*
		 * now adjust the elt's reference counter and free it if we've
		 * dropped it to zero.
		 */

		/* an allocation? */
		if (slot) {
			if (oldslot == 0)
				elt->count++;
		} else {		/* freeing slot ... */
			if (oldslot)	/* to be safe */
				elt->count--;

			if (elt->count == 0) {
				LIST_REMOVE(elt, list);
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}
	} else {
		/* we are using an array */
		oldslot = aobj->u_swslots[pageidx];
		aobj->u_swslots[pageidx] = slot;
	}
	return (oldslot);
}
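
/*
 * usage sketch: a caller that wants to release a page's swap space
 * clears the slot and frees whatever was stored there; uao_dropswap()
 * near the end of this file is exactly this pattern:
 *
 *	slot = uao_set_swslot(uobj, pageidx, 0);
 *	if (slot)
 *		uvm_swap_free(slot, 1);
 */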

/*
 * end of hash/array functions
 */

/*
 * uao_free: free all resources held by an aobj, and then free the aobj
 *
 * => the aobj should be dead
 */
static void
uao_free(aobj)
	struct uvm_aobj *aobj;
{

	simple_unlock(&aobj->u_obj.vmobjlock);

	if (UAO_USES_SWHASH(aobj)) {
		int i, hashbuckets = aobj->u_swhashmask + 1;

		/*
		 * free the swslots from each hash bucket,
		 * then the hash bucket, and finally the hash table itself.
		 */
		for (i = 0; i < hashbuckets; i++) {
			struct uao_swhash_elt *elt, *next;

			for (elt = LIST_FIRST(&aobj->u_swhash[i]);
			     elt != NULL;
			     elt = next) {
				int j;

				for (j = 0; j < UAO_SWHASH_CLUSTER_SIZE; j++) {
					int slot = elt->slots[j];

					if (slot == 0) {
						continue;
					}
					uvm_swap_free(slot, 1);

					/*
					 * this page is no longer
					 * only in swap.
					 */
					simple_lock(&uvm.swap_data_lock);
					uvmexp.swpgonly--;
					simple_unlock(&uvm.swap_data_lock);
				}

				next = LIST_NEXT(elt, list);
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}
		free(aobj->u_swhash, M_UVMAOBJ);
	} else {
		int i;

		/*
		 * free the array
		 */

		for (i = 0; i < aobj->u_pages; i++) {
			int slot = aobj->u_swslots[i];

			if (slot) {
				uvm_swap_free(slot, 1);

				/* this page is no longer only in swap. */
				simple_lock(&uvm.swap_data_lock);
				uvmexp.swpgonly--;
				simple_unlock(&uvm.swap_data_lock);
			}
		}
		free(aobj->u_swslots, M_UVMAOBJ);
	}

	/*
	 * finally free the aobj itself
	 */
	pool_put(&uvm_aobj_pool, aobj);
}

/*
 * pager functions
 */

/*
 * uao_create: create an aobj of the given size and return its uvm_object.
 *
 * => for normal use, flags are always zero
 * => for the kernel object, the flags are:
 *	UAO_FLAG_KERNOBJ - allocate the kernel object (can only happen once)
 *	UAO_FLAG_KERNSWAP - enable swapping of kernel object ("           ")
 */
struct uvm_object *
uao_create(size, flags)
	vsize_t size;
	int flags;
{
	static struct uvm_aobj kernel_object_store; /* home of kernel_object */
	static int kobj_alloced = 0;			/* not allocated yet */
	int pages = round_page(size) >> PAGE_SHIFT;
	struct uvm_aobj *aobj;

	/*
	 * malloc a new aobj unless we are asked for the kernel object
	 */
	if (flags & UAO_FLAG_KERNOBJ) {		/* want kernel object? */
		if (kobj_alloced)
			panic("uao_create: kernel object already allocated");

		aobj = &kernel_object_store;
		aobj->u_pages = pages;
		aobj->u_flags = UAO_FLAG_NOSWAP;	/* no swap to start */
		/* we are special, we never die */
		aobj->u_obj.uo_refs = UVM_OBJ_KERN;
		kobj_alloced = UAO_FLAG_KERNOBJ;
	} else if (flags & UAO_FLAG_KERNSWAP) {
		aobj = &kernel_object_store;
		if (kobj_alloced != UAO_FLAG_KERNOBJ)
		    panic("uao_create: asked to enable swap on kernel object");
		kobj_alloced = UAO_FLAG_KERNSWAP;
	} else {	/* normal object */
		aobj = pool_get(&uvm_aobj_pool, PR_WAITOK);
		aobj->u_pages = pages;
		aobj->u_flags = 0;		/* normal object */
		aobj->u_obj.uo_refs = 1;	/* start with 1 reference */
	}

	/*
	 * allocate hash/array if necessary
	 *
	 * note: in the KERNSWAP case there is no need to worry about
	 * locking, since we are still booting and should be the only
	 * thread around.
	 */
	if (flags == 0 || (flags & UAO_FLAG_KERNSWAP) != 0) {
		int mflags = (flags & UAO_FLAG_KERNSWAP) != 0 ?
		    M_NOWAIT : M_WAITOK;

		/* allocate hash table or array depending on object size */
		if (UAO_USES_SWHASH(aobj)) {
			aobj->u_swhash = hashinit(UAO_SWHASH_BUCKETS(aobj),
			    M_UVMAOBJ, mflags, &aobj->u_swhashmask);
			if (aobj->u_swhash == NULL)
				panic("uao_create: hashinit swhash failed");
		} else {
			aobj->u_swslots = malloc(pages * sizeof(int),
			    M_UVMAOBJ, mflags);
			if (aobj->u_swslots == NULL)
				panic("uao_create: malloc swslots failed");
			memset(aobj->u_swslots, 0, pages * sizeof(int));
		}

		if (flags) {
			aobj->u_flags &= ~UAO_FLAG_NOSWAP; /* clear noswap */
			return(&aobj->u_obj);
			/* done! */
		}
	}

	/*
	 * init aobj fields
	 */
	simple_lock_init(&aobj->u_obj.vmobjlock);
	aobj->u_obj.pgops = &aobj_pager;
	TAILQ_INIT(&aobj->u_obj.memq);
	aobj->u_obj.uo_npages = 0;

	/*
	 * now that aobj is ready, add it to the global list
	 */
	simple_lock(&uao_list_lock);
	LIST_INSERT_HEAD(&uao_list, aobj, u_list);
	simple_unlock(&uao_list_lock);

	/*
	 * done!
	 */
	return(&aobj->u_obj);
}
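
/*
 * sketch of the two-phase kernel object setup described above; the
 * actual call sites live elsewhere (the km/swap initialization code),
 * so this is illustrative only:
 *
 *	kernel_object = uao_create(size, UAO_FLAG_KERNOBJ);	at boot
 *	(void) uao_create(0, UAO_FLAG_KERNSWAP);	once swap is configured
 *
 * the second call revisits the same static object and clears NOSWAP.
 */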



/*
 * uao_init: set up aobj pager subsystem
 *
 * => called at boot time from uvm_pager_init()
 */
void
uao_init()
{
	static int uao_initialized;

	if (uao_initialized)
		return;
	uao_initialized = TRUE;

	LIST_INIT(&uao_list);
	simple_lock_init(&uao_list_lock);

	/*
	 * NOTE: Pages for this pool must not come from a pageable
	 * kernel map!
	 */
	pool_init(&uao_swhash_elt_pool, sizeof(struct uao_swhash_elt),
	    0, 0, 0, "uaoeltpl", 0, NULL, NULL, M_UVMAOBJ);

	pool_init(&uvm_aobj_pool, sizeof(struct uvm_aobj), 0, 0, 0,
	    "aobjpl", 0,
	    pool_page_alloc_nointr, pool_page_free_nointr, M_UVMAOBJ);
}

/*
 * uao_reference: add a ref to an aobj
 *
 * => aobj must be unlocked
 * => just lock it and call the locked version
 */
void
uao_reference(uobj)
	struct uvm_object *uobj;
{
	simple_lock(&uobj->vmobjlock);
	uao_reference_locked(uobj);
	simple_unlock(&uobj->vmobjlock);
}

/*
 * uao_reference_locked: add a ref to an aobj that is already locked
 *
 * => aobj must be locked
 * this needs to be separate from the normal routine
 * since sometimes we need to add a reference to an aobj when
 * it's already locked.
 */
void
uao_reference_locked(uobj)
	struct uvm_object *uobj;
{
	UVMHIST_FUNC("uao_reference"); UVMHIST_CALLED(maphist);

	/*
	 * kernel_object already has plenty of references, leave it alone.
	 */

	if (UVM_OBJ_IS_KERN_OBJECT(uobj))
		return;

	uobj->uo_refs++;		/* bump! */
	UVMHIST_LOG(maphist, "<- done (uobj=0x%x, ref = %d)",
		    uobj, uobj->uo_refs,0,0);
}


/*
 * uao_detach: drop a reference to an aobj
 *
 * => aobj must be unlocked
 * => just lock it and call the locked version
 */
void
uao_detach(uobj)
	struct uvm_object *uobj;
{
	simple_lock(&uobj->vmobjlock);
	uao_detach_locked(uobj);
}


/*
 * uao_detach_locked: drop a reference to an aobj
 *
 * => aobj must be locked, and is unlocked (or freed) upon return.
 * this needs to be separate from the normal routine
 * since sometimes we need to detach from an aobj when
 * it's already locked.
 */
void
uao_detach_locked(uobj)
	struct uvm_object *uobj;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct vm_page *pg;
	boolean_t busybody;
	UVMHIST_FUNC("uao_detach"); UVMHIST_CALLED(maphist);

	/*
	 * detaching from kernel_object is a noop.
	 */
	if (UVM_OBJ_IS_KERN_OBJECT(uobj)) {
		simple_unlock(&uobj->vmobjlock);
		return;
	}

	UVMHIST_LOG(maphist,"  (uobj=0x%x)  ref=%d", uobj,uobj->uo_refs,0,0);
	uobj->uo_refs--;				/* drop ref! */
	if (uobj->uo_refs) {				/* still more refs? */
		simple_unlock(&uobj->vmobjlock);
		UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0);
		return;
	}

	/*
	 * remove the aobj from the global list.
	 */
	simple_lock(&uao_list_lock);
	LIST_REMOVE(aobj, u_list);
	simple_unlock(&uao_list_lock);

	/*
	 * free all the pages that aren't PG_BUSY,
	 * mark for release any that are.
	 */
	busybody = FALSE;
	for (pg = TAILQ_FIRST(&uobj->memq);
	     pg != NULL;
	     pg = TAILQ_NEXT(pg, listq)) {
		if (pg->flags & PG_BUSY) {
			pg->flags |= PG_RELEASED;
			busybody = TRUE;
			continue;
		}

		/* zap the mappings, free the swap slot, free the page */
		pmap_page_protect(pg, VM_PROT_NONE);
		uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
		uvm_lock_pageq();
		uvm_pagefree(pg);
		uvm_unlock_pageq();
	}

	/*
	 * if we found any busy pages, we're done for now.
	 * mark the aobj for death, releasepg will finish up for us.
	 */
	if (busybody) {
		aobj->u_flags |= UAO_FLAG_KILLME;
		simple_unlock(&aobj->u_obj.vmobjlock);
		return;
	}

	/*
	 * finally, free the rest.
	 */
	uao_free(aobj);
}

/*
 * uao_flush: "flush" pages out of a uvm object
 *
 * => object should be locked by caller.  we may _unlock_ the object
 *	if (and only if) we need to clean a page (PGO_CLEANIT).
 *	XXXJRT Currently, however, we don't.  In the case of cleaning
 *	XXXJRT a page, we simply just deactivate it.  Should probably
 *	XXXJRT handle this better, in the future (although "flushing"
 *	XXXJRT anonymous memory isn't terribly important).
 * => if PGO_CLEANIT is not set, then we will neither unlock the object
 *	nor block.
 * => if PGO_ALLPAGES is set, then all pages in the object are valid targets
 *	for flushing.
 * => NOTE: we rely on the fact that the object's memq is a TAILQ and
 *	that new pages are inserted on the tail end of the list.  thus,
 *	we can make a complete pass through the object in one go by starting
 *	at the head and working towards the tail (new pages are put in
 *	front of us).
 * => NOTE: we are allowed to lock the page queues, so the caller
 *	must not be holding the lock on them [e.g. pagedaemon had
 *	better not call us with the queues locked]
 * => we return TRUE unless we encountered some sort of I/O error
 *	XXXJRT currently never happens, as we never directly initiate
 *	XXXJRT I/O
 *
 * comment on "cleaning" object and PG_BUSY pages:
 *	this routine is holding the lock on the object.  the only time
 *	that it can run into a PG_BUSY page that it does not own is if
 *	some other process has started I/O on the page (e.g. either
 *	a pagein or a pageout).  if the PG_BUSY page is being paged
 *	in, then it can not be dirty (!PG_CLEAN) because no one has
 *	had a chance to modify it yet.  if the PG_BUSY page is being
 *	paged out then it means that someone else has already started
 *	cleaning the page for us (how nice!).  in this case, if we
 *	have syncio specified, then after we make our pass through the
 *	object we need to wait for the other PG_BUSY pages to clear
 *	off (i.e. we need to do an iosync).  also note that once a
 *	page is PG_BUSY it must stay in its object until it is un-busied.
 *	XXXJRT We never actually do this, as we are "flushing" anonymous
 *	XXXJRT memory, which doesn't have persistent backing store.
 *
 * note on page traversal:
 *	we can traverse the pages in an object either by going down the
 *	linked list in "uobj->memq", or we can go over the address range
 *	by page doing hash table lookups for each address.  depending
 *	on how many pages are in the object it may be cheaper to do one
 *	or the other.  we set "by_list" to true if we are using memq.
 *	if the cost of a hash lookup were equal to the cost of the list
 *	traversal we could compare the number of pages in the start->stop
 *	range to the total number of pages in the object.  however, it
 *	seems that a hash table lookup is more expensive than the linked
 *	list traversal, so we multiply the number of pages in the
 *	start->stop range by a penalty which we define below.
 */

#define	UAO_HASH_PENALTY 4	/* XXX: a guess */
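
/*
 * e.g. flushing a 16-page range goes by the list only if the object has
 * at most 16 * UAO_HASH_PENALTY == 64 resident pages; with more pages
 * resident, per-offset uvm_pagelookup() calls are expected to be cheaper
 * than walking the whole memq.
 */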

boolean_t
uao_flush(uobj, start, stop, flags)
	struct uvm_object *uobj;
	voff_t start, stop;
	int flags;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *) uobj;
	struct vm_page *pp, *ppnext;
	boolean_t retval, by_list;
	voff_t curoff;
	UVMHIST_FUNC("uao_flush"); UVMHIST_CALLED(maphist);

	curoff = 0;	/* XXX: shut up gcc */

	retval = TRUE;	/* default to success */

	if (flags & PGO_ALLPAGES) {
		start = 0;
		stop = aobj->u_pages << PAGE_SHIFT;
		by_list = TRUE;		/* always go by the list */
	} else {
		start = trunc_page(start);
		stop = round_page(stop);
		if (stop > (aobj->u_pages << PAGE_SHIFT)) {
			printf("uao_flush: strange, got an out of range "
			    "flush (fixed)\n");
			stop = aobj->u_pages << PAGE_SHIFT;
		}
		by_list = (uobj->uo_npages <=
		    ((stop - start) >> PAGE_SHIFT) * UAO_HASH_PENALTY);
	}

	UVMHIST_LOG(maphist,
	    " flush start=0x%lx, stop=0x%x, by_list=%d, flags=0x%x",
	    start, stop, by_list, flags);

	/*
	 * Don't need to do any work here if we're not freeing
	 * or deactivating pages.
	 */
	if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) {
		UVMHIST_LOG(maphist,
		    "<- done (no work to do)",0,0,0,0);
		return (retval);
	}

	/*
	 * now do it.  note: we must update ppnext in the body of loop or we
	 * will get stuck.  we need to use ppnext because we may free "pp"
	 * before doing the next loop.
	 */

	if (by_list) {
		pp = uobj->memq.tqh_first;
	} else {
		curoff = start;
		pp = uvm_pagelookup(uobj, curoff);
	}

	ppnext = NULL;	/* XXX: shut up gcc */
	uvm_lock_pageq();	/* page queues locked */

	/* locked: both page queues and uobj */
	for ( ; (by_list && pp != NULL) ||
	    (!by_list && curoff < stop) ; pp = ppnext) {
		if (by_list) {
			ppnext = TAILQ_NEXT(pp, listq);

			/* range check */
			if (pp->offset < start || pp->offset >= stop)
				continue;
		} else {
			curoff += PAGE_SIZE;
			if (curoff < stop)
				ppnext = uvm_pagelookup(uobj, curoff);

			/* null check */
			if (pp == NULL)
				continue;
		}

		switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {
		/*
		 * XXX In these first 3 cases, we always just
		 * XXX deactivate the page.  We may want to
		 * XXX handle the different cases more specifically
		 * XXX in the future.
		 */
		case PGO_CLEANIT|PGO_FREE:
		case PGO_CLEANIT|PGO_DEACTIVATE:
		case PGO_DEACTIVATE:
 deactivate_it:
			/* skip the page if it's loaned or wired */
			if (pp->loan_count != 0 ||
			    pp->wire_count != 0)
				continue;

#ifdef UBC
			/* ...and deactivate the page. */
			pmap_clear_reference(pp);
#else
			/* zap all mappings for the page. */
			pmap_page_protect(pp, VM_PROT_NONE);

			/* ...and deactivate the page. */
#endif
			uvm_pagedeactivate(pp);

			continue;

		case PGO_FREE:
			/*
			 * If there are multiple references to
			 * the object, just deactivate the page.
			 */
			if (uobj->uo_refs > 1)
				goto deactivate_it;

			/* XXX skip the page if it's loaned or wired */
			if (pp->loan_count != 0 ||
			    pp->wire_count != 0)
				continue;

			/*
			 * mark the page as released if it's busy.
			 */
			if (pp->flags & PG_BUSY) {
				pp->flags |= PG_RELEASED;
				continue;
			}

			/* zap all mappings for the page. */
			pmap_page_protect(pp, VM_PROT_NONE);

			uao_dropswap(uobj, pp->offset >> PAGE_SHIFT);
			uvm_pagefree(pp);

			continue;

		default:
			panic("uao_flush: weird flags");
		}
	}

	uvm_unlock_pageq();

	UVMHIST_LOG(maphist,
	    "<- done, rv=%d",retval,0,0,0);
	return (retval);
}

/*
 * uao_get: fetch me a page
 *
 * we have three cases:
 * 1: page is resident     -> just return the page.
 * 2: page is zero-fill    -> allocate a new page and zero it.
 * 3: page is swapped out  -> fetch the page from swap.
 *
 * cases 1 and 2 can be handled with PGO_LOCKED, case 3 cannot.
 * so, if the "center" page hits case 3 (or any page, with PGO_ALLPAGES),
 * then we will need to return VM_PAGER_UNLOCK.
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */
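
/*
 * for an in-tree example of the simple synchronous case, see
 * uao_pagein_page() below, which fetches a single page with:
 *
 *	rv = uao_get(&aobj->u_obj, pageidx << PAGE_SHIFT,
 *		     &pg, &npages, 0, VM_PROT_READ|VM_PROT_WRITE, 0, 0);
 */
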
static int
uao_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
	struct uvm_object *uobj;
	voff_t offset;
	struct vm_page **pps;
	int *npagesp;
	int centeridx, advice, flags;
	vm_prot_t access_type;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	voff_t current_offset;
	vm_page_t ptmp;
	int lcv, gotpages, maxpages, swslot, rv, pageidx;
	boolean_t done;
	UVMHIST_FUNC("uao_get"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist, "aobj=%p offset=%d, flags=%d",
		    aobj, offset, flags,0);

	/*
	 * get number of pages
	 */
	maxpages = *npagesp;

	/*
	 * step 1: handle the case where fault data structures are locked.
	 */

	if (flags & PGO_LOCKED) {
		/*
		 * step 1a: get pages that are already resident.   only do
		 * this if the data structures are locked (i.e. the first
		 * time through).
		 */

		done = TRUE;	/* be optimistic */
		gotpages = 0;	/* # of pages we got so far */

		for (lcv = 0, current_offset = offset ; lcv < maxpages ;
		    lcv++, current_offset += PAGE_SIZE) {
			/* do we care about this page?  if not, skip it */
			if (pps[lcv] == PGO_DONTCARE)
				continue;

			ptmp = uvm_pagelookup(uobj, current_offset);

			/*
			 * if page is new, attempt to allocate the page,
			 * zero-fill'd.
			 */
			if (ptmp == NULL && uao_find_swslot(aobj,
			    current_offset >> PAGE_SHIFT) == 0) {
				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, UVM_PGA_ZERO);
				if (ptmp) {
					/* new page */
					ptmp->flags &= ~(PG_BUSY|PG_FAKE);
					ptmp->pqflags |= PQ_AOBJ;
					UVM_PAGE_OWN(ptmp, NULL);
				}
			}

			/*
			 * to be useful must get a non-busy, non-released page
			 */
			if (ptmp == NULL ||
			    (ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
				if (lcv == centeridx ||
				    (flags & PGO_ALLPAGES) != 0)
					/* need to do a wait or I/O! */
					done = FALSE;
				continue;
			}

			/*
			 * useful page: busy/lock it and plug it in our
			 * result array
			 */
			/* caller must un-busy this page */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get1");
			pps[lcv] = ptmp;
			gotpages++;

		}	/* "for" lcv loop */

		/*
		 * step 1b: now we've either done everything needed or we
		 * need to unlock and do some waiting or I/O.
		 */

		UVMHIST_LOG(pdhist, "<- done (done=%d)", done, 0,0,0);

		*npagesp = gotpages;
		if (done)
			/* bingo! */
			return(VM_PAGER_OK);
		else
			/* EEK!   Need to unlock and I/O */
			return(VM_PAGER_UNLOCK);
	}

	/*
	 * step 2: get non-resident or busy pages.
	 * object is locked.   data structures are unlocked.
	 */

	for (lcv = 0, current_offset = offset ; lcv < maxpages ;
	    lcv++, current_offset += PAGE_SIZE) {

		/*
		 * - skip over pages we've already gotten or don't want
		 * - skip over pages we don't _have_ to get
		 */

		if (pps[lcv] != NULL ||
		    (lcv != centeridx && (flags & PGO_ALLPAGES) == 0))
			continue;

		pageidx = current_offset >> PAGE_SHIFT;

		/*
		 * we have yet to locate the current page (pps[lcv]).   we
		 * first look for a page that is already at the current offset.
		 * if we find a page, we check to see if it is busy or
		 * released.  if that is the case, then we sleep on the page
		 * until it is no longer busy or released and repeat the lookup.
		 * if the page we found is neither busy nor released, then we
		 * busy it (so we own it) and plug it into pps[lcv].   this
		 * 'break's the following while loop and indicates we are
		 * ready to move on to the next page in the "lcv" loop above.
		 *
		 * if we exit the while loop with pps[lcv] still set to NULL,
		 * then it means that we allocated a new busy/fake/clean page
		 * ptmp in the object and we need to do I/O to fill in the data.
		 */

		/* top of "pps" while loop */
		while (pps[lcv] == NULL) {
			/* look for a resident page */
			ptmp = uvm_pagelookup(uobj, current_offset);

			/* not resident?   allocate one now (if we can) */
			if (ptmp == NULL) {

				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, 0);

				/* out of RAM? */
				if (ptmp == NULL) {
					simple_unlock(&uobj->vmobjlock);
					UVMHIST_LOG(pdhist,
					    "sleeping, ptmp == NULL\n",0,0,0,0);
					uvm_wait("uao_getpage");
					simple_lock(&uobj->vmobjlock);
					/* goto top of pps while loop */
					continue;
				}

				/*
				 * safe with PQ's unlocked: because we just
				 * alloc'd the page
				 */
				ptmp->pqflags |= PQ_AOBJ;

				/*
				 * got new page ready for I/O.  break pps while
				 * loop.  pps[lcv] is still NULL.
				 */
				break;
			}

			/* page is there, see if we need to wait on it */
			if ((ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
				ptmp->flags |= PG_WANTED;
				UVMHIST_LOG(pdhist,
				    "sleeping, ptmp->flags 0x%x\n",
				    ptmp->flags,0,0,0);
				UVM_UNLOCK_AND_WAIT(ptmp, &uobj->vmobjlock,
				    FALSE, "uao_get", 0);
				simple_lock(&uobj->vmobjlock);
				continue;	/* goto top of pps while loop */
			}

			/*
			 * if we get here then the page has become resident and
			 * unbusy between steps 1 and 2.  we busy it now (so we
			 * own it) and set pps[lcv] (so that we exit the while
			 * loop).
			 */
			/* we own it, caller must un-busy */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get2");
			pps[lcv] = ptmp;
		}

		/*
		 * if we own the valid page at the correct offset, pps[lcv] will
		 * point to it.   nothing more to do except go to the next page.
		 */
		if (pps[lcv])
			continue;			/* next lcv */

		/*
		 * we have a "fake/busy/clean" page that we just allocated.
		 * do the needed "i/o", either reading from swap or zeroing.
		 */
		swslot = uao_find_swslot(aobj, pageidx);

		/*
		 * just zero the page if there's nothing in swap.
		 */
		if (swslot == 0) {
			/*
			 * page hasn't existed before, just zero it.
			 */
			uvm_pagezero(ptmp);
		} else {
			UVMHIST_LOG(pdhist, "pagein from swslot %d",
			     swslot, 0,0,0);

			/*
			 * page in the swapped-out page.
			 * unlock object for i/o, relock when done.
			 */
			simple_unlock(&uobj->vmobjlock);
			rv = uvm_swap_get(ptmp, swslot, PGO_SYNCIO);
			simple_lock(&uobj->vmobjlock);

			/*
			 * I/O done.  check for errors.
			 */
			if (rv != VM_PAGER_OK) {
				UVMHIST_LOG(pdhist, "<- done (error=%d)",
				    rv,0,0,0);
				if (ptmp->flags & PG_WANTED)
					wakeup(ptmp);

				/*
				 * remove the swap slot from the aobj
				 * and mark the aobj as having no real slot.
				 * don't free the swap slot, thus preventing
				 * it from being used again.
				 */
				swslot = uao_set_swslot(&aobj->u_obj, pageidx,
							SWSLOT_BAD);
				uvm_swap_markbad(swslot, 1);

				ptmp->flags &= ~(PG_WANTED|PG_BUSY);
				UVM_PAGE_OWN(ptmp, NULL);
				uvm_lock_pageq();
				uvm_pagefree(ptmp);
				uvm_unlock_pageq();

				simple_unlock(&uobj->vmobjlock);
				return (rv);
			}
		}

		/*
		 * we got the page!   clear the fake flag (indicates valid
		 * data now in page) and plug into our result array.   note
		 * that page is still busy.
		 *
		 * it is the caller's job to:
		 * => check if the page is released
		 * => unbusy the page
		 * => activate the page
		 */

		ptmp->flags &= ~PG_FAKE;		/* data is valid ... */
		pmap_clear_modify(ptmp);		/* ... and clean */
		pps[lcv] = ptmp;

	}	/* lcv loop */

	/*
	 * finally, unlock object and return.
	 */

	simple_unlock(&uobj->vmobjlock);
	UVMHIST_LOG(pdhist, "<- done (OK)",0,0,0,0);
	return(VM_PAGER_OK);
}

/*
 * uao_releasepg: handle released page in an aobj
 *
 * => "pg" is a PG_BUSY [caller owns it], PG_RELEASED page that we need
 *      to dispose of.
 * => caller must handle PG_WANTED case
 * => called with page's object locked, pageq's unlocked
 * => returns TRUE if page's object is still alive, FALSE if we
 *      killed the page's object.    if we return TRUE, then we
 *      return with the object locked.
 * => if (nextpgp != NULL) => we return the next page on the queue, and return
 *                              with the page queues locked [for pagedaemon]
 * => if (nextpgp == NULL) => we return with page queues unlocked [normal case]
 * => we kill the aobj if it is not referenced and we are supposed to
 *      kill it ("KILLME").
 */
static boolean_t
uao_releasepg(pg, nextpgp)
	struct vm_page *pg;
	struct vm_page **nextpgp;	/* OUT */
{
	struct uvm_aobj *aobj = (struct uvm_aobj *) pg->uobject;

	KASSERT(pg->flags & PG_RELEASED);

	/*
	 * dispose of the page [caller handles PG_WANTED] and swap slot.
	 */
	pmap_page_protect(pg, VM_PROT_NONE);
	uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
	uvm_lock_pageq();
	if (nextpgp)
		*nextpgp = TAILQ_NEXT(pg, pageq); /* next page for daemon */
	uvm_pagefree(pg);
	if (!nextpgp)
		uvm_unlock_pageq();		/* keep locked for daemon */

	/*
	 * if we're not killing the object, we're done.
	 */
	if ((aobj->u_flags & UAO_FLAG_KILLME) == 0)
		return TRUE;
	KASSERT(aobj->u_obj.uo_refs == 0);

	/*
	 * if there are still pages in the object, we're done for now.
	 */
	if (aobj->u_obj.uo_npages != 0)
		return TRUE;

	KASSERT(TAILQ_EMPTY(&aobj->u_obj.memq));

	/*
	 * finally, free the rest.
	 */
	uao_free(aobj);

	return FALSE;
}


/*
 * uao_dropswap:  release any swap resources from this aobj page.
 *
 * => aobj must be locked or have a reference count of 0.
 */

void
uao_dropswap(uobj, pageidx)
	struct uvm_object *uobj;
	int pageidx;
{
	int slot;

	slot = uao_set_swslot(uobj, pageidx, 0);
	if (slot) {
		uvm_swap_free(slot, 1);
	}
}


/*
 * page in every page in every aobj that is paged-out to a range of swslots.
 *
 * => nothing should be locked.
 * => returns TRUE if pagein was aborted due to lack of memory.
 */
boolean_t
uao_swap_off(startslot, endslot)
	int startslot, endslot;
{
	struct uvm_aobj *aobj, *nextaobj;

	/*
	 * walk the list of all aobjs.
	 */

restart:
	simple_lock(&uao_list_lock);

	for (aobj = LIST_FIRST(&uao_list);
	     aobj != NULL;
	     aobj = nextaobj) {
		boolean_t rv;

		/*
		 * try to get the object lock,
		 * start all over if we fail.
		 * most of the time we'll get the aobj lock,
		 * so this should be a rare case.
		 */
		if (!simple_lock_try(&aobj->u_obj.vmobjlock)) {
			simple_unlock(&uao_list_lock);
			goto restart;
		}

		/*
		 * add a ref to the aobj so it doesn't disappear
		 * while we're working.
		 */
		uao_reference_locked(&aobj->u_obj);

		/*
		 * now it's safe to unlock the uao list.
		 */
		simple_unlock(&uao_list_lock);

		/*
		 * page in any pages in the swslot range.
		 * if there's an error, abort and return the error.
		 */
		rv = uao_pagein(aobj, startslot, endslot);
		if (rv) {
			uao_detach_locked(&aobj->u_obj);
			return rv;
		}

		/*
		 * we're done with this aobj.
		 * relock the list and drop our ref on the aobj.
		 */
		simple_lock(&uao_list_lock);
		nextaobj = LIST_NEXT(aobj, u_list);
		uao_detach_locked(&aobj->u_obj);
	}

	/*
	 * done with traversal, unlock the list
	 */
	simple_unlock(&uao_list_lock);
	return FALSE;
}


/*
 * page in any pages from aobj in the given range.
 *
 * => aobj must be locked and is returned locked.
 * => returns TRUE if pagein was aborted due to lack of memory.
 */
static boolean_t
uao_pagein(aobj, startslot, endslot)
	struct uvm_aobj *aobj;
	int startslot, endslot;
{
	boolean_t rv;

	if (UAO_USES_SWHASH(aobj)) {
		struct uao_swhash_elt *elt;
		int bucket;

restart:
		for (bucket = aobj->u_swhashmask; bucket >= 0; bucket--) {
			for (elt = LIST_FIRST(&aobj->u_swhash[bucket]);
			     elt != NULL;
			     elt = LIST_NEXT(elt, list)) {
				int i;

				for (i = 0; i < UAO_SWHASH_CLUSTER_SIZE; i++) {
					int slot = elt->slots[i];

					/*
					 * if the slot isn't in range, skip it.
					 */
					if (slot < startslot ||
					    slot >= endslot) {
						continue;
					}

					/*
					 * process the page,
					 * then start over on this object
					 * since the swhash elt
					 * may have been freed.
					 */
					rv = uao_pagein_page(aobj,
					  UAO_SWHASH_ELT_PAGEIDX_BASE(elt) + i);
					if (rv) {
						return rv;
					}
					goto restart;
				}
			}
		}
	} else {
		int i;

		for (i = 0; i < aobj->u_pages; i++) {
			int slot = aobj->u_swslots[i];

			/*
			 * if the slot isn't in range, skip it
			 */
			if (slot < startslot || slot >= endslot) {
				continue;
			}

			/*
			 * process the page.
			 */
			rv = uao_pagein_page(aobj, i);
			if (rv) {
				return rv;
			}
		}
	}

	return FALSE;
}

/*
 * page in a page from an aobj.  used for swap_off.
 * returns TRUE if pagein was aborted due to lack of memory.
 *
 * => aobj must be locked and is returned locked.
 */
static boolean_t
uao_pagein_page(aobj, pageidx)
	struct uvm_aobj *aobj;
	int pageidx;
{
	struct vm_page *pg;
	int rv, slot, npages;

	pg = NULL;
	npages = 1;
	/* locked: aobj */
	rv = uao_get(&aobj->u_obj, pageidx << PAGE_SHIFT,
		     &pg, &npages, 0, VM_PROT_READ|VM_PROT_WRITE, 0, 0);
	/* unlocked: aobj */

	/*
	 * relock and finish up.
	 */
	simple_lock(&aobj->u_obj.vmobjlock);

	switch (rv) {
	case VM_PAGER_OK:
		break;

	case VM_PAGER_ERROR:
	case VM_PAGER_REFAULT:
		/*
		 * nothing more to do on errors.
		 * VM_PAGER_REFAULT can only mean that the anon was freed,
		 * so again there's nothing to do.
		 */
		return FALSE;

	}
	KASSERT((pg->flags & PG_RELEASED) == 0);

	/*
	 * ok, we've got the page now.
	 * mark it as dirty, clear its swslot and un-busy it.
	 */
	slot = uao_set_swslot(&aobj->u_obj, pageidx, 0);
	uvm_swap_free(slot, 1);
	pg->flags &= ~(PG_BUSY|PG_CLEAN|PG_FAKE);
	UVM_PAGE_OWN(pg, NULL);

	/*
	 * deactivate the page (to put it on a page queue).
	 */
	pmap_clear_reference(pg);
#ifndef UBC
	pmap_page_protect(pg, VM_PROT_NONE);
#endif
	uvm_lock_pageq();
	uvm_pagedeactivate(pg);
	uvm_unlock_pageq();

	return FALSE;
}