/*	$NetBSD: uvm_aobj.c,v 1.64 2004/04/25 16:42:44 simonb Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
 *                    Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_aobj.c,v 1.1.2.5 1998/02/06 05:14:38 chs Exp
 */
/*
 * uvm_aobj.c: anonymous memory uvm_object pager
 *
 * author: Chuck Silvers <chuq@chuq.com>
 * started: Jan-1998
 *
 * - design mostly from Chuck Cranor
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.64 2004/04/25 16:42:44 simonb Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/pool.h>

#include <uvm/uvm.h>

/*
 * an aobj manages anonymous-memory backed uvm_objects.   in addition
 * to keeping the list of resident pages, it also keeps a list of
 * allocated swap blocks.  depending on the size of the aobj this list
 * of allocated swap blocks is either stored in an array (small objects)
 * or in a hash table (large objects).
 */

/*
 * local structures
 */

/*
 * for hash tables, we break the address space of the aobj into blocks
 * of UAO_SWHASH_CLUSTER_SIZE pages.   we require the cluster size to
 * be a power of two.
 */

#define UAO_SWHASH_CLUSTER_SHIFT 4
#define UAO_SWHASH_CLUSTER_SIZE (1 << UAO_SWHASH_CLUSTER_SHIFT)

/* get the "tag" for this page index */
#define UAO_SWHASH_ELT_TAG(PAGEIDX) \
	((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT)

/* given an ELT and a page index, find the swap slot */
#define UAO_SWHASH_ELT_PAGESLOT(ELT, PAGEIDX) \
	((ELT)->slots[(PAGEIDX) & (UAO_SWHASH_CLUSTER_SIZE - 1)])

/* given an ELT, return its pageidx base */
#define UAO_SWHASH_ELT_PAGEIDX_BASE(ELT) \
	((ELT)->tag << UAO_SWHASH_CLUSTER_SHIFT)

/*
 * the swhash hash function
 */

#define UAO_SWHASH_HASH(AOBJ, PAGEIDX) \
	(&(AOBJ)->u_swhash[(((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT) \
			    & (AOBJ)->u_swhashmask)])
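
/*
 * example: with UAO_SWHASH_CLUSTER_SHIFT == 4, clusters cover 16
 * pages each.  for page index 0x123, UAO_SWHASH_ELT_TAG gives 0x12,
 * UAO_SWHASH_ELT_PAGESLOT selects slots[0x3], and UAO_SWHASH_HASH
 * picks bucket u_swhash[0x12 & u_swhashmask].  conversely, an elt
 * with tag 0x12 covers page indexes 0x120 through 0x12f.
 */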

/*
 * the swhash threshold determines if we will use an array or a
 * hash table to store the list of allocated swap blocks.
 */

#define UAO_SWHASH_THRESHOLD (UAO_SWHASH_CLUSTER_SIZE * 4)
#define UAO_USES_SWHASH(AOBJ) \
	((AOBJ)->u_pages > UAO_SWHASH_THRESHOLD)	/* use hash? */

/*
 * the number of buckets in a swhash, with an upper bound
 */

#define UAO_SWHASH_MAXBUCKETS 256
#define UAO_SWHASH_BUCKETS(AOBJ) \
	(MIN((AOBJ)->u_pages >> UAO_SWHASH_CLUSTER_SHIFT, \
	     UAO_SWHASH_MAXBUCKETS))
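
/*
 * example: the threshold works out to 64 pages (256KB assuming 4KB
 * pages).  a 1024-page aobj is over the threshold, so it gets a hash
 * table of MIN(1024 >> 4, 256) == 64 buckets rather than a
 * 1024-entry slot array.
 */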

/*
 * uao_swhash_elt: when a hash table is being used, this structure defines
 * the format of an entry in the bucket list.
 */

struct uao_swhash_elt {
	LIST_ENTRY(uao_swhash_elt) list;	/* the hash list */
	voff_t tag;				/* our 'tag' */
	int count;				/* our number of active slots */
	int slots[UAO_SWHASH_CLUSTER_SIZE];	/* the slots */
};
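
/*
 * each elt records the swap slots for one cluster of pages.  count
 * tracks the number of nonzero slots; uao_set_swslot() below frees
 * the elt when count drops back to zero.
 */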

/*
 * uao_swhash: the swap hash table structure
 */

LIST_HEAD(uao_swhash, uao_swhash_elt);

/*
 * uao_swhash_elt_pool: pool of uao_swhash_elt structures
 * NOTE: Pages for this pool must not come from a pageable kernel map!
 */
POOL_INIT(uao_swhash_elt_pool, sizeof(struct uao_swhash_elt), 0, 0, 0,
    "uaoeltpl", NULL);
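
/*
 * (presumably because this pool is used while paging out: taking a
 * page fault on pageable pool memory from that context could
 * deadlock.)
 */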

/*
 * uvm_aobj: the actual anon-backed uvm_object
 *
 * => the uvm_object is at the top of the structure, this allows
 *   (struct uvm_aobj *) == (struct uvm_object *)
 * => only one of u_swslots and u_swhash is used in any given aobj
 */

struct uvm_aobj {
	struct uvm_object u_obj; /* has: lock, pgops, memq, #pages, #refs */
	int u_pages;		 /* number of pages in entire object */
	int u_flags;		 /* the flags (see uvm_aobj.h) */
	int *u_swslots;		 /* array of offset->swapslot mappings */
				 /*
				  * hashtable of offset->swapslot mappings
				  * (u_swhash is an array of bucket heads)
				  */
	struct uao_swhash *u_swhash;
	u_long u_swhashmask;		/* mask for hashtable */
	LIST_ENTRY(uvm_aobj) u_list;	/* global list of aobjs */
};
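
/*
 * because u_obj is the first member, the pager routines below can
 * recover the aobj from the generic object handle with a cast:
 *
 *	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
 */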

/*
 * uvm_aobj_pool: pool of uvm_aobj structures
 */
POOL_INIT(uvm_aobj_pool, sizeof(struct uvm_aobj), 0, 0, 0, "aobjpl",
    &pool_allocator_nointr);

MALLOC_DEFINE(M_UVMAOBJ, "UVM aobj", "UVM aobj and related structures");

/*
 * local functions
 */

static struct uao_swhash_elt *uao_find_swhash_elt
    (struct uvm_aobj *, int, boolean_t);

static void	uao_free(struct uvm_aobj *);
static int	uao_get(struct uvm_object *, voff_t, struct vm_page **,
		    int *, int, vm_prot_t, int, int);
static int	uao_put(struct uvm_object *, voff_t, voff_t, int);
static boolean_t uao_pagein(struct uvm_aobj *, int, int);
static boolean_t uao_pagein_page(struct uvm_aobj *, int);

/*
 * aobj_pager
 *
 * note that some functions (e.g. put) are handled elsewhere
 */

struct uvm_pagerops aobj_pager = {
	NULL,			/* init */
	uao_reference,		/* reference */
	uao_detach,		/* detach */
	NULL,			/* fault */
	uao_get,		/* get */
	uao_put,		/* flush */
};
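
/*
 * generic code reaches these through the object handle, e.g.
 * uobj->pgops->pgo_get(...); the entries above correspond to the
 * pgo_* members of struct uvm_pagerops.
 */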

/*
 * uao_list: global list of active aobjs, locked by uao_list_lock
 */

static LIST_HEAD(aobjlist, uvm_aobj) uao_list;
static struct simplelock uao_list_lock;

/*
 * functions
 */

/*
 * hash table/array related functions
 */

/*
 * uao_find_swhash_elt: find (or create) a hash table entry for a page
 * offset.
 *
 * => the object should be locked by the caller
 */

static struct uao_swhash_elt *
uao_find_swhash_elt(aobj, pageidx, create)
	struct uvm_aobj *aobj;
	int pageidx;
	boolean_t create;
{
	struct uao_swhash *swhash;
	struct uao_swhash_elt *elt;
	voff_t page_tag;

	swhash = UAO_SWHASH_HASH(aobj, pageidx);
	page_tag = UAO_SWHASH_ELT_TAG(pageidx);

	/*
	 * now search the bucket for the requested tag
	 */

	LIST_FOREACH(elt, swhash, list) {
		if (elt->tag == page_tag) {
			return elt;
		}
	}
	if (!create) {
		return NULL;
	}

	/*
	 * allocate a new entry for the bucket and init/insert it in
	 */

	elt = pool_get(&uao_swhash_elt_pool, PR_NOWAIT);
	if (elt == NULL) {
		return NULL;
	}
	LIST_INSERT_HEAD(swhash, elt, list);
	elt->tag = page_tag;
	elt->count = 0;
	memset(elt->slots, 0, sizeof(elt->slots));
	return elt;
}

/*
 * uao_find_swslot: find the swap slot number for an aobj/pageidx
 *
 * => object must be locked by caller
 */

int
uao_find_swslot(uobj, pageidx)
	struct uvm_object *uobj;
	int pageidx;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uao_swhash_elt *elt;

	/*
	 * if noswap flag is set, then we never return a slot
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP)
		return(0);

	/*
	 * if hashing, look in hash table.
	 */

	if (UAO_USES_SWHASH(aobj)) {
		elt = uao_find_swhash_elt(aobj, pageidx, FALSE);
		if (elt)
			return(UAO_SWHASH_ELT_PAGESLOT(elt, pageidx));
		else
			return(0);
	}

	/*
	 * otherwise, look in the array
	 */

	return(aobj->u_swslots[pageidx]);
}

/*
 * uao_set_swslot: set the swap slot for a page in an aobj.
 *
 * => setting a slot to zero frees the slot
 * => object must be locked by caller
 * => we return the old slot number, or -1 if we failed to allocate
 *    memory to record the new slot number
 */
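
/*
 * typical uses: callers record a slot with
 * uao_set_swslot(uobj, pageidx, slot), and uao_dropswap() below
 * releases one with
 *
 *	slot = uao_set_swslot(uobj, pageidx, 0);
 *	if (slot)
 *		uvm_swap_free(slot, 1);
 */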

int
uao_set_swslot(uobj, pageidx, slot)
	struct uvm_object *uobj;
	int pageidx, slot;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uao_swhash_elt *elt;
	int oldslot;
	UVMHIST_FUNC("uao_set_swslot"); UVMHIST_CALLED(pdhist);
	UVMHIST_LOG(pdhist, "aobj %p pageidx %d slot %d",
	    aobj, pageidx, slot, 0);

	/*
	 * if noswap flag is set, then we can't set a non-zero slot.
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP) {
		if (slot == 0)
			return(0);

		printf("uao_set_swslot: uobj = %p\n", uobj);
		panic("uao_set_swslot: NOSWAP object");
	}

	/*
	 * are we using a hash table?  if so, add it in the hash.
	 */

	if (UAO_USES_SWHASH(aobj)) {

		/*
		 * Avoid allocating an entry just to free it again if
		 * the page had no swap slot in the first place, and
		 * we are freeing.
		 */

		elt = uao_find_swhash_elt(aobj, pageidx, slot != 0);
		if (elt == NULL) {
			return slot ? -1 : 0;
		}

		oldslot = UAO_SWHASH_ELT_PAGESLOT(elt, pageidx);
		UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) = slot;

		/*
		 * now adjust the elt's reference counter and free it if we've
		 * dropped it to zero.
		 */

		if (slot) {
			if (oldslot == 0)
				elt->count++;
		} else {
			if (oldslot)
				elt->count--;

			if (elt->count == 0) {
				LIST_REMOVE(elt, list);
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}
	} else {
		/* we are using an array */
		oldslot = aobj->u_swslots[pageidx];
		aobj->u_swslots[pageidx] = slot;
	}
	return (oldslot);
}

/*
 * end of hash/array functions
 */

/*
 * uao_free: free all resources held by an aobj, and then free the aobj
 *
 * => the aobj should be dead
 */

static void
uao_free(aobj)
	struct uvm_aobj *aobj;
{
	int swpgonlydelta = 0;

	simple_unlock(&aobj->u_obj.vmobjlock);
	if (UAO_USES_SWHASH(aobj)) {
		int i, hashbuckets = aobj->u_swhashmask + 1;

		/*
		 * free the swslots from each hash bucket,
		 * then the hash bucket, and finally the hash table itself.
		 */

		for (i = 0; i < hashbuckets; i++) {
			struct uao_swhash_elt *elt, *next;

			for (elt = LIST_FIRST(&aobj->u_swhash[i]);
			     elt != NULL;
			     elt = next) {
				int j;

				for (j = 0; j < UAO_SWHASH_CLUSTER_SIZE; j++) {
					int slot = elt->slots[j];

					if (slot > 0) {
						uvm_swap_free(slot, 1);
						swpgonlydelta++;
					}
				}

				next = LIST_NEXT(elt, list);
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}
		free(aobj->u_swhash, M_UVMAOBJ);
	} else {
		int i;

		/*
		 * free the array
		 */

		for (i = 0; i < aobj->u_pages; i++) {
			int slot = aobj->u_swslots[i];

			if (slot > 0) {
				uvm_swap_free(slot, 1);
				swpgonlydelta++;
			}
		}
		free(aobj->u_swslots, M_UVMAOBJ);
	}

	/*
	 * finally free the aobj itself
	 */

	pool_put(&uvm_aobj_pool, aobj);

	/*
	 * adjust the counter of pages only in swap for all
	 * the swap slots we've freed.
	 */

	if (swpgonlydelta > 0) {
		simple_lock(&uvm.swap_data_lock);
		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
		uvmexp.swpgonly -= swpgonlydelta;
		simple_unlock(&uvm.swap_data_lock);
	}
}

/*
 * pager functions
 */

/*
 * uao_create: create an aobj of the given size and return its uvm_object.
 *
 * => for normal use, flags are always zero
 * => for the kernel object, the flags are:
 *	UAO_FLAG_KERNOBJ - allocate the kernel object (can only happen once)
 *	UAO_FLAG_KERNSWAP - enable swapping of kernel object ("           ")
 */
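
/*
 * the kernel object is thus set up in two steps: early in boot it is
 * created with UAO_FLAG_KERNOBJ and swapping disabled (NOSWAP); once
 * swap is configured, a second call with UAO_FLAG_KERNSWAP allocates
 * the swslot storage and clears the NOSWAP flag.
 */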

struct uvm_object *
uao_create(size, flags)
	vsize_t size;
	int flags;
{
	static struct uvm_aobj kernel_object_store;
	static int kobj_alloced = 0;
	int pages = round_page(size) >> PAGE_SHIFT;
	struct uvm_aobj *aobj;

	/*
	 * malloc a new aobj unless we are asked for the kernel object
	 */

	if (flags & UAO_FLAG_KERNOBJ) {
		KASSERT(!kobj_alloced);
		aobj = &kernel_object_store;
		aobj->u_pages = pages;
		aobj->u_flags = UAO_FLAG_NOSWAP;
		aobj->u_obj.uo_refs = UVM_OBJ_KERN;
		kobj_alloced = UAO_FLAG_KERNOBJ;
	} else if (flags & UAO_FLAG_KERNSWAP) {
		KASSERT(kobj_alloced == UAO_FLAG_KERNOBJ);
		aobj = &kernel_object_store;
		kobj_alloced = UAO_FLAG_KERNSWAP;
	} else {
		aobj = pool_get(&uvm_aobj_pool, PR_WAITOK);
		aobj->u_pages = pages;
		aobj->u_flags = 0;
		aobj->u_obj.uo_refs = 1;
	}

	/*
	 * allocate hash/array if necessary
	 *
	 * note: in the KERNSWAP case there is no need to worry about
	 * locking, since we are still booting and should be the only
	 * thread around.
	 */

	if (flags == 0 || (flags & UAO_FLAG_KERNSWAP) != 0) {
		int mflags = (flags & UAO_FLAG_KERNSWAP) != 0 ?
		    M_NOWAIT : M_WAITOK;

		/* allocate hash table or array depending on object size */
		if (UAO_USES_SWHASH(aobj)) {
			aobj->u_swhash = hashinit(UAO_SWHASH_BUCKETS(aobj),
			    HASH_LIST, M_UVMAOBJ, mflags, &aobj->u_swhashmask);
			if (aobj->u_swhash == NULL)
				panic("uao_create: hashinit swhash failed");
		} else {
			aobj->u_swslots = malloc(pages * sizeof(int),
			    M_UVMAOBJ, mflags);
			if (aobj->u_swslots == NULL)
				panic("uao_create: malloc swslots failed");
			memset(aobj->u_swslots, 0, pages * sizeof(int));
		}

		if (flags) {
			aobj->u_flags &= ~UAO_FLAG_NOSWAP; /* clear noswap */
			return(&aobj->u_obj);
		}
	}

	/*
	 * init aobj fields
	 */

	simple_lock_init(&aobj->u_obj.vmobjlock);
	aobj->u_obj.pgops = &aobj_pager;
	TAILQ_INIT(&aobj->u_obj.memq);
	aobj->u_obj.uo_npages = 0;

	/*
	 * now that aobj is ready, add it to the global list
	 */

	simple_lock(&uao_list_lock);
	LIST_INSERT_HEAD(&uao_list, aobj, u_list);
	simple_unlock(&uao_list_lock);
	return(&aobj->u_obj);
}

/*
 * uao_init: set up aobj pager subsystem
 *
 * => called at boot time from uvm_pager_init()
 */

void
uao_init(void)
{
	static int uao_initialized;

	if (uao_initialized)
		return;
	uao_initialized = TRUE;
	LIST_INIT(&uao_list);
	simple_lock_init(&uao_list_lock);
}

/*
 * uao_reference: add a ref to an aobj
 *
 * => aobj must be unlocked
 * => just lock it and call the locked version
 */

void
uao_reference(uobj)
	struct uvm_object *uobj;
{
	simple_lock(&uobj->vmobjlock);
	uao_reference_locked(uobj);
	simple_unlock(&uobj->vmobjlock);
}

/*
 * uao_reference_locked: add a ref to an aobj that is already locked
 *
 * => aobj must be locked
 * this needs to be separate from the normal routine
 * since sometimes we need to add a reference to an aobj when
 * it's already locked.
 */

void
uao_reference_locked(uobj)
	struct uvm_object *uobj;
{
	UVMHIST_FUNC("uao_reference"); UVMHIST_CALLED(maphist);

	/*
	 * kernel_object already has plenty of references, leave it alone.
	 */

	if (UVM_OBJ_IS_KERN_OBJECT(uobj))
		return;

	uobj->uo_refs++;
	UVMHIST_LOG(maphist, "<- done (uobj=0x%x, ref = %d)",
		    uobj, uobj->uo_refs,0,0);
}

/*
 * uao_detach: drop a reference to an aobj
 *
 * => aobj must be unlocked
 * => just lock it and call the locked version
 */

void
uao_detach(uobj)
	struct uvm_object *uobj;
{
	simple_lock(&uobj->vmobjlock);
	uao_detach_locked(uobj);
}

/*
 * uao_detach_locked: drop a reference to an aobj
 *
 * => aobj must be locked, and is unlocked (or freed) upon return.
 * this needs to be separate from the normal routine
 * since sometimes we need to detach from an aobj when
 * it's already locked.
 */

void
uao_detach_locked(uobj)
	struct uvm_object *uobj;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct vm_page *pg;
	UVMHIST_FUNC("uao_detach"); UVMHIST_CALLED(maphist);

	/*
	 * detaching from kernel_object is a noop.
	 */

	if (UVM_OBJ_IS_KERN_OBJECT(uobj)) {
		simple_unlock(&uobj->vmobjlock);
		return;
	}

	UVMHIST_LOG(maphist,"  (uobj=0x%x)  ref=%d", uobj,uobj->uo_refs,0,0);
	uobj->uo_refs--;
	if (uobj->uo_refs) {
		simple_unlock(&uobj->vmobjlock);
		UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0);
		return;
	}

	/*
	 * remove the aobj from the global list.
	 */

	simple_lock(&uao_list_lock);
	LIST_REMOVE(aobj, u_list);
	simple_unlock(&uao_list_lock);

	/*
	 * free all the pages left in the aobj.  for each page,
	 * when the page is no longer busy (and thus after any disk i/o that
	 * it's involved in is complete), release any swap resources and
	 * free the page itself.
	 */

	uvm_lock_pageq();
	while ((pg = TAILQ_FIRST(&uobj->memq)) != NULL) {
		pmap_page_protect(pg, VM_PROT_NONE);
		if (pg->flags & PG_BUSY) {
			pg->flags |= PG_WANTED;
			uvm_unlock_pageq();
			UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, FALSE,
			    "uao_det", 0);
			simple_lock(&uobj->vmobjlock);
			uvm_lock_pageq();
			continue;
		}
		uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
		uvm_pagefree(pg);
	}
	uvm_unlock_pageq();

	/*
	 * finally, free the aobj itself.
	 */

	uao_free(aobj);
}

/*
 * uao_put: flush pages out of a uvm object
 *
 * => object should be locked by caller.  we may _unlock_ the object
 *	if (and only if) we need to clean a page (PGO_CLEANIT).
 *	XXXJRT Currently, however, we don't.  In the case of cleaning
 *	XXXJRT a page, we simply just deactivate it.  Should probably
 *	XXXJRT handle this better, in the future (although "flushing"
 *	XXXJRT anonymous memory isn't terribly important).
 * => if PGO_CLEANIT is not set, then we will neither unlock the object
 *	nor block.
 * => if PGO_ALLPAGES is set, then all pages in the object are valid targets
 *	for flushing.
 * => NOTE: we rely on the fact that the object's memq is a TAILQ and
 *	that new pages are inserted on the tail end of the list.  thus,
 *	we can make a complete pass through the object in one go by starting
 *	at the head and working towards the tail (new pages are put in
 *	front of us).
 * => NOTE: we are allowed to lock the page queues, so the caller
 *	must not be holding the lock on them [e.g. the pagedaemon had
 *	better not call us with the queues locked]
 * => we return 0 unless we encountered some sort of I/O error
 *	XXXJRT currently never happens, as we never directly initiate
 *	XXXJRT I/O
 *
 * note on page traversal:
 *	we can traverse the pages in an object either by going down the
 *	linked list in "uobj->memq", or we can go over the address range
 *	by page doing hash table lookups for each address.  depending
 *	on how many pages are in the object it may be cheaper to do one
 *	or the other.  we set "by_list" to true if we are using memq.
 *	if the cost of a hash lookup was equal to the cost of the list
 *	traversal we could compare the number of pages in the start->stop
 *	range to the total number of pages in the object.  however, it
 *	seems that a hash table lookup is more expensive than the linked
 *	list traversal, so we multiply the number of pages in the
 *	start->stop range by a penalty which we define below.
 */
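
/*
 * i.e. with a penalty of P, we walk the list iff
 *
 *	uo_npages <= ((stop - start) >> PAGE_SHIFT) * P
 *
 * so flushing a small range of an object with many resident pages
 * goes by per-page hash lookups instead.
 */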

int
uao_put(uobj, start, stop, flags)
	struct uvm_object *uobj;
	voff_t start, stop;
	int flags;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct vm_page *pg, *nextpg, curmp, endmp;
	boolean_t by_list;
	voff_t curoff;
	UVMHIST_FUNC("uao_put"); UVMHIST_CALLED(maphist);

	curoff = 0;
	if (flags & PGO_ALLPAGES) {
		start = 0;
		stop = (voff_t)aobj->u_pages << PAGE_SHIFT;
		by_list = TRUE;		/* always go by the list */
	} else {
		start = trunc_page(start);
		stop = round_page(stop);
		if (stop > ((voff_t)aobj->u_pages << PAGE_SHIFT)) {
			printf("uao_put: strange, got an out of range "
			    "flush (fixed)\n");
			stop = (voff_t)aobj->u_pages << PAGE_SHIFT;
		}
		by_list = (uobj->uo_npages <=
		    ((stop - start) >> PAGE_SHIFT) * UVM_PAGE_HASH_PENALTY);
	}
	UVMHIST_LOG(maphist,
	    " flush start=0x%lx, stop=0x%x, by_list=%d, flags=0x%x",
	    start, stop, by_list, flags);

	/*
	 * Don't need to do any work here if we're not freeing
	 * or deactivating pages.
	 */

	if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) {
		simple_unlock(&uobj->vmobjlock);
		return 0;
	}

	/*
	 * Initialize the marker pages.  See the comment in
	 * genfs_putpages() also.
	 */

	curmp.uobject = uobj;
	curmp.offset = (voff_t)-1;
	curmp.flags = PG_BUSY;
	endmp.uobject = uobj;
	endmp.offset = (voff_t)-1;
	endmp.flags = PG_BUSY;

	/*
	 * now do it.  note: we must update nextpg in the body of the loop
	 * or we will get stuck.  we need to use nextpg if we'll traverse
	 * the list because we may free "pg" before doing the next loop.
	 */

	if (by_list) {
		TAILQ_INSERT_TAIL(&uobj->memq, &endmp, listq);
		nextpg = TAILQ_FIRST(&uobj->memq);
		PHOLD(curlwp);
	} else {
		curoff = start;
		nextpg = NULL;	/* Quell compiler warning */
	}

	uvm_lock_pageq();

	/* locked: both page queues and uobj */
	for (;;) {
		if (by_list) {
			pg = nextpg;
			if (pg == &endmp)
				break;
			nextpg = TAILQ_NEXT(pg, listq);
			if (pg->offset < start || pg->offset >= stop)
				continue;
		} else {
			if (curoff < stop) {
				pg = uvm_pagelookup(uobj, curoff);
				curoff += PAGE_SIZE;
			} else
				break;
			if (pg == NULL)
				continue;
		}
		switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {

		/*
		 * XXX In these first 3 cases, we always just
		 * XXX deactivate the page.  We may want to
		 * XXX handle the different cases more specifically
		 * XXX in the future.
		 */

		case PGO_CLEANIT|PGO_FREE:
		case PGO_CLEANIT|PGO_DEACTIVATE:
		case PGO_DEACTIVATE:
 deactivate_it:
			/* skip the page if it's loaned or wired */
			if (pg->loan_count != 0 || pg->wire_count != 0)
				continue;

			/* ...and deactivate the page. */
			pmap_clear_reference(pg);
			uvm_pagedeactivate(pg);
			continue;

		case PGO_FREE:

			/*
			 * If there are multiple references to
			 * the object, just deactivate the page.
			 */

			if (uobj->uo_refs > 1)
				goto deactivate_it;

			/* XXX skip the page if it's loaned or wired */
			if (pg->loan_count != 0 || pg->wire_count != 0)
				continue;

			/*
			 * wait and try again if the page is busy.
			 * otherwise free the swap slot and the page.
			 */

			pmap_page_protect(pg, VM_PROT_NONE);
			if (pg->flags & PG_BUSY) {
				if (by_list) {
					TAILQ_INSERT_BEFORE(pg, &curmp, listq);
				}
				pg->flags |= PG_WANTED;
				uvm_unlock_pageq();
				UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
				    "uao_put", 0);
				simple_lock(&uobj->vmobjlock);
				uvm_lock_pageq();
				if (by_list) {
					nextpg = TAILQ_NEXT(&curmp, listq);
					TAILQ_REMOVE(&uobj->memq, &curmp,
					    listq);
				} else
					curoff -= PAGE_SIZE;
				continue;
			}
			uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
			uvm_pagefree(pg);
			continue;
		}
	}
	uvm_unlock_pageq();
	if (by_list) {
		TAILQ_REMOVE(&uobj->memq, &endmp, listq);
		PRELE(curlwp);
	}
	simple_unlock(&uobj->vmobjlock);
	return 0;
}

/*
 * uao_get: fetch me a page
 *
 * we have three cases:
 * 1: page is resident     -> just return the page.
 * 2: page is zero-fill    -> allocate a new page and zero it.
 * 3: page is swapped out  -> fetch the page from swap.
 *
 * cases 1 and 2 can be handled with PGO_LOCKED, case 3 cannot.
 * so, if the "center" page hits case 3 (or any page, with PGO_ALLPAGES),
 * then we will need to return EBUSY.
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */
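
/*
 * e.g. the fault code first calls us with PGO_LOCKED, which forbids
 * sleeping; if the center page would need I/O we return EBUSY, and
 * the caller retries without PGO_LOCKED so that we may drop the
 * object lock and wait.
 */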

static int
uao_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
	struct uvm_object *uobj;
	voff_t offset;
	struct vm_page **pps;
	int *npagesp;
	int centeridx, advice, flags;
	vm_prot_t access_type;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	voff_t current_offset;
	struct vm_page *ptmp = NULL;	/* Quell compiler warning */
	int lcv, gotpages, maxpages, swslot, error, pageidx;
	boolean_t done;
	UVMHIST_FUNC("uao_get"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist, "aobj=%p offset=%d, flags=%d",
		    aobj, offset, flags,0);

	/*
	 * get number of pages
	 */

	maxpages = *npagesp;

	/*
	 * step 1: handle the case where fault data structures are locked.
	 */

	if (flags & PGO_LOCKED) {

		/*
		 * step 1a: get pages that are already resident.   only do
		 * this if the data structures are locked (i.e. the first
		 * time through).
		 */

		done = TRUE;	/* be optimistic */
		gotpages = 0;	/* # of pages we got so far */
		for (lcv = 0, current_offset = offset ; lcv < maxpages ;
		    lcv++, current_offset += PAGE_SIZE) {
			/* do we care about this page?  if not, skip it */
			if (pps[lcv] == PGO_DONTCARE)
				continue;
			ptmp = uvm_pagelookup(uobj, current_offset);

			/*
			 * if page is new, attempt to allocate the page,
			 * zero-fill'd.
			 */

			if (ptmp == NULL && uao_find_swslot(&aobj->u_obj,
			    current_offset >> PAGE_SHIFT) == 0) {
				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, UVM_PGA_ZERO);
				if (ptmp) {
					/* new page */
					ptmp->flags &= ~(PG_FAKE);
					ptmp->pqflags |= PQ_AOBJ;
					goto gotpage;
				}
			}

			/*
			 * to be useful must get a non-busy page
			 */

			if (ptmp == NULL || (ptmp->flags & PG_BUSY) != 0) {
				if (lcv == centeridx ||
				    (flags & PGO_ALLPAGES) != 0) {
					/* need to do a wait or I/O! */
					done = FALSE;
				}
				continue;
			}

			/*
			 * useful page: busy/lock it and plug it in our
			 * result array
			 */

			/* caller must un-busy this page */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get1");
gotpage:
			pps[lcv] = ptmp;
			gotpages++;
		}

		/*
		 * step 1b: now we've either done everything needed or we
		 * need to unlock and do some waiting or I/O.
		 */

		UVMHIST_LOG(pdhist, "<- done (done=%d)", done, 0,0,0);
		*npagesp = gotpages;
		if (done)
			return 0;
		else
			return EBUSY;
	}

	/*
	 * step 2: get non-resident or busy pages.
	 * object is locked.   data structures are unlocked.
	 */

	for (lcv = 0, current_offset = offset ; lcv < maxpages ;
	    lcv++, current_offset += PAGE_SIZE) {

		/*
		 * - skip over pages we've already gotten or don't want
		 * - skip over pages we don't _have_ to get
		 */

		if (pps[lcv] != NULL ||
		    (lcv != centeridx && (flags & PGO_ALLPAGES) == 0))
			continue;

		pageidx = current_offset >> PAGE_SHIFT;

		/*
		 * we have yet to locate the current page (pps[lcv]).   we
		 * first look for a page that is already at the current offset.
		 * if we find a page, we check to see if it is busy or
		 * released.  if that is the case, then we sleep on the page
		 * until it is no longer busy or released and repeat the lookup.
		 * if the page we found is neither busy nor released, then we
		 * busy it (so we own it) and plug it into pps[lcv].   this
		 * 'break's the following while loop and indicates we are
		 * ready to move on to the next page in the "lcv" loop above.
		 *
		 * if we exit the while loop with pps[lcv] still set to NULL,
		 * then it means that we allocated a new busy/fake/clean page
		 * ptmp in the object and we need to do I/O to fill in the data.
		 */

		/* top of "pps" while loop */
		while (pps[lcv] == NULL) {
			/* look for a resident page */
			ptmp = uvm_pagelookup(uobj, current_offset);

			/* not resident?   allocate one now (if we can) */
			if (ptmp == NULL) {

				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, 0);

				/* out of RAM? */
				if (ptmp == NULL) {
					simple_unlock(&uobj->vmobjlock);
					UVMHIST_LOG(pdhist,
					    "sleeping, ptmp == NULL\n",0,0,0,0);
					uvm_wait("uao_getpage");
					simple_lock(&uobj->vmobjlock);
					continue;
				}

				/*
				 * safe with PQ's unlocked: because we just
				 * alloc'd the page
				 */

				ptmp->pqflags |= PQ_AOBJ;

				/*
				 * got new page ready for I/O.  break pps while
				 * loop.  pps[lcv] is still NULL.
				 */

				break;
			}

			/* page is there, see if we need to wait on it */
			if ((ptmp->flags & PG_BUSY) != 0) {
				ptmp->flags |= PG_WANTED;
				UVMHIST_LOG(pdhist,
				    "sleeping, ptmp->flags 0x%x\n",
				    ptmp->flags,0,0,0);
				UVM_UNLOCK_AND_WAIT(ptmp, &uobj->vmobjlock,
				    FALSE, "uao_get", 0);
				simple_lock(&uobj->vmobjlock);
				continue;
			}

			/*
			 * if we get here then the page has become resident and
			 * unbusy between steps 1 and 2.  we busy it now (so we
			 * own it) and set pps[lcv] (so that we exit the while
			 * loop).
			 */

			/* we own it, caller must un-busy */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get2");
			pps[lcv] = ptmp;
		}

		/*
		 * if we own the valid page at the correct offset, pps[lcv] will
		 * point to it.   nothing more to do except go to the next page.
		 */

		if (pps[lcv])
			continue;			/* next lcv */

		/*
		 * we have a "fake/busy/clean" page that we just allocated.
		 * do the needed "i/o", either reading from swap or zeroing.
		 */

		swslot = uao_find_swslot(&aobj->u_obj, pageidx);

		/*
		 * just zero the page if there's nothing in swap.
		 */

		if (swslot == 0) {

			/*
			 * page hasn't existed before, just zero it.
			 */

			uvm_pagezero(ptmp);
		} else {
			UVMHIST_LOG(pdhist, "pagein from swslot %d",
			     swslot, 0,0,0);

			/*
			 * page in the swapped-out page.
			 * unlock object for i/o, relock when done.
			 */

			simple_unlock(&uobj->vmobjlock);
			error = uvm_swap_get(ptmp, swslot, PGO_SYNCIO);
			simple_lock(&uobj->vmobjlock);

			/*
			 * I/O done.  check for errors.
			 */

			if (error != 0) {
				UVMHIST_LOG(pdhist, "<- done (error=%d)",
				    error,0,0,0);
				if (ptmp->flags & PG_WANTED)
					wakeup(ptmp);

				/*
				 * remove the swap slot from the aobj
				 * and mark the aobj as having no real slot.
				 * don't free the swap slot, thus preventing
				 * it from being used again.
				 */

				swslot = uao_set_swslot(&aobj->u_obj, pageidx,
							SWSLOT_BAD);
				if (swslot > 0) {
					uvm_swap_markbad(swslot, 1);
				}

				uvm_lock_pageq();
				uvm_pagefree(ptmp);
				uvm_unlock_pageq();
				simple_unlock(&uobj->vmobjlock);
				return error;
			}
		}

		/*
		 * we got the page!   clear the fake flag (indicates valid
		 * data now in page) and plug into our result array.   note
		 * that page is still busy.
		 *
		 * it is the caller's job to:
		 * => check if the page is released
		 * => unbusy the page
		 * => activate the page
		 */

		ptmp->flags &= ~PG_FAKE;
		pps[lcv] = ptmp;
	}

	/*
	 * finally, unlock object and return.
	 */

	simple_unlock(&uobj->vmobjlock);
	UVMHIST_LOG(pdhist, "<- done (OK)",0,0,0,0);
	return 0;
}

/*
 * uao_dropswap:  release any swap resources from this aobj page.
 *
 * => aobj must be locked or have a reference count of 0.
 */

void
uao_dropswap(uobj, pageidx)
	struct uvm_object *uobj;
	int pageidx;
{
	int slot;

	slot = uao_set_swslot(uobj, pageidx, 0);
	if (slot) {
		uvm_swap_free(slot, 1);
	}
}

/*
 * page in every page in every aobj that is paged-out to a range of swslots.
 *
 * => nothing should be locked.
 * => returns TRUE if pagein was aborted due to lack of memory.
 */

boolean_t
uao_swap_off(startslot, endslot)
	int startslot, endslot;
{
	struct uvm_aobj *aobj, *nextaobj;
	boolean_t rv;

	/*
	 * walk the list of all aobjs.
	 */

restart:
	simple_lock(&uao_list_lock);
	for (aobj = LIST_FIRST(&uao_list);
	     aobj != NULL;
	     aobj = nextaobj) {

		/*
		 * try to get the object lock, start all over if we fail.
		 * most of the time we'll get the aobj lock,
		 * so this should be a rare case.
		 */

		if (!simple_lock_try(&aobj->u_obj.vmobjlock)) {
			simple_unlock(&uao_list_lock);
			goto restart;
		}

		/*
		 * add a ref to the aobj so it doesn't disappear
		 * while we're working.
		 */

		uao_reference_locked(&aobj->u_obj);

		/*
		 * now it's safe to unlock the uao list.
		 */

		simple_unlock(&uao_list_lock);

		/*
		 * page in any pages in the swslot range.
		 * if there's an error, abort and return the error.
		 */

		rv = uao_pagein(aobj, startslot, endslot);
		if (rv) {
			uao_detach_locked(&aobj->u_obj);
			return rv;
		}

		/*
		 * we're done with this aobj.
		 * relock the list and drop our ref on the aobj.
		 */

		simple_lock(&uao_list_lock);
		nextaobj = LIST_NEXT(aobj, u_list);
		uao_detach_locked(&aobj->u_obj);
	}

	/*
	 * done with traversal, unlock the list
	 */
	simple_unlock(&uao_list_lock);
	return FALSE;
}

/*
 * page in any pages from aobj in the given range.
 *
 * => aobj must be locked and is returned locked.
 * => returns TRUE if pagein was aborted due to lack of memory.
 */
static boolean_t
uao_pagein(aobj, startslot, endslot)
	struct uvm_aobj *aobj;
	int startslot, endslot;
{
	boolean_t rv;

	if (UAO_USES_SWHASH(aobj)) {
		struct uao_swhash_elt *elt;
		int bucket;

restart:
		for (bucket = aobj->u_swhashmask; bucket >= 0; bucket--) {
			for (elt = LIST_FIRST(&aobj->u_swhash[bucket]);
			     elt != NULL;
			     elt = LIST_NEXT(elt, list)) {
				int i;

				for (i = 0; i < UAO_SWHASH_CLUSTER_SIZE; i++) {
					int slot = elt->slots[i];

					/*
					 * if the slot isn't in range, skip it.
					 */

					if (slot < startslot ||
					    slot >= endslot) {
						continue;
					}

					/*
					 * process the page,
					 * then start over on this object
					 * since the swhash elt
					 * may have been freed.
					 */

					rv = uao_pagein_page(aobj,
					  UAO_SWHASH_ELT_PAGEIDX_BASE(elt) + i);
					if (rv) {
						return rv;
					}
					goto restart;
				}
			}
		}
	} else {
		int i;

		for (i = 0; i < aobj->u_pages; i++) {
			int slot = aobj->u_swslots[i];

			/*
			 * if the slot isn't in range, skip it
			 */

			if (slot < startslot || slot >= endslot) {
				continue;
			}

			/*
			 * process the page.
			 */

			rv = uao_pagein_page(aobj, i);
			if (rv) {
				return rv;
			}
		}
	}

	return FALSE;
}

/*
 * page in a page from an aobj.  used for swap_off.
 * returns TRUE if pagein was aborted due to lack of memory.
 *
 * => aobj must be locked and is returned locked.
 */

static boolean_t
uao_pagein_page(aobj, pageidx)
	struct uvm_aobj *aobj;
	int pageidx;
{
	struct vm_page *pg;
	int rv, npages;

	pg = NULL;
	npages = 1;
	/* locked: aobj */
	rv = uao_get(&aobj->u_obj, (voff_t)pageidx << PAGE_SHIFT,
		     &pg, &npages, 0, VM_PROT_READ|VM_PROT_WRITE, 0, 0);
	/* unlocked: aobj */

	/*
	 * relock and finish up.
	 */

	simple_lock(&aobj->u_obj.vmobjlock);
	switch (rv) {
	case 0:
		break;

	case EIO:
	case ERESTART:

		/*
		 * nothing more to do on errors.
		 * ERESTART can only mean that the anon was freed,
		 * so again there's nothing to do.
		 */

		return FALSE;

	default:
		return TRUE;
	}

	/*
	 * ok, we've got the page now.
	 * mark it as dirty, clear its swslot and un-busy it.
	 */
	uao_dropswap(&aobj->u_obj, pageidx);

	/*
	 * deactivate the page (to make sure it's on a page queue).
	 */
	uvm_lock_pageq();
	if (pg->wire_count == 0)
		uvm_pagedeactivate(pg);
	uvm_unlock_pageq();

	if (pg->flags & PG_WANTED) {
		wakeup(pg);
	}
	pg->flags &= ~(PG_WANTED|PG_BUSY|PG_CLEAN|PG_FAKE);
	UVM_PAGE_OWN(pg, NULL);

	return FALSE;
}